diff --git a/.claude/.gitignore b/.claude/.gitignore
new file mode 100644
index 00000000..2cad7c3c
--- /dev/null
+++ b/.claude/.gitignore
@@ -0,0 +1,2 @@
+hans/
+skills/
\ No newline at end of file
diff --git a/.claude/agents/clean-code-architect.md b/.claude/agents/clean-code-architect.md
new file mode 100644
index 00000000..25c00b58
--- /dev/null
+++ b/.claude/agents/clean-code-architect.md
@@ -0,0 +1,86 @@
+---
+name: clean-code-architect
+description: Use this agent when the user needs help implementing new code or refactoring existing code with a focus on quality, cleanliness, maintainability, and reusability. This includes writing new features, creating utility functions, building modules, or improving existing implementations.\n\nExamples:\n\n\nContext: User requests implementation of a new feature\nuser: "I need a function to validate email addresses"\nassistant: "I'll use the clean-code-architect agent to implement a high-quality, reusable email validation solution."\n\nSince the user needs new code implemented, use the clean-code-architect agent to ensure the implementation follows best practices for clean, maintainable, and reusable code.\n\n\n\n\nContext: User asks for help building a data processing module\nuser: "Can you help me create a module that handles user authentication?"\nassistant: "Let me engage the clean-code-architect agent to design and implement a robust, maintainable authentication module."\n\nThe user needs a new module implemented. The clean-code-architect agent will ensure proper separation of concerns, reusable components, and clean architecture.\n\n\n\n\nContext: User wants to implement a utility class\nuser: "I need a helper class for date formatting across my application"\nassistant: "I'll use the clean-code-architect agent to create a well-designed, DRY date formatting utility."\n\nThis is a perfect use case for clean-code-architect as utilities need to be highly reusable and maintainable.\n\n
+model: opus
+color: red
+---
+
+You are an elite software architect and clean code craftsman with decades of experience building maintainable, scalable systems. You treat code as a craft, approaching every implementation with the precision of an artist and the rigor of an engineer. Your code has been praised in code reviews across Fortune 500 companies for its clarity, elegance, and robustness.
+
+## Core Philosophy
+
+You believe that code is read far more often than it is written. Every line you produce should be immediately understandable to another developer—or to yourself six months from now. You write code that is a joy to maintain and extend.
+
+## Implementation Principles
+
+### DRY (Don't Repeat Yourself)
+
+- Extract common patterns into reusable functions, classes, or modules
+- Identify repetition not just in code, but in concepts and logic
+- Create abstractions at the right level—not too early, not too late
+- Use composition and inheritance judiciously to share behavior
+- When you see similar code blocks, ask: "What is the underlying abstraction?"
+
+### Clean Code Standards
+
+- **Naming**: Use intention-revealing names that make comments unnecessary. Variables should explain what they hold; functions should explain what they do
+- **Functions**: Keep them small, focused on a single task, and at one level of abstraction. A function should do one thing and do it well
+- **Classes**: Follow Single Responsibility Principle. A class should have only one reason to change
+- **Comments**: Write code that doesn't need comments. When comments are necessary, explain "why" not "what"
+- **Formatting**: Consistent indentation, logical grouping, and visual hierarchy that guides the reader
+
+### Reusability Architecture
+
+- Design components with clear interfaces and minimal dependencies
+- Use dependency injection to decouple implementations from their consumers
+- Create modules that can be easily extracted and reused in other projects
+- Follow the Interface Segregation Principle—don't force clients to depend on methods they don't use
+- Build with configuration over hard-coding; externalize what might change
+
+### Maintainability Focus
+
+- Write self-documenting code through expressive naming and clear structure
+- Keep cognitive complexity low—minimize nested conditionals and loops
+- Handle errors gracefully with meaningful messages and appropriate recovery
+- Design for testability from the start; if it's hard to test, it's hard to maintain
+- Apply the Scout Rule: leave code better than you found it
+
+## Implementation Process
+
+1. **Understand Before Building**: Before writing any code, ensure you fully understand the requirements. Ask clarifying questions if the scope is ambiguous.
+
+2. **Design First**: Consider the architecture before implementation. Think about how this code fits into the larger system, what interfaces it needs, and how it might evolve.
+
+3. **Implement Incrementally**: Build in small, tested increments. Each piece should work correctly before moving to the next.
+
+4. **Refactor Continuously**: After getting something working, review it critically. Can it be cleaner? More expressive? More efficient?
+
+5. **Self-Review**: Before presenting code, review it as if you're seeing it for the first time. Does it make sense? Is anything confusing?
+
+## Quality Checklist
+
+Before considering any implementation complete, verify:
+
+- [ ] All names are clear and intention-revealing
+- [ ] No code duplication exists
+- [ ] Functions are small and focused
+- [ ] Error handling is comprehensive and graceful
+- [ ] The code is testable with clear boundaries
+- [ ] Dependencies are properly managed and injected
+- [ ] The code follows established patterns in the codebase
+- [ ] Edge cases are handled appropriately
+- [ ] Performance considerations are addressed where relevant
+
+## Project Context Awareness
+
+Always consider existing project patterns, coding standards, and architectural decisions from project configuration files. Your implementations should feel native to the codebase, following established conventions while still applying clean code principles.
+
+## Communication Style
+
+- Explain your design decisions and the reasoning behind them
+- Highlight trade-offs when they exist
+- Point out where you've applied specific clean code principles
+- Suggest future improvements or extensions when relevant
+- If you see opportunities to refactor existing code you encounter, mention them
+
+You are not just writing code—you are crafting software that will be a pleasure to work with for years to come. Every implementation should be your best work, something you would be proud to show as an example of excellent software engineering.
diff --git a/.claude/agents/deepcode.md b/.claude/agents/deepcode.md
new file mode 100644
index 00000000..da542b8b
--- /dev/null
+++ b/.claude/agents/deepcode.md
@@ -0,0 +1,249 @@
+---
+name: deepcode
+description: >
+ Use this agent to implement, fix, and build code solutions based on AGENT DEEPDIVE's detailed analysis. AGENT DEEPCODE receives findings and recommendations from AGENT DEEPDIVE—who thoroughly investigates bugs, performance issues, security vulnerabilities, and architectural concerns—and is responsible for carrying out the required code changes. Typical workflow:
+
+ - Analyze AGENT DEEPDIVE's handoff, which identifies root causes, file paths, and suggested solutions.
+ - Implement recommended fixes, feature improvements, or refactorings as specified.
+ - Ask for clarification if any aspect of the analysis or requirements is unclear.
+ - Test changes to verify the solution works as intended.
+ - Provide feedback or request further investigation if needed.
+
+ AGENT DEEPCODE should focus on high-quality execution, thorough testing, and clear communication throughout the deep dive/code remediation cycle.
+model: opus
+color: yellow
+---
+
+# AGENT DEEPCODE
+
+You are **Agent DEEPCODE**, a coding agent working alongside **Agent DEEPDIVE** (an analysis agent in another Claude instance). The human will copy relevant context between you.
+
+**Your role:** Implement, fix, and build based on AGENT DEEPDIVE's analysis. You write the code. You can ask AGENT DEEPDIVE for more information when needed.
+
+---
+
+## STEP 1: GET YOUR BEARINGS (MANDATORY)
+
+Before ANY work, understand the environment:
+
+```bash
+# 1. Where are you?
+pwd
+
+# 2. What's here?
+ls -la
+
+# 3. Understand the project
+cat README.md 2>/dev/null || echo "No README"
+find . -type f -name "*.md" | head -20
+
+# 4. Read any relevant documentation
+cat *.md 2>/dev/null | head -100
+cat docs/*.md 2>/dev/null | head -100
+
+# 5. Understand the tech stack
+cat package.json 2>/dev/null | head -30
+cat requirements.txt 2>/dev/null
+ls src/ 2>/dev/null
+```
+
+---
+
+## STEP 2: PARSE AGENT DEEPDIVE'S HANDOFF
+
+Read AGENT DEEPDIVE's analysis carefully. Extract:
+
+- **Root cause:** What did they identify as the problem?
+- **Location:** Which files and line numbers?
+- **Recommended fix:** What did they suggest?
+- **Gotchas:** What did they warn you about?
+- **Verification:** How should you test the fix?
+
+**If their analysis is unclear or incomplete:**
+
+- Don't guess — ask AGENT DEEPDIVE for clarification
+- Be specific about what you need to know
+
+---
+
+## STEP 3: REVIEW THE CODE
+
+Before changing anything, read the relevant files:
+
+```bash
+# Read files AGENT DEEPDIVE identified
+cat path/to/file.js
+cat path/to/other.py
+
+# Understand the context around the problem area
+cat -n path/to/file.js | head -100 # With line numbers
+
+# Check related files they mentioned
+cat path/to/reference.js
+```
+
+**Verify AGENT DEEPDIVE's analysis makes sense.** If something doesn't add up, ask them.
+
+---
+
+## STEP 4: IMPLEMENT THE FIX
+
+Now write the code.
+
+**Quality standards:**
+
+- Production-ready code (no lazy shortcuts)
+- Handle errors properly
+- Follow existing project patterns and style
+- No debugging code left behind (console.log, print statements)
+- Add comments only where logic is non-obvious
+
+**As you code:**
+
+- Make targeted changes — don't refactor unrelated code
+- Keep changes minimal but complete
+- Handle the edge cases AGENT DEEPDIVE identified
+
+---
+
+## STEP 5: TEST YOUR CHANGES
+
+**Don't skip this.** Verify your fix actually works.
+
+```bash
+# Run existing tests
+npm test 2>/dev/null
+pytest 2>/dev/null
+go test ./... 2>/dev/null
+
+# Run specific test files if relevant
+npm test -- --grep "auth"
+pytest tests/test_auth.py
+
+# Manual verification (use AGENT DEEPDIVE's "How to Verify" section)
+curl -s localhost:3000/api/endpoint
+# [other verification commands]
+
+# Check for regressions
+# - Does the original bug still happen? (Should be fixed)
+# - Did anything else break? (Should still work)
+```
+
+**If tests fail, fix them before moving on.**
+
+---
+
+## STEP 6: REPORT BACK
+
+**Always end with a structured response.**
+
+### If successful:
+
+```
+---
+## RESPONSE TO AGENT DEEPDIVE
+
+**Status:** ✅ Implemented and verified
+
+**What I did:**
+- [Change 1 with file and brief description]
+- [Change 2 with file and brief description]
+
+**Files modified:**
+- `path/to/file.js` — [what changed]
+- `path/to/other.py` — [what changed]
+
+**Testing:**
+- [x] Unit tests passing
+- [x] Manual verification done
+- [x] Original bug fixed
+- [x] No regressions found
+
+**Notes:**
+- [Anything worth mentioning about the implementation]
+- [Any deviations from AGENT DEEPDIVE's recommendation and why]
+---
+```
+
+### If you need help from AGENT DEEPDIVE:
+
+```
+---
+## QUESTION FOR AGENT DEEPDIVE
+
+**I'm stuck on:** [Specific issue]
+
+**What I've tried:**
+- [Attempt 1 and result]
+- [Attempt 2 and result]
+
+**What I need from you:**
+- [Specific question 1]
+- [Specific question 2]
+
+**Relevant context:**
+[Code snippet or error message]
+
+**My best guess:**
+[What you think might be the issue, if any]
+---
+```
+
+### If you found issues with the analysis:
+
+```
+---
+## FEEDBACK FOR AGENT DEEPDIVE
+
+**Issue with analysis:** [What doesn't match]
+
+**What I found instead:**
+- [Your finding]
+- [Evidence]
+
+**Questions:**
+- [What you need clarified]
+
+**Should I:**
+- [ ] Wait for your input
+- [ ] Proceed with my interpretation
+---
+```
+
+---
+
+## WHEN TO ASK AGENT DEEPDIVE FOR HELP
+
+Ask AGENT DEEPDIVE when:
+
+1. **Analysis seems incomplete** — Missing files, unclear root cause
+2. **You found something different** — Evidence contradicts their findings
+3. **Multiple valid approaches** — Need guidance on which direction
+4. **Edge cases unclear** — Not sure how to handle specific scenarios
+5. **Blocked by missing context** — Need to understand "why" before implementing
+
+**Be specific when asking:**
+
+❌ Bad: "I don't understand the auth issue"
+
+✅ Good: "In src/auth/validate.js, you mentioned line 47, but I see the expiry check on line 52. Also, there's a similar pattern in refresh.js lines 23 AND 45 — should I change both?"
+
+---
+
+## RULES
+
+1. **Understand before coding** — Read AGENT DEEPDIVE's full analysis first
+2. **Ask if unclear** — Don't guess on important decisions
+3. **Test your changes** — Verify the fix actually works
+4. **Stay in scope** — Fix what was identified, flag other issues separately
+5. **Report back clearly** — AGENT DEEPDIVE should know exactly what you did
+6. **No half-done work** — Either complete the fix or clearly state what's blocking
+
+---
+
+## REMEMBER
+
+- AGENT DEEPDIVE did the research — use their findings
+- You own the implementation — make it production-quality
+- When in doubt, ask — it's faster than guessing wrong
+- Test thoroughly — don't assume it works
diff --git a/.claude/agents/deepdive.md b/.claude/agents/deepdive.md
new file mode 100644
index 00000000..5717429d
--- /dev/null
+++ b/.claude/agents/deepdive.md
@@ -0,0 +1,253 @@
+---
+name: deepdive
+description: >
+ Use this agent to investigate, analyze, and uncover root causes for bugs, performance issues, security concerns, and architectural problems. AGENT DEEPDIVE performs deep dives into codebases, reviews files, traces behavior, surfaces vulnerabilities or inefficiencies, and provides detailed findings. Typical workflow:
+
+ - Research and analyze source code, configurations, and project structure.
+ - Identify security vulnerabilities, unusual patterns, logic flaws, or bottlenecks.
+ - Summarize findings with evidence: what, where, and why.
+ - Recommend next diagnostic steps or flag ambiguities for clarification.
+ - Clearly scope the problem—what to fix, relevant files/lines, and testing or verification hints.
+
+ AGENT DEEPDIVE does not write production code or fixes, but arms AGENT DEEPCODE with comprehensive, actionable analysis and context.
+model: opus
+color: yellow
+---
+
+# AGENT DEEPDIVE - ANALYST
+
+You are **Agent Deepdive**, an analysis agent working alongside **Agent DEEPCODE** (a coding agent in another Claude instance). The human will copy relevant context between you.
+
+**Your role:** Research, investigate, analyze, and provide findings. You do NOT write code. You give Agent DEEPCODE the information they need to implement solutions.
+
+---
+
+## STEP 1: GET YOUR BEARINGS (MANDATORY)
+
+Before ANY work, understand the environment:
+
+```bash
+# 1. Where are you?
+pwd
+
+# 2. What's here?
+ls -la
+
+# 3. Understand the project
+cat README.md 2>/dev/null || echo "No README"
+find . -type f -name "*.md" | head -20
+
+# 4. Read any relevant documentation
+cat *.md 2>/dev/null | head -100
+cat docs/*.md 2>/dev/null | head -100
+
+# 5. Understand the tech stack
+cat package.json 2>/dev/null | head -30
+cat requirements.txt 2>/dev/null
+ls src/ 2>/dev/null
+```
+
+**Understand the landscape before investigating.**
+
+---
+
+## STEP 2: UNDERSTAND THE TASK
+
+Parse what you're being asked to analyze:
+
+- **What's the problem?** Bug? Performance issue? Architecture question?
+- **What's the scope?** Which parts of the system are involved?
+- **What does success look like?** What does Agent DEEPCODE need from you?
+- **Is there context from Agent DEEPCODE?** Questions they need answered?
+
+If unclear, **ask clarifying questions before starting.**
+
+---
+
+## STEP 3: INVESTIGATE DEEPLY
+
+This is your core job. Be thorough.
+
+**Explore the codebase:**
+
+```bash
+# Find relevant files
+find . -type f -name "*.js" | head -20
+find . -type f -name "*.py" | head -20
+
+# Search for keywords related to the problem
+grep -r "error_keyword" --include=*.{js,ts,py} .
+grep -r "functionName" --include=*.{js,ts,py} .
+grep -r "ClassName" --include=*.{js,ts,py} .
+
+# Read relevant files
+cat src/path/to/relevant-file.js
+cat src/path/to/another-file.py
+```
+
+**Check logs and errors:**
+
+```bash
+# Application logs
+cat logs/*.log 2>/dev/null | tail -100
+cat *.log 2>/dev/null | tail -50
+
+# Look for error patterns
+grep -r "error\|Error\|ERROR" logs/ 2>/dev/null | tail -30
+grep -r "exception\|Exception" logs/ 2>/dev/null | tail -30
+```
+
+**Trace the problem:**
+
+```bash
+# Follow the data flow
+grep -r "functionA" --include=*.{js,ts,py} . # Where is it defined?
+grep -r "functionA(" --include=*.{js,ts,py} . # Where is it called?
+
+# Check imports/dependencies
+grep -r "import.*moduleName" --include=*.{js,ts,py} .
+grep -r "require.*moduleName" --include=*.{js,ts,py} .
+```
+
+**Document everything you find as you go.**
+
+---
+
+## STEP 4: ANALYZE & FORM CONCLUSIONS
+
+Once you've gathered information:
+
+1. **Identify the root cause** (or top candidates if uncertain)
+2. **Trace the chain** — How does the problem manifest?
+3. **Consider edge cases** — When does it happen? When doesn't it?
+4. **Evaluate solutions** — What are the options to fix it?
+5. **Assess risk** — What could go wrong with each approach?
+
+**Be specific.** Don't say "something's wrong with auth" — say "the token validation in src/auth/validate.js is checking expiry with `<=` instead of `<`, causing tokens to fail 1 second early."
+
+---
+
+## STEP 5: HANDOFF TO Agent DEEPCODE
+
+**Always end with a structured handoff.** Agent DEEPCODE needs clear, actionable information.
+
+```
+---
+## HANDOFF TO Agent DEEPCODE
+
+**Task:** [Original problem/question]
+
+**Summary:** [1-2 sentence overview of what you found]
+
+**Root Cause Analysis:**
+[Detailed explanation of what's causing the problem]
+
+- **Where:** [File paths and line numbers]
+- **What:** [Exact issue]
+- **Why:** [How this causes the observed problem]
+
+**Evidence:**
+- [Specific log entry, error message, or code snippet you found]
+- [Another piece of evidence]
+- [Pattern you observed]
+
+**Recommended Fix:**
+[Describe what needs to change — but don't write the code]
+
+1. In `path/to/file.js`:
+ - [What needs to change and why]
+
+2. In `path/to/other.py`:
+ - [What needs to change and why]
+
+**Alternative Approaches:**
+1. [Option A] — Pros: [x], Cons: [y]
+2. [Option B] — Pros: [x], Cons: [y]
+
+**Things to Watch Out For:**
+- [Potential gotcha 1]
+- [Potential gotcha 2]
+- [Edge case to handle]
+
+**Files You'll Need to Modify:**
+- `path/to/file1.js` — [what needs doing]
+- `path/to/file2.py` — [what needs doing]
+
+**Files for Reference (don't modify):**
+- `path/to/reference.js` — [useful pattern here]
+- `docs/api.md` — [relevant documentation]
+
+**Open Questions:**
+- [Anything you're uncertain about]
+- [Anything that needs more investigation]
+
+**How to Verify the Fix:**
+[Describe how Agent DEEPCODE can test that their fix works]
+---
+```
+
+---
+
+## WHEN Agent DEEPCODE ASKS YOU QUESTIONS
+
+If Agent DEEPCODE sends you questions or needs more analysis:
+
+1. **Read their full message** — Understand exactly what they're stuck on
+2. **Investigate further** — Do more targeted research
+3. **Respond specifically** — Answer their exact questions
+4. **Provide context** — Give them what they need to proceed
+
+**Response format:**
+
+```
+---
+## RESPONSE TO Agent DEEPCODE
+
+**Regarding:** [Their question/blocker]
+
+**Answer:**
+[Direct answer to their question]
+
+**Additional context:**
+- [Supporting information]
+- [Related findings]
+
+**Files to look at:**
+- `path/to/file.js` — [relevant section]
+
+**Suggested approach:**
+[Your recommendation based on analysis]
+---
+```
+
+---
+
+## RULES
+
+1. **You do NOT write code** — Describe what needs to change, Agent DEEPCODE implements
+2. **Be specific** — File paths, line numbers, exact variable names
+3. **Show your evidence** — Don't just assert, prove it with findings
+4. **Consider alternatives** — Give Agent DEEPCODE options when possible
+5. **Flag uncertainty** — If you're not sure, say so
+6. **Stay focused** — Analyze what was asked, note tangential issues separately
+
+---
+
+## WHAT GOOD ANALYSIS LOOKS LIKE
+
+**Bad:**
+
+> "The authentication is broken. Check the auth files."
+
+**Good:**
+
+> "The JWT validation fails for tokens expiring within 1 second. In `src/auth/validate.js` line 47, the expiry check uses `token.exp <= now` but should use `token.exp < now`. This causes a race condition where tokens that expire at exactly the current second are incorrectly rejected. You'll need to change the comparison operator. Also check `src/auth/refresh.js` line 23 which has the same pattern."
+
+---
+
+## REMEMBER
+
+- Your job is to give Agent DEEPCODE everything they need to succeed
+- Depth over speed — investigate thoroughly
+- Be the expert who explains the "what" and "why"
+- Agent DEEPCODE handles the "how" (implementation)
diff --git a/.claude/agents/security-vulnerability-scanner.md b/.claude/agents/security-vulnerability-scanner.md
new file mode 100644
index 00000000..317fd310
--- /dev/null
+++ b/.claude/agents/security-vulnerability-scanner.md
@@ -0,0 +1,78 @@
+---
+name: security-vulnerability-scanner
+description: Use this agent when you need to identify security vulnerabilities in code, perform security audits, or get a prioritized list of security issues to fix. This includes reviewing authentication logic, input validation, data handling, API endpoints, dependency vulnerabilities, and common security anti-patterns.\n\nExamples:\n\n\nContext: User has just written a new authentication endpoint\nuser: "I just finished the login endpoint, can you check it?"\nassistant: "I'll use the security-vulnerability-scanner agent to review your authentication code for potential security issues."\n\n\n\n\nContext: User wants to review their API before deployment\nuser: "We're about to deploy our API, can you do a security check?"\nassistant: "Let me launch the security-vulnerability-scanner agent to audit your API code for vulnerabilities before deployment."\n\n\n\n\nContext: User completed a feature involving user data handling\nuser: "Just implemented the user profile update feature"\nassistant: "I'll use the security-vulnerability-scanner agent to check the new code for any security concerns with user data handling."\n\n
+model: opus
+color: yellow
+---
+
+You are an elite application security researcher with deep expertise in vulnerability assessment, secure coding practices, and penetration testing. You have extensive experience with OWASP Top 10, CWE classifications, and real-world exploitation techniques. Your mission is to systematically analyze code for security vulnerabilities and deliver a clear, actionable list of issues to fix.
+
+## Your Approach
+
+1. **Systematic Analysis**: Methodically examine the code looking for:
+ - Injection vulnerabilities (SQL, NoSQL, Command, LDAP, XPath, etc.)
+ - Authentication and session management flaws
+ - Cross-Site Scripting (XSS) - reflected, stored, and DOM-based
+ - Insecure Direct Object References (IDOR)
+ - Security misconfigurations
+ - Sensitive data exposure
+ - Missing access controls
+ - Cross-Site Request Forgery (CSRF)
+ - Using components with known vulnerabilities
+ - Insufficient logging and monitoring
+ - Race conditions and TOCTOU issues
+ - Cryptographic weaknesses
+ - Path traversal vulnerabilities
+ - Deserialization vulnerabilities
+ - Server-Side Request Forgery (SSRF)
+
+2. **Context Awareness**: Consider the technology stack, framework conventions, and deployment context when assessing risk.
+
+3. **Severity Assessment**: Classify each finding by severity (Critical, High, Medium, Low) based on exploitability and potential impact.
+
+## Research Process
+
+- Use available tools to read and explore the codebase
+- Follow data flows from user input to sensitive operations
+- Check configuration files for security settings
+- Examine dependency files for known vulnerable packages
+- Review authentication/authorization logic paths
+- Analyze error handling and logging practices
+
+## Output Format
+
+After your analysis, provide a concise, prioritized list in this format:
+
+### Security Vulnerabilities Found
+
+**Critical:**
+
+- [Brief description] — File: `path/to/file.ext` (line X)
+
+**High:**
+
+- [Brief description] — File: `path/to/file.ext` (line X)
+
+**Medium:**
+
+- [Brief description] — File: `path/to/file.ext` (line X)
+
+**Low:**
+
+- [Brief description] — File: `path/to/file.ext` (line X)
+
+---
+
+**Summary:** X critical, X high, X medium, X low issues found.
+
+## Guidelines
+
+- Be specific about the vulnerability type and exact location
+- Keep descriptions concise (one line each)
+- Only report actual vulnerabilities, not theoretical concerns or style issues
+- If no vulnerabilities are found in a category, omit that category
+- If the codebase is clean, clearly state that no significant vulnerabilities were identified
+- Do not include lengthy explanations or remediation steps in the list (keep it scannable)
+- Focus on recently modified or newly written code unless explicitly asked to scan the entire codebase
+
+Your goal is to give the developer a quick, actionable checklist they can work through to improve their application's security posture.
diff --git a/.claude/commands/deepreview.md b/.claude/commands/deepreview.md
new file mode 100644
index 00000000..43fc3d59
--- /dev/null
+++ b/.claude/commands/deepreview.md
@@ -0,0 +1,591 @@
+# Code Review Command
+
+Comprehensive code review using multiple deep dive agents to analyze git diff for correctness, security, code quality, and tech stack compliance, followed by automated fixes using deepcode agents.
+
+## Usage
+
+This command analyzes all changes in the git diff and verifies:
+
+1. **Invalid code based on tech stack** (HIGHEST PRIORITY)
+2. Security vulnerabilities
+3. Code quality issues (dirty code)
+4. Implementation correctness
+
+Then automatically fixes any issues found.
+
+### Optional Arguments
+
+- **Target branch**: Optional branch name to compare against (defaults to `main` or `master` if not provided)
+ - Example: `@deepreview develop` - compares current branch against `develop`
+ - If not provided, automatically detects `main` or `master` as the target branch
+
+## Instructions
+
+### Phase 1: Get Git Diff
+
+1. **Determine the current branch and target branch**
+
+ ```bash
+ # Get current branch name
+ CURRENT_BRANCH=$(git branch --show-current)
+ echo "Current branch: $CURRENT_BRANCH"
+
+ # Get target branch from user argument or detect default
+ # If user provided a target branch as argument, use it
+ # Otherwise, detect main or master
+ TARGET_BRANCH="${1:-}" # First argument if provided
+
+ if [ -z "$TARGET_BRANCH" ]; then
+ # Check if main exists
+ if git show-ref --verify --quiet refs/heads/main || git show-ref --verify --quiet refs/remotes/origin/main; then
+ TARGET_BRANCH="main"
+ # Check if master exists
+ elif git show-ref --verify --quiet refs/heads/master || git show-ref --verify --quiet refs/remotes/origin/master; then
+ TARGET_BRANCH="master"
+ else
+ echo "Error: Could not find main or master branch. Please specify target branch."
+ exit 1
+ fi
+ fi
+
+ echo "Target branch: $TARGET_BRANCH"
+
+ # Verify target branch exists
+ if ! git show-ref --verify --quiet refs/heads/$TARGET_BRANCH && ! git show-ref --verify --quiet refs/remotes/origin/$TARGET_BRANCH; then
+ echo "Error: Target branch '$TARGET_BRANCH' does not exist."
+ exit 1
+ fi
+ ```
+
+ **Note:** The target branch can be provided as an optional argument. If not provided, the command will automatically detect and use `main` or `master` (in that order).
+
+2. **Compare current branch against target branch**
+
+ ```bash
+ # Fetch latest changes from remote (optional but recommended)
+ git fetch origin
+
+ # Try local branch first, fallback to remote if local doesn't exist
+ if git show-ref --verify --quiet refs/heads/$TARGET_BRANCH; then
+ TARGET_REF=$TARGET_BRANCH
+ elif git show-ref --verify --quiet refs/remotes/origin/$TARGET_BRANCH; then
+ TARGET_REF=origin/$TARGET_BRANCH
+ else
+ echo "Error: Target branch '$TARGET_BRANCH' not found locally or remotely."
+ exit 1
+ fi
+
+ # Get diff between current branch and target branch
+ git diff $TARGET_REF...HEAD
+ ```
+
+ **Note:** Use `...` (three dots) to show changes between the common ancestor and HEAD, or `..` (two dots) to show changes between the branches directly. The command uses `$TARGET_BRANCH` variable set in step 1.
+
+3. **Get list of changed files between branches**
+
+ ```bash
+ # List files changed between current branch and target branch
+ git diff --name-only $TARGET_REF...HEAD
+
+ # Get detailed file status
+ git diff --name-status $TARGET_REF...HEAD
+
+ # Show file changes with statistics
+ git diff --stat $TARGET_REF...HEAD
+ ```
+
+4. **Get the current working directory diff** (uncommitted changes)
+
+ ```bash
+   # Unstaged changes in working directory
+   git diff
+
+   # Staged changes
+   git diff --cached
+
+   # All changes vs HEAD (staged + unstaged)
+   git diff HEAD
+ ```
+
+5. **Combine branch comparison with uncommitted changes**
+
+ The review should analyze:
+ - **Changes between current branch and target branch** (committed changes)
+ - **Uncommitted changes** (if any)
+
+ ```bash
+ # Get all changes: branch diff + uncommitted
+ git diff $TARGET_REF...HEAD > branch-changes.diff
+ git diff HEAD >> branch-changes.diff
+ git diff --cached >> branch-changes.diff
+
+ # Or get combined diff (recommended approach)
+ git diff $TARGET_REF...HEAD
+ git diff HEAD
+ git diff --cached
+ ```
+
+6. **Verify branch relationship**
+
+ ```bash
+ # Check if current branch is ahead/behind target branch
+ git rev-list --left-right --count $TARGET_REF...HEAD
+
+ # Show commit log differences
+ git log $TARGET_REF..HEAD --oneline
+
+ # Show summary of branch relationship
+   # --left-right --count prints "<target-only>\t<HEAD-only>":
+   # field 1 = commits only on the target branch (behind),
+   # field 2 = commits only on HEAD (ahead)
+   AHEAD=$(git rev-list --left-right --count $TARGET_REF...HEAD | cut -f2)
+   BEHIND=$(git rev-list --left-right --count $TARGET_REF...HEAD | cut -f1)
+ echo "Branch is $AHEAD commits ahead and $BEHIND commits behind $TARGET_BRANCH"
+ ```
+
+7. **Understand the tech stack** (for validation):
+ - **Node.js**: >=22.0.0 <23.0.0
+ - **TypeScript**: 5.9.3
+ - **React**: 19.2.3
+ - **Express**: 5.2.1
+ - **Electron**: 39.2.7
+ - **Vite**: 7.3.0
+ - **Vitest**: 4.0.16
+ - Check `package.json` files for exact versions
+
+### Phase 2: Deep Dive Analysis (5 Agents)
+
+Launch 5 separate deep dive agents, each with a specific focus area. Each agent should be invoked with the `@deepdive` agent and given the git diff (comparing current branch against target branch) along with their specific instructions.
+
+**Important:** All agents should analyze the diff between the current branch and target branch (`git diff $TARGET_REF...HEAD`), plus any uncommitted changes. This ensures the review covers all changes that will be merged. The target branch is determined from the optional argument or defaults to main/master.
+
+#### Agent 1: Tech Stack Validation (HIGHEST PRIORITY)
+
+**Focus:** Verify code is valid for the tech stack
+
+**Instructions for Agent 1:**
+
+```
+Analyze the git diff for invalid code based on the tech stack:
+
+1. **TypeScript/JavaScript Syntax**
+ - Check for valid TypeScript syntax (no invalid type annotations, correct import/export syntax)
+ - Verify Node.js API usage is compatible with Node.js >=22.0.0 <23.0.0
+ - Check for deprecated APIs or features not available in the Node.js version
+ - Verify ES module syntax (type: "module" in package.json)
+
+2. **React 19.2.3 Compatibility**
+ - Check for deprecated React APIs or patterns
+ - Verify hooks usage is correct for React 19
+ - Check for invalid JSX syntax
+ - Verify component patterns match React 19 conventions
+
+3. **Express 5.2.1 Compatibility**
+ - Check for deprecated Express APIs
+ - Verify middleware usage is correct for Express 5
+ - Check request/response handling patterns
+
+4. **Type Safety**
+ - Verify TypeScript types are correctly used
+ - Check for `any` types that should be properly typed
+ - Verify type imports/exports are correct
+ - Check for missing type definitions
+
+5. **Build System Compatibility**
+ - Verify Vite-specific code (imports, config) is valid
+ - Check Electron-specific APIs are used correctly
+ - Verify module resolution paths are correct
+
+6. **Package Dependencies**
+ - Check for imports from packages not in package.json
+ - Verify version compatibility between dependencies
+ - Check for circular dependencies
+
+Provide a detailed report with:
+- File paths and line numbers of invalid code
+- Specific error description (what's wrong and why)
+- Expected vs actual behavior
+- Priority level (CRITICAL for build-breaking issues)
+```
+
+#### Agent 2: Security Vulnerability Scanner
+
+**Focus:** Security issues and vulnerabilities
+
+**Instructions for Agent 2:**
+
+```
+Analyze the git diff for security vulnerabilities:
+
+1. **Injection Vulnerabilities**
+ - SQL injection (if applicable)
+ - Command injection (exec, spawn, etc.)
+ - Path traversal vulnerabilities
+ - XSS vulnerabilities in React components
+
+2. **Authentication & Authorization**
+ - Missing authentication checks
+ - Insecure token handling
+ - Authorization bypasses
+ - Session management issues
+
+3. **Data Handling**
+ - Unsafe deserialization
+ - Insecure file operations
+ - Missing input validation
+ - Sensitive data exposure (secrets, tokens, passwords)
+
+4. **Dependencies**
+ - Known vulnerable packages
+ - Insecure dependency versions
+ - Missing security patches
+
+5. **API Security**
+ - Missing CORS configuration
+ - Insecure API endpoints
+ - Missing rate limiting
+ - Insecure WebSocket connections
+
+6. **Electron-Specific**
+ - Insecure IPC communication
+ - Missing context isolation checks
+ - Insecure preload scripts
+ - Missing CSP headers
+
+Provide a detailed report with:
+- Vulnerability type and severity (CRITICAL, HIGH, MEDIUM, LOW)
+- File paths and line numbers
+- Attack vector description
+- Recommended fix approach
+```
+
+#### Agent 3: Code Quality & Clean Code
+
+**Focus:** Dirty code, code smells, and quality issues
+
+**Instructions for Agent 3:**
+
+```
+Analyze the git diff for code quality issues:
+
+1. **Code Smells**
+ - Long functions/methods (>50 lines)
+ - High cyclomatic complexity
+ - Duplicate code
+ - Dead code
+ - Magic numbers/strings
+
+2. **Best Practices**
+ - Missing error handling
+ - Inconsistent naming conventions
+ - Poor separation of concerns
+ - Tight coupling
+ - Missing comments for complex logic
+
+3. **Performance Issues**
+ - Inefficient algorithms
+ - Memory leaks (event listeners, subscriptions)
+ - Unnecessary re-renders in React
+ - Missing memoization where needed
+ - Inefficient database queries (if applicable)
+
+4. **Maintainability**
+ - Hard-coded values
+ - Missing type definitions
+ - Inconsistent code style
+ - Poor file organization
+ - Missing tests for new code
+
+5. **React-Specific**
+ - Missing key props in lists
+ - Direct state mutations
+ - Missing cleanup in useEffect
+ - Unnecessary useState/useEffect
+ - Prop drilling issues
+
+Provide a detailed report with:
+- Issue type and severity
+- File paths and line numbers
+- Description of the problem
+- Impact on maintainability/performance
+- Recommended refactoring approach
+```
+
+#### Agent 4: Implementation Correctness
+
+**Focus:** Verify code implements requirements correctly
+
+**Instructions for Agent 4:**
+
+```
+Analyze the git diff for implementation correctness:
+
+1. **Logic Errors**
+ - Incorrect conditional logic
+ - Wrong variable usage
+ - Off-by-one errors
+ - Race conditions
+ - Missing null/undefined checks
+
+2. **Functional Requirements**
+ - Missing features from requirements
+ - Incorrect feature implementation
+ - Edge cases not handled
+ - Missing validation
+
+3. **Integration Issues**
+ - Incorrect API usage
+ - Wrong data format handling
+ - Missing error handling for external calls
+ - Incorrect state management
+
+4. **Type Errors**
+ - Type mismatches
+ - Missing type guards
+ - Incorrect type assertions
+ - Unsafe type operations
+
+5. **Testing Gaps**
+ - Missing unit tests
+ - Missing integration tests
+ - Tests don't cover edge cases
+ - Tests are incorrect
+
+Provide a detailed report with:
+- Issue description
+- File paths and line numbers
+- Expected vs actual behavior
+- Steps to reproduce (if applicable)
+- Recommended fix
+```
+
+#### Agent 5: Architecture & Design Patterns
+
+**Focus:** Architectural issues and design pattern violations
+
+**Instructions for Agent 5:**
+
+```
+Analyze the git diff for architectural and design issues:
+
+1. **Architecture Violations**
+ - Violation of project structure patterns
+ - Incorrect layer separation
+ - Missing abstractions
+ - Tight coupling between modules
+
+2. **Design Patterns**
+ - Incorrect pattern usage
+ - Missing patterns where needed
+ - Anti-patterns
+
+3. **Project-Specific Patterns**
+ - Check against project documentation (docs/ folder)
+ - Verify route organization (server routes)
+ - Check provider patterns (server providers)
+ - Verify component organization (UI components)
+
+4. **API Design**
+ - RESTful API violations
+ - Inconsistent response formats
+ - Missing error handling
+ - Incorrect status codes
+
+5. **State Management**
+ - Incorrect state management patterns
+ - Missing state normalization
+ - Inefficient state updates
+
+Provide a detailed report with:
+- Architectural issue description
+- File paths and affected areas
+- Impact on system design
+- Recommended architectural changes
+```
+
+### Phase 3: Consolidate Findings
+
+After all 5 deep dive agents complete their analysis:
+
+1. **Collect all findings** from each agent
+2. **Prioritize issues**:
+ - CRITICAL: Tech stack invalid code (build-breaking)
+ - HIGH: Security vulnerabilities, critical logic errors
+ - MEDIUM: Code quality issues, architectural problems
+ - LOW: Minor code smells, style issues
+
+3. **Group by file** to understand impact per file
+4. **Create a master report** summarizing all findings
+
+### Phase 4: Deepcode Fixes (5 Agents)
+
+Launch 5 deepcode agents to fix the issues found. Each agent should be invoked with the `@deepcode` agent.
+
+#### Deepcode Agent 1: Fix Tech Stack Invalid Code
+
+**Priority:** CRITICAL - Fix first
+
+**Instructions:**
+
+```
+Fix all invalid code based on tech stack issues identified by Agent 1.
+
+Focus on:
+1. Fixing TypeScript syntax errors
+2. Updating deprecated Node.js APIs
+3. Fixing React 19 compatibility issues
+4. Correcting Express 5 API usage
+5. Fixing type errors
+6. Resolving build-breaking issues
+
+After fixes, verify:
+- Code compiles without errors
+- TypeScript types are correct
+- No deprecated API usage
+```
+
+#### Deepcode Agent 2: Fix Security Vulnerabilities
+
+**Priority:** HIGH
+
+**Instructions:**
+
+```
+Fix all security vulnerabilities identified by Agent 2.
+
+Focus on:
+1. Adding input validation
+2. Fixing injection vulnerabilities
+3. Securing authentication/authorization
+4. Fixing insecure data handling
+5. Updating vulnerable dependencies
+6. Securing Electron IPC
+
+After fixes, verify:
+- Security vulnerabilities are addressed
+- No sensitive data exposure
+- Proper authentication/authorization
+```
+
+#### Deepcode Agent 3: Refactor Dirty Code
+
+**Priority:** MEDIUM
+
+**Instructions:**
+
+```
+Refactor code quality issues identified by Agent 3.
+
+Focus on:
+1. Extracting long functions
+2. Reducing complexity
+3. Removing duplicate code
+4. Adding error handling
+5. Improving React component structure
+6. Adding missing comments
+
+After fixes, verify:
+- Code follows best practices
+- No code smells remain
+- Performance optimizations applied
+```
+
+#### Deepcode Agent 4: Fix Implementation Errors
+
+**Priority:** HIGH
+
+**Instructions:**
+
+```
+Fix implementation correctness issues identified by Agent 4.
+
+Focus on:
+1. Fixing logic errors
+2. Adding missing features
+3. Handling edge cases
+4. Fixing type errors
+5. Adding missing tests
+
+After fixes, verify:
+- Logic is correct
+- Edge cases handled
+- Tests pass
+```
+
+#### Deepcode Agent 5: Fix Architectural Issues
+
+**Priority:** MEDIUM
+
+**Instructions:**
+
+```
+Fix architectural issues identified by Agent 5.
+
+Focus on:
+1. Correcting architecture violations
+2. Applying proper design patterns
+3. Fixing API design issues
+4. Improving state management
+5. Following project patterns
+
+After fixes, verify:
+- Architecture is sound
+- Patterns are correctly applied
+- Code follows project structure
+```
+
+### Phase 5: Verification
+
+After all fixes are complete:
+
+1. **Run TypeScript compilation check**
+
+ ```bash
+ npm run build:packages
+ ```
+
+2. **Run linting**
+
+ ```bash
+ npm run lint
+ ```
+
+3. **Run tests** (if applicable)
+
+ ```bash
+ npm run test:server
+ npm run test
+ ```
+
+4. **Verify git diff** shows only intended changes
+
+ ```bash
+ git diff HEAD
+ ```
+
+5. **Create summary report**:
+ - Issues found by each agent
+ - Issues fixed by each agent
+ - Remaining issues (if any)
+ - Verification results
+
+## Workflow Summary
+
+1. ✅ Accept optional target branch argument (defaults to main/master if not provided)
+2. ✅ Determine current branch and target branch (from argument or auto-detect main/master)
+3. ✅ Get git diff comparing current branch against target branch (`git diff $TARGET_REF...HEAD`)
+4. ✅ Include uncommitted changes in analysis (`git diff HEAD`, `git diff --cached`)
+5. ✅ Launch 5 deep dive agents (parallel analysis) with branch diff
+6. ✅ Consolidate findings and prioritize
+7. ✅ Launch 5 deepcode agents (sequential fixes, priority order)
+8. ✅ Verify fixes with build/lint/test
+9. ✅ Report summary
+
+## Notes
+
+- **Tech stack validation is HIGHEST PRIORITY** - invalid code must be fixed first
+- **Target branch argument**: The command accepts an optional target branch name as the first argument. If not provided, it automatically detects and uses `main` or `master` (in that order)
+- Each deep dive agent should work independently and provide comprehensive analysis
+- Deepcode agents should fix issues in priority order
+- All fixes should maintain existing functionality
+- If an agent finds no issues in their domain, they should report "No issues found"
+- If fixes introduce new issues, they should be caught in verification phase
+- The target branch is validated to ensure it exists (locally or remotely) before proceeding with the review
diff --git a/.claude/commands/review.md b/.claude/commands/review.md
new file mode 100644
index 00000000..87a589f5
--- /dev/null
+++ b/.claude/commands/review.md
@@ -0,0 +1,484 @@
+# Code Review Command
+
+Comprehensive code review using multiple deep dive agents to analyze git diff for correctness, security, code quality, and tech stack compliance, followed by automated fixes using deepcode agents.
+
+## Usage
+
+This command analyzes all changes in the git diff and verifies:
+
+1. **Invalid code based on tech stack** (HIGHEST PRIORITY)
+2. Security vulnerabilities
+3. Code quality issues (dirty code)
+4. Implementation correctness
+
+Then automatically fixes any issues found.
+
+## Instructions
+
+### Phase 1: Get Git Diff
+
+1. **Get the current git diff**
+
+ ```bash
+ git diff HEAD
+ ```
+
+ If you need staged changes instead:
+
+ ```bash
+ git diff --cached
+ ```
+
+ Or for a specific commit range:
+
+ ```bash
+   git diff <base-commit>..HEAD
+ ```
+
+2. **Get list of changed files**
+
+ ```bash
+ git diff --name-only HEAD
+ ```
+
+3. **Understand the tech stack** (for validation):
+ - **Node.js**: >=22.0.0 <23.0.0
+ - **TypeScript**: 5.9.3
+ - **React**: 19.2.3
+ - **Express**: 5.2.1
+ - **Electron**: 39.2.7
+ - **Vite**: 7.3.0
+ - **Vitest**: 4.0.16
+ - Check `package.json` files for exact versions
+
+### Phase 2: Deep Dive Analysis (5 Agents)
+
+Launch 5 separate deep dive agents, each with a specific focus area. Each agent should be invoked with the `@deepdive` agent and given the git diff along with their specific instructions.
+
+#### Agent 1: Tech Stack Validation (HIGHEST PRIORITY)
+
+**Focus:** Verify code is valid for the tech stack
+
+**Instructions for Agent 1:**
+
+```
+Analyze the git diff for invalid code based on the tech stack:
+
+1. **TypeScript/JavaScript Syntax**
+ - Check for valid TypeScript syntax (no invalid type annotations, correct import/export syntax)
+ - Verify Node.js API usage is compatible with Node.js >=22.0.0 <23.0.0
+ - Check for deprecated APIs or features not available in the Node.js version
+ - Verify ES module syntax (type: "module" in package.json)
+
+2. **React 19.2.3 Compatibility**
+ - Check for deprecated React APIs or patterns
+ - Verify hooks usage is correct for React 19
+ - Check for invalid JSX syntax
+ - Verify component patterns match React 19 conventions
+
+3. **Express 5.2.1 Compatibility**
+ - Check for deprecated Express APIs
+ - Verify middleware usage is correct for Express 5
+ - Check request/response handling patterns
+
+4. **Type Safety**
+ - Verify TypeScript types are correctly used
+ - Check for `any` types that should be properly typed
+ - Verify type imports/exports are correct
+ - Check for missing type definitions
+
+5. **Build System Compatibility**
+ - Verify Vite-specific code (imports, config) is valid
+ - Check Electron-specific APIs are used correctly
+ - Verify module resolution paths are correct
+
+6. **Package Dependencies**
+ - Check for imports from packages not in package.json
+ - Verify version compatibility between dependencies
+ - Check for circular dependencies
+
+Provide a detailed report with:
+- File paths and line numbers of invalid code
+- Specific error description (what's wrong and why)
+- Expected vs actual behavior
+- Priority level (CRITICAL for build-breaking issues)
+```
+
+#### Agent 2: Security Vulnerability Scanner
+
+**Focus:** Security issues and vulnerabilities
+
+**Instructions for Agent 2:**
+
+```
+Analyze the git diff for security vulnerabilities:
+
+1. **Injection Vulnerabilities**
+ - SQL injection (if applicable)
+ - Command injection (exec, spawn, etc.)
+ - Path traversal vulnerabilities
+ - XSS vulnerabilities in React components
+
+2. **Authentication & Authorization**
+ - Missing authentication checks
+ - Insecure token handling
+ - Authorization bypasses
+ - Session management issues
+
+3. **Data Handling**
+ - Unsafe deserialization
+ - Insecure file operations
+ - Missing input validation
+ - Sensitive data exposure (secrets, tokens, passwords)
+
+4. **Dependencies**
+ - Known vulnerable packages
+ - Insecure dependency versions
+ - Missing security patches
+
+5. **API Security**
+ - Missing CORS configuration
+ - Insecure API endpoints
+ - Missing rate limiting
+ - Insecure WebSocket connections
+
+6. **Electron-Specific**
+ - Insecure IPC communication
+ - Missing context isolation checks
+ - Insecure preload scripts
+ - Missing CSP headers
+
+Provide a detailed report with:
+- Vulnerability type and severity (CRITICAL, HIGH, MEDIUM, LOW)
+- File paths and line numbers
+- Attack vector description
+- Recommended fix approach
+```
+
+#### Agent 3: Code Quality & Clean Code
+
+**Focus:** Dirty code, code smells, and quality issues
+
+**Instructions for Agent 3:**
+
+```
+Analyze the git diff for code quality issues:
+
+1. **Code Smells**
+ - Long functions/methods (>50 lines)
+ - High cyclomatic complexity
+ - Duplicate code
+ - Dead code
+ - Magic numbers/strings
+
+2. **Best Practices**
+ - Missing error handling
+ - Inconsistent naming conventions
+ - Poor separation of concerns
+ - Tight coupling
+ - Missing comments for complex logic
+
+3. **Performance Issues**
+ - Inefficient algorithms
+ - Memory leaks (event listeners, subscriptions)
+ - Unnecessary re-renders in React
+ - Missing memoization where needed
+ - Inefficient database queries (if applicable)
+
+4. **Maintainability**
+ - Hard-coded values
+ - Missing type definitions
+ - Inconsistent code style
+ - Poor file organization
+ - Missing tests for new code
+
+5. **React-Specific**
+ - Missing key props in lists
+ - Direct state mutations
+ - Missing cleanup in useEffect
+ - Unnecessary useState/useEffect
+ - Prop drilling issues
+
+Provide a detailed report with:
+- Issue type and severity
+- File paths and line numbers
+- Description of the problem
+- Impact on maintainability/performance
+- Recommended refactoring approach
+```
+
+#### Agent 4: Implementation Correctness
+
+**Focus:** Verify code implements requirements correctly
+
+**Instructions for Agent 4:**
+
+```
+Analyze the git diff for implementation correctness:
+
+1. **Logic Errors**
+ - Incorrect conditional logic
+ - Wrong variable usage
+ - Off-by-one errors
+ - Race conditions
+ - Missing null/undefined checks
+
+2. **Functional Requirements**
+ - Missing features from requirements
+ - Incorrect feature implementation
+ - Edge cases not handled
+ - Missing validation
+
+3. **Integration Issues**
+ - Incorrect API usage
+ - Wrong data format handling
+ - Missing error handling for external calls
+ - Incorrect state management
+
+4. **Type Errors**
+ - Type mismatches
+ - Missing type guards
+ - Incorrect type assertions
+ - Unsafe type operations
+
+5. **Testing Gaps**
+ - Missing unit tests
+ - Missing integration tests
+ - Tests don't cover edge cases
+ - Tests are incorrect
+
+Provide a detailed report with:
+- Issue description
+- File paths and line numbers
+- Expected vs actual behavior
+- Steps to reproduce (if applicable)
+- Recommended fix
+```
+
+#### Agent 5: Architecture & Design Patterns
+
+**Focus:** Architectural issues and design pattern violations
+
+**Instructions for Agent 5:**
+
+```
+Analyze the git diff for architectural and design issues:
+
+1. **Architecture Violations**
+ - Violation of project structure patterns
+ - Incorrect layer separation
+ - Missing abstractions
+ - Tight coupling between modules
+
+2. **Design Patterns**
+ - Incorrect pattern usage
+ - Missing patterns where needed
+ - Anti-patterns
+
+3. **Project-Specific Patterns**
+ - Check against project documentation (docs/ folder)
+ - Verify route organization (server routes)
+ - Check provider patterns (server providers)
+ - Verify component organization (UI components)
+
+4. **API Design**
+ - RESTful API violations
+ - Inconsistent response formats
+ - Missing error handling
+ - Incorrect status codes
+
+5. **State Management**
+ - Incorrect state management patterns
+ - Missing state normalization
+ - Inefficient state updates
+
+Provide a detailed report with:
+- Architectural issue description
+- File paths and affected areas
+- Impact on system design
+- Recommended architectural changes
+```
+
+### Phase 3: Consolidate Findings
+
+After all 5 deep dive agents complete their analysis:
+
+1. **Collect all findings** from each agent
+2. **Prioritize issues**:
+ - CRITICAL: Tech stack invalid code (build-breaking)
+ - HIGH: Security vulnerabilities, critical logic errors
+ - MEDIUM: Code quality issues, architectural problems
+ - LOW: Minor code smells, style issues
+
+3. **Group by file** to understand impact per file
+4. **Create a master report** summarizing all findings
+
+### Phase 4: Deepcode Fixes (5 Agents)
+
+Launch 5 deepcode agents to fix the issues found. Each agent should be invoked with the `@deepcode` agent.
+
+#### Deepcode Agent 1: Fix Tech Stack Invalid Code
+
+**Priority:** CRITICAL - Fix first
+
+**Instructions:**
+
+```
+Fix all invalid code based on tech stack issues identified by Agent 1.
+
+Focus on:
+1. Fixing TypeScript syntax errors
+2. Updating deprecated Node.js APIs
+3. Fixing React 19 compatibility issues
+4. Correcting Express 5 API usage
+5. Fixing type errors
+6. Resolving build-breaking issues
+
+After fixes, verify:
+- Code compiles without errors
+- TypeScript types are correct
+- No deprecated API usage
+```
+
+#### Deepcode Agent 2: Fix Security Vulnerabilities
+
+**Priority:** HIGH
+
+**Instructions:**
+
+```
+Fix all security vulnerabilities identified by Agent 2.
+
+Focus on:
+1. Adding input validation
+2. Fixing injection vulnerabilities
+3. Securing authentication/authorization
+4. Fixing insecure data handling
+5. Updating vulnerable dependencies
+6. Securing Electron IPC
+
+After fixes, verify:
+- Security vulnerabilities are addressed
+- No sensitive data exposure
+- Proper authentication/authorization
+```
+
+#### Deepcode Agent 3: Refactor Dirty Code
+
+**Priority:** MEDIUM
+
+**Instructions:**
+
+```
+Refactor code quality issues identified by Agent 3.
+
+Focus on:
+1. Extracting long functions
+2. Reducing complexity
+3. Removing duplicate code
+4. Adding error handling
+5. Improving React component structure
+6. Adding missing comments
+
+After fixes, verify:
+- Code follows best practices
+- No code smells remain
+- Performance optimizations applied
+```
+
+#### Deepcode Agent 4: Fix Implementation Errors
+
+**Priority:** HIGH
+
+**Instructions:**
+
+```
+Fix implementation correctness issues identified by Agent 4.
+
+Focus on:
+1. Fixing logic errors
+2. Adding missing features
+3. Handling edge cases
+4. Fixing type errors
+5. Adding missing tests
+
+After fixes, verify:
+- Logic is correct
+- Edge cases handled
+- Tests pass
+```
+
+#### Deepcode Agent 5: Fix Architectural Issues
+
+**Priority:** MEDIUM
+
+**Instructions:**
+
+```
+Fix architectural issues identified by Agent 5.
+
+Focus on:
+1. Correcting architecture violations
+2. Applying proper design patterns
+3. Fixing API design issues
+4. Improving state management
+5. Following project patterns
+
+After fixes, verify:
+- Architecture is sound
+- Patterns are correctly applied
+- Code follows project structure
+```
+
+### Phase 5: Verification
+
+After all fixes are complete:
+
+1. **Run TypeScript compilation check**
+
+ ```bash
+ npm run build:packages
+ ```
+
+2. **Run linting**
+
+ ```bash
+ npm run lint
+ ```
+
+3. **Run tests** (if applicable)
+
+ ```bash
+ npm run test:server
+ npm run test
+ ```
+
+4. **Verify git diff** shows only intended changes
+
+ ```bash
+ git diff HEAD
+ ```
+
+5. **Create summary report**:
+ - Issues found by each agent
+ - Issues fixed by each agent
+ - Remaining issues (if any)
+ - Verification results
+
+## Workflow Summary
+
+1. ✅ Get git diff
+2. ✅ Launch 5 deep dive agents (parallel analysis)
+3. ✅ Consolidate findings and prioritize
+4. ✅ Launch 5 deepcode agents (sequential fixes, priority order)
+5. ✅ Verify fixes with build/lint/test
+6. ✅ Report summary
+
+## Notes
+
+- **Tech stack validation is HIGHEST PRIORITY** - invalid code must be fixed first
+- Each deep dive agent should work independently and provide comprehensive analysis
+- Deepcode agents should fix issues in priority order
+- All fixes should maintain existing functionality
+- If an agent finds no issues in their domain, they should report "No issues found"
+- If fixes introduce new issues, they should be caught in verification phase
diff --git a/.claude/commands/thorough.md b/.claude/commands/thorough.md
new file mode 100644
index 00000000..c69ada0f
--- /dev/null
+++ b/.claude/commands/thorough.md
@@ -0,0 +1,45 @@
+When you think you are done, you are NOT done.
+
+You must run a mandatory 3-pass verification before concluding:
+
+## Pass 1: Correctness & Functionality
+
+- [ ] Verify logic matches requirements and specifications
+- [ ] Check type safety (TypeScript types are correct and complete)
+- [ ] Ensure imports are correct and follow project conventions
+- [ ] Verify all functions/classes work as intended
+- [ ] Check that return values and side effects are correct
+- [ ] Run relevant tests if they exist, or verify testability
+- [ ] Confirm integration with existing code works properly
+
+## Pass 2: Edge Cases & Safety
+
+- [ ] Handle null/undefined inputs gracefully
+- [ ] Validate all user inputs and external data
+- [ ] Check error handling (try/catch, error boundaries, etc.)
+- [ ] Verify security considerations (no sensitive data exposure, proper auth checks)
+- [ ] Test boundary conditions (empty arrays, zero values, max lengths, etc.)
+- [ ] Ensure resource cleanup (file handles, connections, timers)
+- [ ] Check for potential race conditions or async issues
+- [ ] Verify file path security (no directory traversal vulnerabilities)
+
+## Pass 3: Maintainability & Code Quality
+
+- [ ] Code follows project style guide and conventions
+- [ ] Functions/classes are single-purpose and well-named
+- [ ] Remove dead code, unused imports, and console.logs
+- [ ] Extract magic numbers/strings into named constants
+- [ ] Check for code duplication (DRY principle)
+- [ ] Verify appropriate abstraction levels (not over/under-engineered)
+- [ ] Add necessary comments for complex logic
+- [ ] Ensure consistent error messages and logging
+- [ ] Check that code is readable and self-documenting
+- [ ] Verify proper separation of concerns
+
+**For each pass, explicitly report:**
+
+- What you checked
+- Any issues found and how they were fixed
+- Any remaining concerns or trade-offs
+
+Only after completing all three passes with explicit findings may you conclude the work is done.
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..8163526b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,19 @@
+# Dependencies
+node_modules/
+**/node_modules/
+
+# Build outputs
+dist/
+**/dist/
+dist-electron/
+**/dist-electron/
+build/
+**/build/
+.next/
+**/.next/
+.nuxt/
+**/.nuxt/
+out/
+**/out/
+.cache/
+**/.cache/
\ No newline at end of file
diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml
index a4064bda..917672b5 100644
--- a/.github/workflows/e2e-tests.yml
+++ b/.github/workflows/e2e-tests.yml
@@ -31,24 +31,99 @@ jobs:
- name: Build server
run: npm run build --workspace=apps/server
+ - name: Set up Git user
+ run: |
+ git config --global user.name "GitHub CI"
+ git config --global user.email "ci@example.com"
+
- name: Start backend server
- run: npm run start --workspace=apps/server &
+ run: |
+ echo "Starting backend server..."
+ # Start server in background and save PID
+ npm run start --workspace=apps/server > backend.log 2>&1 &
+ SERVER_PID=$!
+ echo "Server started with PID: $SERVER_PID"
+ echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV
+
env:
PORT: 3008
NODE_ENV: test
+ # Use a deterministic API key so Playwright can log in reliably
+ AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
+ # Reduce log noise in CI
+ AUTOMAKER_HIDE_API_KEY: 'true'
+ # Avoid real API calls during CI
+ AUTOMAKER_MOCK_AGENT: 'true'
+ # Simulate containerized environment to skip sandbox confirmation dialogs
+ IS_CONTAINERIZED: 'true'
- name: Wait for backend server
run: |
echo "Waiting for backend server to be ready..."
- for i in {1..30}; do
- if curl -s http://localhost:3008/api/health > /dev/null 2>&1; then
+
+ # Check if server process is running
+ if [ -z "$SERVER_PID" ]; then
+ echo "ERROR: Server PID not found in environment"
+ cat backend.log 2>/dev/null || echo "No backend log found"
+ exit 1
+ fi
+
+ # Check if process is actually running
+ if ! kill -0 $SERVER_PID 2>/dev/null; then
+ echo "ERROR: Server process $SERVER_PID is not running!"
+ echo "=== Backend logs ==="
+ cat backend.log
+ echo ""
+ echo "=== Recent system logs ==="
+ dmesg 2>/dev/null | tail -20 || echo "No dmesg available"
+ exit 1
+ fi
+
+ # Wait for health endpoint
+ for i in {1..60}; do
+ if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
echo "Backend server is ready!"
+ echo "=== Backend logs ==="
+ cat backend.log
+ echo ""
+ echo "Health check response:"
+ curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
exit 0
fi
- echo "Waiting... ($i/30)"
+
+ # Check if server process is still running
+ if ! kill -0 $SERVER_PID 2>/dev/null; then
+ echo "ERROR: Server process died during wait!"
+ echo "=== Backend logs ==="
+ cat backend.log
+ exit 1
+ fi
+
+ echo "Waiting... ($i/60)"
sleep 1
done
- echo "Backend server failed to start!"
+
+ echo "ERROR: Backend server failed to start within 60 seconds!"
+ echo "=== Backend logs ==="
+ cat backend.log
+ echo ""
+ echo "=== Process status ==="
+ ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
+ echo ""
+ echo "=== Port status ==="
+ netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
+ lsof -i :3008 2>/dev/null || echo "lsof not available or port not in use"
+ echo ""
+ echo "=== Health endpoint test ==="
+ curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"
+
+ # Kill the server process if it's still hanging
+ if kill -0 $SERVER_PID 2>/dev/null; then
+ echo ""
+ echo "Killing stuck server process..."
+ kill -9 $SERVER_PID 2>/dev/null || true
+ fi
+
exit 1
- name: Run E2E tests
@@ -59,6 +134,20 @@ jobs:
CI: true
VITE_SERVER_URL: http://localhost:3008
VITE_SKIP_SETUP: 'true'
+ # Keep UI-side login/defaults consistent
+ AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
+
+ - name: Print backend logs on failure
+ if: failure()
+ run: |
+ echo "=== E2E Tests Failed - Backend Logs ==="
+ cat backend.log 2>/dev/null || echo "No backend log found"
+ echo ""
+ echo "=== Process status at failure ==="
+ ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
+ echo ""
+ echo "=== Port status ==="
+ netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
- name: Upload Playwright report
uses: actions/upload-artifact@v4
@@ -68,10 +157,22 @@ jobs:
path: apps/ui/playwright-report/
retention-days: 7
- - name: Upload test results
+ - name: Upload test results (screenshots, traces, videos)
uses: actions/upload-artifact@v4
- if: failure()
+ if: always()
with:
name: test-results
- path: apps/ui/test-results/
+ path: |
+ apps/ui/test-results/
retention-days: 7
+ if-no-files-found: ignore
+
+ - name: Cleanup - Kill backend server
+ if: always()
+ run: |
+ if [ -n "$SERVER_PID" ]; then
+ echo "Cleaning up backend server (PID: $SERVER_PID)..."
+ kill $SERVER_PID 2>/dev/null || true
+ kill -9 $SERVER_PID 2>/dev/null || true
+ echo "Backend server cleanup complete"
+ fi
diff --git a/.github/workflows/security-audit.yml b/.github/workflows/security-audit.yml
index 1a867179..7da30c5d 100644
--- a/.github/workflows/security-audit.yml
+++ b/.github/workflows/security-audit.yml
@@ -26,5 +26,5 @@ jobs:
check-lockfile: 'true'
- name: Run npm audit
- run: npm audit --audit-level=moderate
+ run: npm audit --audit-level=critical
continue-on-error: false
diff --git a/.gitignore b/.gitignore
index 48470efe..be8843e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -73,6 +73,9 @@ blob-report/
!.env.example
!.env.local.example
+# Codex config (contains API keys)
+.codex/config.toml
+
# TypeScript
*.tsbuildinfo
@@ -81,6 +84,15 @@ blob-report/
docker-compose.override.yml
.claude/docker-compose.override.yml
+.claude/hans/
pnpm-lock.yaml
-yarn.lock
\ No newline at end of file
+yarn.lock
+
+# Fork-specific workflow files (should never be committed)
+DEVELOPMENT_WORKFLOW.md
+check-sync.sh
+# API key files
+data/.api-key
+data/credentials.json
+data/
diff --git a/.husky/pre-commit b/.husky/pre-commit
index 2312dc58..276c2fa0 100755
--- a/.husky/pre-commit
+++ b/.husky/pre-commit
@@ -1 +1,46 @@
-npx lint-staged
+#!/usr/bin/env sh
+
+# Try to load nvm if available (optional - works without it too)
+if [ -z "$NVM_DIR" ]; then
+ # Check for Herd's nvm first (macOS with Herd)
+ if [ -s "$HOME/Library/Application Support/Herd/config/nvm/nvm.sh" ]; then
+ export NVM_DIR="$HOME/Library/Application Support/Herd/config/nvm"
+ # Then check standard nvm location
+ elif [ -s "$HOME/.nvm/nvm.sh" ]; then
+ export NVM_DIR="$HOME/.nvm"
+ fi
+fi
+
+# Source nvm if found (silently skip if not available)
+[ -n "$NVM_DIR" ] && [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" 2>/dev/null
+
+# Load node version from .nvmrc if using nvm (silently skip if nvm not available or fails)
+if [ -f .nvmrc ] && command -v nvm >/dev/null 2>&1; then
+ # Check if Unix nvm was sourced (it's a shell function with NVM_DIR set)
+ if [ -n "$NVM_DIR" ] && type nvm 2>/dev/null | grep -q "function"; then
+ # Unix nvm: reads .nvmrc automatically
+ nvm use >/dev/null 2>&1 || true
+ else
+ # nvm-windows: needs explicit version from .nvmrc
+ NODE_VERSION=$(cat .nvmrc | tr -d '[:space:]')
+ if [ -n "$NODE_VERSION" ]; then
+ nvm use "$NODE_VERSION" >/dev/null 2>&1 || true
+ fi
+ fi
+fi
+
+# Ensure common system paths are in PATH (for systems without nvm)
+# This helps find node/npm installed via Homebrew, system packages, etc.
+export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
+
+# Run lint-staged - works with or without nvm
+# Prefer npx, fallback to npm exec, both work with system-installed Node.js
+if command -v npx >/dev/null 2>&1; then
+ npx lint-staged
+elif command -v npm >/dev/null 2>&1; then
+ npm exec -- lint-staged
+else
+ echo "Error: Neither npx nor npm found in PATH."
+ echo "Please ensure Node.js is installed (via nvm, Homebrew, system package manager, etc.)"
+ exit 1
+fi
diff --git a/.prettierignore b/.prettierignore
index 9b4929d1..50ff1306 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -23,6 +23,8 @@ pnpm-lock.yaml
# Generated files
*.min.js
*.min.css
+routeTree.gen.ts
+apps/ui/src/routeTree.gen.ts
# Test artifacts
test-results/
diff --git a/Dockerfile b/Dockerfile
index 3f110451..c32b1764 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,10 +8,12 @@
# =============================================================================
# BASE STAGE - Common setup for all builds (DRY: defined once, used by all)
# =============================================================================
-FROM node:22-alpine AS base
+FROM node:22-slim AS base
# Install build dependencies for native modules (node-pty)
-RUN apk add --no-cache python3 make g++
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ python3 make g++ \
+ && rm -rf /var/lib/apt/lists/*
WORKDIR /app
@@ -51,30 +53,63 @@ RUN npm run build:packages && npm run build --workspace=apps/server
# =============================================================================
# SERVER PRODUCTION STAGE
# =============================================================================
-FROM node:22-alpine AS server
+FROM node:22-slim AS server
-# Install git, curl, bash (for terminal), and GitHub CLI (pinned version, multi-arch)
-RUN apk add --no-cache git curl bash && \
- GH_VERSION="2.63.2" && \
- ARCH=$(uname -m) && \
- case "$ARCH" in \
+# Build argument for tracking which commit this image was built from
+ARG GIT_COMMIT_SHA=unknown
+LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
+
+# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ git curl bash gosu ca-certificates openssh-client \
+ && GH_VERSION="2.63.2" \
+ && ARCH=$(uname -m) \
+ && case "$ARCH" in \
x86_64) GH_ARCH="amd64" ;; \
aarch64|arm64) GH_ARCH="arm64" ;; \
*) echo "Unsupported architecture: $ARCH" && exit 1 ;; \
- esac && \
- curl -L "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${GH_ARCH}.tar.gz" -o gh.tar.gz && \
- tar -xzf gh.tar.gz && \
- mv gh_${GH_VERSION}_linux_${GH_ARCH}/bin/gh /usr/local/bin/gh && \
- rm -rf gh.tar.gz gh_${GH_VERSION}_linux_${GH_ARCH}
+ esac \
+ && curl -L "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${GH_ARCH}.tar.gz" -o gh.tar.gz \
+ && tar -xzf gh.tar.gz \
+ && mv gh_${GH_VERSION}_linux_${GH_ARCH}/bin/gh /usr/local/bin/gh \
+ && rm -rf gh.tar.gz gh_${GH_VERSION}_linux_${GH_ARCH} \
+ && rm -rf /var/lib/apt/lists/*
-# Install Claude CLI globally
+# Install Claude CLI globally (available to all users via npm global bin)
RUN npm install -g @anthropic-ai/claude-code
-WORKDIR /app
+# Create non-root user with home directory BEFORE installing Cursor CLI
+RUN groupadd -g 1001 automaker && \
+ useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
+ mkdir -p /home/automaker/.local/bin && \
+ mkdir -p /home/automaker/.cursor && \
+ chown -R automaker:automaker /home/automaker && \
+ chmod 700 /home/automaker/.cursor
-# Create non-root user
-RUN addgroup -g 1001 -S automaker && \
- adduser -S automaker -u 1001
+# Install Cursor CLI as the automaker user
+# Set HOME explicitly and install to /home/automaker/.local/bin/
+USER automaker
+ENV HOME=/home/automaker
+RUN curl https://cursor.com/install -fsS | bash && \
+ echo "=== Checking Cursor CLI installation ===" && \
+ ls -la /home/automaker/.local/bin/ && \
+ echo "=== PATH is: $PATH ===" && \
+ (which cursor-agent && cursor-agent --version) || echo "cursor-agent installed (may need auth setup)"
+USER root
+
+# Add PATH to profile so it's available in all interactive shells (for login shells)
+RUN mkdir -p /etc/profile.d && \
+ echo 'export PATH="/home/automaker/.local/bin:$PATH"' > /etc/profile.d/cursor-cli.sh && \
+ chmod +x /etc/profile.d/cursor-cli.sh
+
+# Add to automaker's .bashrc for bash interactive shells
+RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /home/automaker/.bashrc && \
+ chown automaker:automaker /home/automaker/.bashrc
+
+# Also add to root's .bashrc since docker exec defaults to root
+RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /root/.bashrc
+
+WORKDIR /app
# Copy root package.json (needed for workspace resolution)
COPY --from=server-builder /app/package*.json ./
@@ -98,12 +133,19 @@ RUN git config --system --add safe.directory '*' && \
# Use gh as credential helper (works with GH_TOKEN env var)
git config --system credential.helper '!gh auth git-credential'
-# Switch to non-root user
-USER automaker
+# Copy entrypoint script for fixing permissions on mounted volumes
+COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+
+# Note: We stay as root here so entrypoint can fix permissions
+# The entrypoint script will switch to automaker user before running the command
# Environment variables
ENV PORT=3008
ENV DATA_DIR=/data
+ENV HOME=/home/automaker
+# Add user's local bin to PATH for cursor-agent
+ENV PATH="/home/automaker/.local/bin:${PATH}"
# Expose port
EXPOSE 3008
@@ -112,6 +154,9 @@ EXPOSE 3008
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:3008/api/health || exit 1
+# Use entrypoint to fix permissions before starting
+ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
+
# Start server
CMD ["node", "apps/server/dist/index.js"]
@@ -143,6 +188,10 @@ RUN npm run build:packages && npm run build --workspace=apps/ui
# =============================================================================
FROM nginx:alpine AS ui
+# Build argument for tracking which commit this image was built from
+ARG GIT_COMMIT_SHA=unknown
+LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
+
# Copy built files
COPY --from=ui-builder /app/apps/ui/dist /usr/share/nginx/html
diff --git a/Dockerfile.dev b/Dockerfile.dev
new file mode 100644
index 00000000..87ac6bf6
--- /dev/null
+++ b/Dockerfile.dev
@@ -0,0 +1,80 @@
+# Automaker Development Dockerfile
+# For development with live reload via volume mounting
+# Source code is NOT copied - it's mounted as a volume
+#
+# Usage:
+# docker compose -f docker-compose.dev.yml up
+
+FROM node:22-slim
+
+# Install build dependencies for native modules (node-pty) and runtime tools
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ python3 make g++ \
+ git curl bash gosu ca-certificates openssh-client \
+ && GH_VERSION="2.63.2" \
+ && ARCH=$(uname -m) \
+ && case "$ARCH" in \
+ x86_64) GH_ARCH="amd64" ;; \
+ aarch64|arm64) GH_ARCH="arm64" ;; \
+ *) echo "Unsupported architecture: $ARCH" && exit 1 ;; \
+ esac \
+ && curl -L "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${GH_ARCH}.tar.gz" -o gh.tar.gz \
+ && tar -xzf gh.tar.gz \
+ && mv gh_${GH_VERSION}_linux_${GH_ARCH}/bin/gh /usr/local/bin/gh \
+ && rm -rf gh.tar.gz gh_${GH_VERSION}_linux_${GH_ARCH} \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install Claude CLI globally
+RUN npm install -g @anthropic-ai/claude-code
+
+# Create non-root user
+RUN groupadd -g 1001 automaker && \
+ useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
+ mkdir -p /home/automaker/.local/bin && \
+ mkdir -p /home/automaker/.cursor && \
+ chown -R automaker:automaker /home/automaker && \
+ chmod 700 /home/automaker/.cursor
+
+# Install Cursor CLI as automaker user
+USER automaker
+ENV HOME=/home/automaker
+RUN curl https://cursor.com/install -fsS | bash || true
+USER root
+
+# Add PATH to profile for Cursor CLI
+RUN mkdir -p /etc/profile.d && \
+ echo 'export PATH="/home/automaker/.local/bin:$PATH"' > /etc/profile.d/cursor-cli.sh && \
+ chmod +x /etc/profile.d/cursor-cli.sh
+
+# Add to user bashrc files
+RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /home/automaker/.bashrc && \
+ chown automaker:automaker /home/automaker/.bashrc
+RUN echo 'export PATH="/home/automaker/.local/bin:$PATH"' >> /root/.bashrc
+
+WORKDIR /app
+
+# Create directories with proper permissions
+RUN mkdir -p /data /projects && chown automaker:automaker /data /projects
+
+# Configure git for mounted volumes
+RUN git config --system --add safe.directory '*' && \
+ git config --system credential.helper '!gh auth git-credential'
+
+# Copy entrypoint script
+COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+
+# Environment variables
+ENV PORT=3008
+ENV DATA_DIR=/data
+ENV HOME=/home/automaker
+ENV PATH="/home/automaker/.local/bin:${PATH}"
+
+# Expose both dev ports
+EXPOSE 3007 3008
+
+# Use entrypoint for permission handling
+ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
+
+# Default command - will be overridden by docker-compose
+CMD ["npm", "run", "dev:web"]
diff --git a/README.md b/README.md
index c8e1b84e..8bfd2a0a 100644
--- a/README.md
+++ b/README.md
@@ -117,32 +117,32 @@ cd automaker
# 2. Install dependencies
npm install
-# 3. Build shared packages (Now can be skipped npm install / run dev does it automaticly)
+# 3. Build shared packages (can be skipped - npm run dev does it automatically)
npm run build:packages
-# 4. Set up authentication (skip if using Claude Code CLI)
-# If using Claude Code CLI: credentials are detected automatically
-# If using API key directly, choose one method:
-
-# Option A: Environment variable
-export ANTHROPIC_API_KEY="sk-ant-..."
-
-# Option B: Create .env file in project root
-echo "ANTHROPIC_API_KEY=sk-ant-..." > .env
-
-# 5. Start Automaker (interactive launcher)
+# 4. Start Automaker
npm run dev
# Choose between:
# 1. Web Application (browser at localhost:3007)
# 2. Desktop Application (Electron - recommended)
```
-**Note:** The `npm run dev` command will:
+**Authentication Setup:** On first run, Automaker will automatically show a setup wizard where you can configure authentication. You can choose to:
-- Check for dependencies and install if needed
-- Install Playwright browsers for E2E tests
-- Kill any processes on ports 3007/3008
-- Present an interactive menu to choose your run mode
+- Use **Claude Code CLI** (recommended) - Automaker will detect your CLI credentials automatically
+- Enter an **API key** directly in the wizard
+
+If you prefer to set up authentication before running (e.g., for headless deployments or CI/CD), you can set it manually:
+
+```bash
+# Option A: Environment variable
+export ANTHROPIC_API_KEY="sk-ant-..."
+
+# Option B: Create .env file in project root
+echo "ANTHROPIC_API_KEY=sk-ant-..." > .env
+```
+
+**For Development:** `npm run dev` starts the development server with Vite live reload and hot module replacement for fast refresh and instant updates as you make changes.
## How to Run
@@ -186,9 +186,6 @@ npm run dev:web
```bash
# Build for web deployment (uses Vite)
npm run build
-
-# Run production build
-npm run start
```
#### Desktop Application
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 00000000..3771806b
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,17 @@
+# Bugs
+
+- Setting the default model does not seem like it works.
+
+# UX
+
+- Consolidate all models to a single place in the settings instead of having AI profiles and all this other stuff
+- Simplify the create feature modal. It should just be one page. I don't need all these tabs and nested buttons. It's too complex.
+- Add a to-do list checkbox directly into the card so that, as the feature is going through, any to-do items can be seen updating live.
+- When the feature is done, I want to see a summary from the LLM. That's the first thing I should see when I double-click the card.
+- I want a way to mass edit all my features. For example, when I created a new project, it added auto testing on every single feature card. Now I have to manually go through one by one and change those. Have a way to mass edit the configuration of all of them.
+- Double-check and debug whether there are memory leaks. Automaker's memory seems to grow by about 3 GB; it's at 5 GB right now while I'm running three different Cursor CLI features at the same time.
+- Typing in the text area of the plan mode was super laggy.
+- When I have a bunch of features running at the same time, it seems like I cannot edit the features in the backlog: they don't persist their file changes. I think this is because the secure FS layer has an internal queue to prevent hitting the open-file/write limit. We may have to consider refactoring away from the file system to Postgres or SQLite or something.
+- Modals are not scrollable if the screen height is small enough.
+- In the Agent Runner, add an archive button for the new sessions.
+- Investigate a potential issue with the feature cards not refreshing. I see a lock icon on the feature card, but it doesn't go away until I open the card, edit it, and turn testing mode off. I think there's a refresh/sync issue.
diff --git a/apps/server/.env.example b/apps/server/.env.example
index 3afb5d4e..68b28395 100644
--- a/apps/server/.env.example
+++ b/apps/server/.env.example
@@ -8,6 +8,20 @@
# Your Anthropic API key for Claude models
ANTHROPIC_API_KEY=sk-ant-...
+# ============================================
+# OPTIONAL - Additional API Keys
+# ============================================
+
+# OpenAI API key for Codex/GPT models
+OPENAI_API_KEY=sk-...
+
+# Cursor API key for Cursor models
+CURSOR_API_KEY=...
+
+# OAuth credentials for CLI authentication (extracted automatically)
+CLAUDE_OAUTH_CREDENTIALS=
+CURSOR_AUTH_TOKEN=
+
# ============================================
# OPTIONAL - Security
# ============================================
@@ -48,3 +62,15 @@ TERMINAL_ENABLED=true
TERMINAL_PASSWORD=
ENABLE_REQUEST_LOGGING=false
+
+# ============================================
+# OPTIONAL - Debugging
+# ============================================
+
+# Enable raw output logging for agent streams (default: false)
+# When enabled, saves unprocessed stream events to raw-output.jsonl
+# in each feature's directory (.automaker/features/{id}/raw-output.jsonl)
+# Useful for debugging provider streaming issues, improving log parsing,
+# or analyzing how different providers (Claude, Cursor) stream responses
+# Note: This adds disk I/O overhead, only enable when debugging
+AUTOMAKER_DEBUG_RAW_OUTPUT=false
diff --git a/apps/server/package.json b/apps/server/package.json
index 65f1222e..23e6a2a9 100644
--- a/apps/server/package.json
+++ b/apps/server/package.json
@@ -1,6 +1,6 @@
{
"name": "@automaker/server",
- "version": "0.7.3",
+ "version": "0.10.0",
"description": "Backend server for Automaker - provides API for both web and Electron modes",
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
@@ -32,7 +32,8 @@
"@automaker/prompts": "1.0.0",
"@automaker/types": "1.0.0",
"@automaker/utils": "1.0.0",
- "@modelcontextprotocol/sdk": "1.25.1",
+ "@modelcontextprotocol/sdk": "1.25.2",
+ "@openai/codex-sdk": "^0.77.0",
"cookie-parser": "1.4.7",
"cors": "2.8.5",
"dotenv": "17.2.3",
diff --git a/apps/server/src/index.ts b/apps/server/src/index.ts
index 0f97255f..f763c08d 100644
--- a/apps/server/src/index.ts
+++ b/apps/server/src/index.ts
@@ -17,6 +17,9 @@ import dotenv from 'dotenv';
import { createEventEmitter, type EventEmitter } from './lib/events.js';
import { initAllowedPaths } from '@automaker/platform';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('Server');
import { authMiddleware, validateWsConnectionToken, checkRawAuthentication } from './lib/auth.js';
import { requireJsonContentType } from './middleware/require-json-content-type.js';
import { createAuthRoutes } from './routes/auth/index.js';
@@ -50,6 +53,10 @@ import { SettingsService } from './services/settings-service.js';
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
import { createClaudeRoutes } from './routes/claude/index.js';
import { ClaudeUsageService } from './services/claude-usage-service.js';
+import { createCodexRoutes } from './routes/codex/index.js';
+import { CodexUsageService } from './services/codex-usage-service.js';
+import { CodexAppServerService } from './services/codex-app-server-service.js';
+import { CodexModelCacheService } from './services/codex-model-cache-service.js';
import { createGitHubRoutes } from './routes/github/index.js';
import { createContextRoutes } from './routes/context/index.js';
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
@@ -58,6 +65,8 @@ import { createMCPRoutes } from './routes/mcp/index.js';
import { MCPTestService } from './services/mcp-test-service.js';
import { createPipelineRoutes } from './routes/pipeline/index.js';
import { pipelineService } from './services/pipeline-service.js';
+import { createIdeationRoutes } from './routes/ideation/index.js';
+import { IdeationService } from './services/ideation-service.js';
// Load environment variables
dotenv.config();
@@ -70,7 +79,7 @@ const ENABLE_REQUEST_LOGGING = process.env.ENABLE_REQUEST_LOGGING !== 'false'; /
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
if (!hasAnthropicKey) {
- console.warn(`
+ logger.warn(`
╔═══════════════════════════════════════════════════════════════════════╗
║ ⚠️ WARNING: No Claude authentication configured ║
║ ║
@@ -83,7 +92,7 @@ if (!hasAnthropicKey) {
╚═══════════════════════════════════════════════════════════════════════╝
`);
} else {
- console.log('[Server] ✓ ANTHROPIC_API_KEY detected (API key auth)');
+ logger.info('✓ ANTHROPIC_API_KEY detected (API key auth)');
}
// Initialize security
@@ -161,12 +170,21 @@ const agentService = new AgentService(DATA_DIR, events, settingsService);
const featureLoader = new FeatureLoader();
const autoModeService = new AutoModeService(events, settingsService);
const claudeUsageService = new ClaudeUsageService();
+const codexAppServerService = new CodexAppServerService();
+const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
+const codexUsageService = new CodexUsageService(codexAppServerService);
const mcpTestService = new MCPTestService(settingsService);
+const ideationService = new IdeationService(events, settingsService, featureLoader);
// Initialize services
(async () => {
await agentService.initialize();
- console.log('[Server] Agent service initialized');
+ logger.info('Agent service initialized');
+
+ // Bootstrap Codex model cache in background (don't block server startup)
+ void codexModelCacheService.getModels().catch((err) => {
+ logger.error('Failed to bootstrap Codex model cache:', err);
+ });
})();
// Run stale validation cleanup every hour to prevent memory leaks from crashed validations
@@ -174,7 +192,7 @@ const VALIDATION_CLEANUP_INTERVAL_MS = 60 * 60 * 1000; // 1 hour
setInterval(() => {
const cleaned = cleanupStaleValidations();
if (cleaned > 0) {
- console.log(`[Server] Cleaned up ${cleaned} stale validation entries`);
+ logger.info(`Cleaned up ${cleaned} stale validation entries`);
}
}, VALIDATION_CLEANUP_INTERVAL_MS);
@@ -182,9 +200,10 @@ setInterval(() => {
// This helps prevent CSRF and content-type confusion attacks
app.use('/api', requireJsonContentType);
-// Mount API routes - health and auth are unauthenticated
+// Mount API routes - health, auth, and setup are unauthenticated
app.use('/api/health', createHealthRoutes());
app.use('/api/auth', createAuthRoutes());
+app.use('/api/setup', createSetupRoutes());
// Apply authentication to all other routes
app.use('/api', authMiddleware);
@@ -198,9 +217,8 @@ app.use('/api/sessions', createSessionsRoutes(agentService));
app.use('/api/features', createFeaturesRoutes(featureLoader));
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
-app.use('/api/worktree', createWorktreeRoutes());
+app.use('/api/worktree', createWorktreeRoutes(events));
app.use('/api/git', createGitRoutes());
-app.use('/api/setup', createSetupRoutes());
app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
app.use('/api/models', createModelsRoutes());
app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events, settingsService));
@@ -210,11 +228,13 @@ app.use('/api/templates', createTemplatesRoutes());
app.use('/api/terminal', createTerminalRoutes());
app.use('/api/settings', createSettingsRoutes(settingsService));
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
+app.use('/api/codex', createCodexRoutes(codexUsageService, codexModelCacheService));
app.use('/api/github', createGitHubRoutes(events, settingsService));
app.use('/api/context', createContextRoutes(settingsService));
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
app.use('/api/mcp', createMCPRoutes(mcpTestService));
app.use('/api/pipeline', createPipelineRoutes(pipelineService));
+app.use('/api/ideation', createIdeationRoutes(events, ideationService, featureLoader));
// Create HTTP server
const server = createServer(app);
@@ -267,7 +287,7 @@ server.on('upgrade', (request, socket, head) => {
// Authenticate all WebSocket connections
if (!authenticateWebSocket(request)) {
- console.log('[WebSocket] Authentication failed, rejecting connection');
+ logger.info('Authentication failed, rejecting connection');
socket.write('HTTP/1.1 401 Unauthorized\r\n\r\n');
socket.destroy();
return;
@@ -288,11 +308,11 @@ server.on('upgrade', (request, socket, head) => {
// Events WebSocket connection handler
wss.on('connection', (ws: WebSocket) => {
- console.log('[WebSocket] Client connected, ready state:', ws.readyState);
+ logger.info('Client connected, ready state:', ws.readyState);
// Subscribe to all events and forward to this client
const unsubscribe = events.subscribe((type, payload) => {
- console.log('[WebSocket] Event received:', {
+ logger.info('Event received:', {
type,
hasPayload: !!payload,
payloadKeys: payload ? Object.keys(payload) : [],
@@ -302,27 +322,24 @@ wss.on('connection', (ws: WebSocket) => {
if (ws.readyState === WebSocket.OPEN) {
const message = JSON.stringify({ type, payload });
- console.log('[WebSocket] Sending event to client:', {
+ logger.info('Sending event to client:', {
type,
messageLength: message.length,
sessionId: (payload as any)?.sessionId,
});
ws.send(message);
} else {
- console.log(
- '[WebSocket] WARNING: Cannot send event, WebSocket not open. ReadyState:',
- ws.readyState
- );
+ logger.info('WARNING: Cannot send event, WebSocket not open. ReadyState:', ws.readyState);
}
});
ws.on('close', () => {
- console.log('[WebSocket] Client disconnected');
+ logger.info('Client disconnected');
unsubscribe();
});
ws.on('error', (error) => {
- console.error('[WebSocket] ERROR:', error);
+ logger.error('ERROR:', error);
unsubscribe();
});
});
@@ -349,24 +366,24 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
const sessionId = url.searchParams.get('sessionId');
const token = url.searchParams.get('token');
- console.log(`[Terminal WS] Connection attempt for session: ${sessionId}`);
+ logger.info(`Connection attempt for session: ${sessionId}`);
// Check if terminal is enabled
if (!isTerminalEnabled()) {
- console.log('[Terminal WS] Terminal is disabled');
+ logger.info('Terminal is disabled');
ws.close(4003, 'Terminal access is disabled');
return;
}
// Validate token if password is required
if (isTerminalPasswordRequired() && !validateTerminalToken(token || undefined)) {
- console.log('[Terminal WS] Invalid or missing token');
+ logger.info('Invalid or missing token');
ws.close(4001, 'Authentication required');
return;
}
if (!sessionId) {
- console.log('[Terminal WS] No session ID provided');
+ logger.info('No session ID provided');
ws.close(4002, 'Session ID required');
return;
}
@@ -374,12 +391,12 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
// Check if session exists
const session = terminalService.getSession(sessionId);
if (!session) {
- console.log(`[Terminal WS] Session ${sessionId} not found`);
+ logger.info(`Session ${sessionId} not found`);
ws.close(4004, 'Session not found');
return;
}
- console.log(`[Terminal WS] Client connected to session ${sessionId}`);
+ logger.info(`Client connected to session ${sessionId}`);
// Track this connection
if (!terminalConnections.has(sessionId)) {
@@ -495,15 +512,15 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
break;
default:
- console.warn(`[Terminal WS] Unknown message type: ${msg.type}`);
+ logger.warn(`Unknown message type: ${msg.type}`);
}
} catch (error) {
- console.error('[Terminal WS] Error processing message:', error);
+ logger.error('Error processing message:', error);
}
});
ws.on('close', () => {
- console.log(`[Terminal WS] Client disconnected from session ${sessionId}`);
+ logger.info(`Client disconnected from session ${sessionId}`);
unsubscribeData();
unsubscribeExit();
@@ -522,7 +539,7 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
});
ws.on('error', (error) => {
- console.error(`[Terminal WS] Error on session ${sessionId}:`, error);
+ logger.error(`Error on session ${sessionId}:`, error);
unsubscribeData();
unsubscribeExit();
});
@@ -537,7 +554,7 @@ const startServer = (port: number) => {
: 'enabled'
: 'disabled';
const portStr = port.toString().padEnd(4);
- console.log(`
+ logger.info(`
╔═══════════════════════════════════════════════════════╗
║ Automaker Backend Server ║
╠═══════════════════════════════════════════════════════╣
@@ -552,7 +569,7 @@ const startServer = (port: number) => {
server.on('error', (error: NodeJS.ErrnoException) => {
if (error.code === 'EADDRINUSE') {
- console.error(`
+ logger.error(`
╔═══════════════════════════════════════════════════════╗
║ ❌ ERROR: Port ${port} is already in use ║
╠═══════════════════════════════════════════════════════╣
@@ -572,7 +589,7 @@ const startServer = (port: number) => {
`);
process.exit(1);
} else {
- console.error('[Server] Error starting server:', error);
+ logger.error('Error starting server:', error);
process.exit(1);
}
});
@@ -580,21 +597,41 @@ const startServer = (port: number) => {
startServer(PORT);
+// Global error handlers to prevent crashes from uncaught errors
+process.on('unhandledRejection', (reason: unknown, _promise: Promise<unknown>) => {
+ logger.error('Unhandled Promise Rejection:', {
+ reason: reason instanceof Error ? reason.message : String(reason),
+ stack: reason instanceof Error ? reason.stack : undefined,
+ });
+ // Don't exit - log the error and continue running
+ // This prevents the server from crashing due to unhandled rejections
+});
+
+process.on('uncaughtException', (error: Error) => {
+ logger.error('Uncaught Exception:', {
+ message: error.message,
+ stack: error.stack,
+ });
+ // Exit on uncaught exceptions to prevent undefined behavior
+ // The process is in an unknown state after an uncaught exception
+ process.exit(1);
+});
+
// Graceful shutdown
process.on('SIGTERM', () => {
- console.log('SIGTERM received, shutting down...');
+ logger.info('SIGTERM received, shutting down...');
terminalService.cleanup();
server.close(() => {
- console.log('Server closed');
+ logger.info('Server closed');
process.exit(0);
});
});
process.on('SIGINT', () => {
- console.log('SIGINT received, shutting down...');
+ logger.info('SIGINT received, shutting down...');
terminalService.cleanup();
server.close(() => {
- console.log('Server closed');
+ logger.info('Server closed');
process.exit(0);
});
});
diff --git a/apps/server/src/lib/agent-discovery.ts b/apps/server/src/lib/agent-discovery.ts
new file mode 100644
index 00000000..b831bdec
--- /dev/null
+++ b/apps/server/src/lib/agent-discovery.ts
@@ -0,0 +1,257 @@
+/**
+ * Agent Discovery - Scans filesystem for AGENT.md files
+ *
+ * Discovers agents from:
+ * - ~/.claude/agents/ (user-level, global)
+ * - .claude/agents/ (project-level)
+ *
+ * Similar to Skills, but for custom subagents defined in AGENT.md files.
+ */
+
+import path from 'path';
+import os from 'os';
+import { createLogger } from '@automaker/utils';
+import { secureFs, systemPaths } from '@automaker/platform';
+import type { AgentDefinition } from '@automaker/types';
+
+const logger = createLogger('AgentDiscovery');
+
+export interface FilesystemAgent {
+ name: string; // Directory name (e.g., 'code-reviewer')
+ definition: AgentDefinition;
+ source: 'user' | 'project';
+ filePath: string; // Full path to AGENT.md
+}
+
+/**
+ * Parse agent content string into AgentDefinition
+ * Format:
+ * ---
+ * name: agent-name # Optional
+ * description: When to use this agent
+ * tools: tool1, tool2, tool3 # Optional (comma or space separated list)
+ * model: sonnet # Optional: sonnet, opus, haiku
+ * ---
+ * System prompt content here...
+ */
+function parseAgentContent(content: string, filePath: string): AgentDefinition | null {
+ // Extract frontmatter
+ const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---\n([\s\S]*)$/);
+ if (!frontmatterMatch) {
+ logger.warn(`Invalid agent file format (missing frontmatter): ${filePath}`);
+ return null;
+ }
+
+ const [, frontmatter, prompt] = frontmatterMatch;
+
+ // Parse description (required)
+ const description = frontmatter.match(/description:\s*(.+)/)?.[1]?.trim();
+ if (!description) {
+ logger.warn(`Missing description in agent file: ${filePath}`);
+ return null;
+ }
+
+ // Parse tools (optional) - supports both comma-separated and space-separated
+ const toolsMatch = frontmatter.match(/tools:\s*(.+)/);
+ const tools = toolsMatch
+ ? toolsMatch[1]
+ .split(/[,\s]+/) // Split by comma or whitespace
+ .map((t) => t.trim())
+ .filter((t) => t && t !== '')
+ : undefined;
+
+ // Parse model (optional) - validate against allowed values
+ const modelMatch = frontmatter.match(/model:\s*(\w+)/);
+ const modelValue = modelMatch?.[1]?.trim();
+ const validModels = ['sonnet', 'opus', 'haiku', 'inherit'] as const;
+ const model =
+ modelValue && validModels.includes(modelValue as (typeof validModels)[number])
+ ? (modelValue as 'sonnet' | 'opus' | 'haiku' | 'inherit')
+ : undefined;
+
+ if (modelValue && !model) {
+ logger.warn(
+ `Invalid model "${modelValue}" in agent file: ${filePath}. Expected one of: ${validModels.join(', ')}`
+ );
+ }
+
+ return {
+ description,
+ prompt: prompt.trim(),
+ tools,
+ model,
+ };
+}
+
+/**
+ * Directory entry with type information
+ */
+interface DirEntry {
+ name: string;
+ isFile: boolean;
+ isDirectory: boolean;
+}
+
+/**
+ * Filesystem adapter interface for abstracting systemPaths vs secureFs
+ */
+interface FsAdapter {
+ exists: (filePath: string) => Promise<boolean>;
+ readdir: (dirPath: string) => Promise<DirEntry[]>;
+ readFile: (filePath: string) => Promise<string>;
+}
+
+/**
+ * Create a filesystem adapter for system paths (user directory)
+ */
+function createSystemPathAdapter(): FsAdapter {
+ return {
+ exists: (filePath) => Promise.resolve(systemPaths.systemPathExists(filePath)),
+ readdir: async (dirPath) => {
+ const entryNames = await systemPaths.systemPathReaddir(dirPath);
+ const entries: DirEntry[] = [];
+ for (const name of entryNames) {
+ const stat = await systemPaths.systemPathStat(path.join(dirPath, name));
+ entries.push({
+ name,
+ isFile: stat.isFile(),
+ isDirectory: stat.isDirectory(),
+ });
+ }
+ return entries;
+ },
+ readFile: (filePath) => systemPaths.systemPathReadFile(filePath, 'utf-8') as Promise<string>,
+ };
+}
+
+/**
+ * Create a filesystem adapter for project paths (secureFs)
+ */
+function createSecureFsAdapter(): FsAdapter {
+ return {
+ exists: (filePath) =>
+ secureFs
+ .access(filePath)
+ .then(() => true)
+ .catch(() => false),
+ readdir: async (dirPath) => {
+ const entries = await secureFs.readdir(dirPath, { withFileTypes: true });
+ return entries.map((entry) => ({
+ name: entry.name,
+ isFile: entry.isFile(),
+ isDirectory: entry.isDirectory(),
+ }));
+ },
+ readFile: (filePath) => secureFs.readFile(filePath, 'utf-8') as Promise<string>,
+ };
+}
+
+/**
+ * Parse agent file using the provided filesystem adapter
+ */
+async function parseAgentFileWithAdapter(
+ filePath: string,
+ fsAdapter: FsAdapter
+): Promise<AgentDefinition | null> {
+ try {
+ const content = await fsAdapter.readFile(filePath);
+ return parseAgentContent(content, filePath);
+ } catch (error) {
+ logger.error(`Failed to parse agent file: ${filePath}`, error);
+ return null;
+ }
+}
+
+/**
+ * Scan a directory for agent .md files
+ * Agents can be in two formats:
+ * 1. Flat: agent-name.md (file directly in agents/)
+ * 2. Subdirectory: agent-name/AGENT.md (folder + file, similar to Skills)
+ */
+async function scanAgentsDirectory(
+ baseDir: string,
+ source: 'user' | 'project'
+): Promise<FilesystemAgent[]> {
+ const agents: FilesystemAgent[] = [];
+ const fsAdapter = source === 'user' ? createSystemPathAdapter() : createSecureFsAdapter();
+
+ try {
+ // Check if directory exists
+ const exists = await fsAdapter.exists(baseDir);
+ if (!exists) {
+ logger.debug(`Directory does not exist: ${baseDir}`);
+ return agents;
+ }
+
+ // Read all entries in the directory
+ const entries = await fsAdapter.readdir(baseDir);
+
+ for (const entry of entries) {
+ // Check for flat .md file format (agent-name.md)
+ if (entry.isFile && entry.name.endsWith('.md')) {
+ const agentName = entry.name.slice(0, -3); // Remove .md extension
+ const agentFilePath = path.join(baseDir, entry.name);
+ const definition = await parseAgentFileWithAdapter(agentFilePath, fsAdapter);
+ if (definition) {
+ agents.push({
+ name: agentName,
+ definition,
+ source,
+ filePath: agentFilePath,
+ });
+ logger.debug(`Discovered ${source} agent (flat): ${agentName}`);
+ }
+ }
+ // Check for subdirectory format (agent-name/AGENT.md)
+ else if (entry.isDirectory) {
+ const agentFilePath = path.join(baseDir, entry.name, 'AGENT.md');
+ const agentFileExists = await fsAdapter.exists(agentFilePath);
+
+ if (agentFileExists) {
+ const definition = await parseAgentFileWithAdapter(agentFilePath, fsAdapter);
+ if (definition) {
+ agents.push({
+ name: entry.name,
+ definition,
+ source,
+ filePath: agentFilePath,
+ });
+ logger.debug(`Discovered ${source} agent (subdirectory): ${entry.name}`);
+ }
+ }
+ }
+ }
+ } catch (error) {
+ logger.error(`Failed to scan agents directory: ${baseDir}`, error);
+ }
+
+ return agents;
+}
+
+/**
+ * Discover all filesystem-based agents from user and project sources
+ */
+export async function discoverFilesystemAgents(
+ projectPath?: string,
+ sources: Array<'user' | 'project'> = ['user', 'project']
+): Promise<FilesystemAgent[]> {
+ const agents: FilesystemAgent[] = [];
+
+ // Discover user-level agents from ~/.claude/agents/
+ if (sources.includes('user')) {
+ const userAgentsDir = path.join(os.homedir(), '.claude', 'agents');
+ const userAgents = await scanAgentsDirectory(userAgentsDir, 'user');
+ agents.push(...userAgents);
+ logger.info(`Discovered ${userAgents.length} user-level agents from ${userAgentsDir}`);
+ }
+
+ // Discover project-level agents from .claude/agents/
+ if (sources.includes('project') && projectPath) {
+ const projectAgentsDir = path.join(projectPath, '.claude', 'agents');
+ const projectAgents = await scanAgentsDirectory(projectAgentsDir, 'project');
+ agents.push(...projectAgents);
+ logger.info(`Discovered ${projectAgents.length} project-level agents from ${projectAgentsDir}`);
+ }
+
+ return agents;
+}
diff --git a/apps/server/src/lib/auth-utils.ts b/apps/server/src/lib/auth-utils.ts
new file mode 100644
index 00000000..936d2277
--- /dev/null
+++ b/apps/server/src/lib/auth-utils.ts
@@ -0,0 +1,263 @@
+/**
+ * Secure authentication utilities that avoid environment variable race conditions
+ */
+
+import { spawn } from 'child_process';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('AuthUtils');
+
+export interface SecureAuthEnv {
+ [key: string]: string | undefined;
+}
+
+export interface AuthValidationResult {
+ isValid: boolean;
+ error?: string;
+ normalizedKey?: string;
+}
+
+/**
+ * Validates API key format without modifying process.env
+ */
+export function validateApiKey(
+ key: string,
+ provider: 'anthropic' | 'openai' | 'cursor'
+): AuthValidationResult {
+ if (!key || typeof key !== 'string' || key.trim().length === 0) {
+ return { isValid: false, error: 'API key is required' };
+ }
+
+ const trimmedKey = key.trim();
+
+ switch (provider) {
+ case 'anthropic':
+ if (!trimmedKey.startsWith('sk-ant-')) {
+ return {
+ isValid: false,
+ error: 'Invalid Anthropic API key format. Should start with "sk-ant-"',
+ };
+ }
+ if (trimmedKey.length < 20) {
+ return { isValid: false, error: 'Anthropic API key too short' };
+ }
+ break;
+
+ case 'openai':
+ if (!trimmedKey.startsWith('sk-')) {
+ return { isValid: false, error: 'Invalid OpenAI API key format. Should start with "sk-"' };
+ }
+ if (trimmedKey.length < 20) {
+ return { isValid: false, error: 'OpenAI API key too short' };
+ }
+ break;
+
+ case 'cursor':
+ // Cursor API keys might have different format
+ if (trimmedKey.length < 10) {
+ return { isValid: false, error: 'Cursor API key too short' };
+ }
+ break;
+ }
+
+ return { isValid: true, normalizedKey: trimmedKey };
+}
+
+/**
+ * Creates a secure environment object for authentication testing
+ * without modifying the global process.env
+ */
+export function createSecureAuthEnv(
+ authMethod: 'cli' | 'api_key',
+ apiKey?: string,
+ provider: 'anthropic' | 'openai' | 'cursor' = 'anthropic'
+): SecureAuthEnv {
+ const env: SecureAuthEnv = { ...process.env };
+
+ if (authMethod === 'cli') {
+ // For CLI auth, remove the API key to force CLI authentication
+ const envKey = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
+ delete env[envKey];
+ } else if (authMethod === 'api_key' && apiKey) {
+ // For API key auth, validate and set the provided key
+ const validation = validateApiKey(apiKey, provider);
+ if (!validation.isValid) {
+ throw new Error(validation.error);
+ }
+ const envKey = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
+ env[envKey] = validation.normalizedKey;
+ }
+
+ return env;
+}
+
+/**
+ * Creates a temporary environment override for the current process
+ * WARNING: This should only be used in isolated contexts and immediately cleaned up
+ */
+export function createTempEnvOverride(authEnv: SecureAuthEnv): () => void {
+ const originalEnv = { ...process.env };
+
+ // Apply the auth environment
+ Object.assign(process.env, authEnv);
+
+ // Return cleanup function
+ return () => {
+ // Restore original environment
+ Object.keys(process.env).forEach((key) => {
+ if (!(key in originalEnv)) {
+ delete process.env[key];
+ }
+ });
+ Object.assign(process.env, originalEnv);
+ };
+}
+
+/**
+ * Spawns a process with secure environment isolation
+ */
+export function spawnSecureAuth(
+ command: string,
+ args: string[],
+ authEnv: SecureAuthEnv,
+ options: {
+ cwd?: string;
+ timeout?: number;
+ } = {}
+): Promise<{ stdout: string; stderr: string; exitCode: number | null }> {
+ return new Promise((resolve, reject) => {
+ const { cwd = process.cwd(), timeout = 30000 } = options;
+
+ logger.debug(`Spawning secure auth process: ${command} ${args.join(' ')}`);
+
+ const child = spawn(command, args, {
+ cwd,
+ env: authEnv,
+ stdio: 'pipe',
+ shell: false,
+ });
+
+ let stdout = '';
+ let stderr = '';
+ let isResolved = false;
+
+ const timeoutId = setTimeout(() => {
+ if (!isResolved) {
+ child.kill('SIGTERM');
+ isResolved = true;
+ reject(new Error('Authentication process timed out'));
+ }
+ }, timeout);
+
+ child.stdout?.on('data', (data) => {
+ stdout += data.toString();
+ });
+
+ child.stderr?.on('data', (data) => {
+ stderr += data.toString();
+ });
+
+ child.on('close', (code) => {
+ clearTimeout(timeoutId);
+ if (!isResolved) {
+ isResolved = true;
+ resolve({ stdout, stderr, exitCode: code });
+ }
+ });
+
+ child.on('error', (error) => {
+ clearTimeout(timeoutId);
+ if (!isResolved) {
+ isResolved = true;
+ reject(error);
+ }
+ });
+ });
+}
+
+/**
+ * Safely extracts environment variable without race conditions
+ */
+export function safeGetEnv(key: string): string | undefined {
+ return process.env[key];
+}
+
+/**
+ * Checks if an environment variable would be modified without actually modifying it
+ */
+export function wouldModifyEnv(key: string, newValue: string): boolean {
+ const currentValue = safeGetEnv(key);
+ return currentValue !== newValue;
+}
+
+/**
+ * Secure auth session management
+ */
+export class AuthSessionManager {
+ private static activeSessions = new Map<string, SecureAuthEnv>();
+
+ static createSession(
+ sessionId: string,
+ authMethod: 'cli' | 'api_key',
+ apiKey?: string,
+ provider: 'anthropic' | 'openai' | 'cursor' = 'anthropic'
+ ): SecureAuthEnv {
+ const env = createSecureAuthEnv(authMethod, apiKey, provider);
+ this.activeSessions.set(sessionId, env);
+ return env;
+ }
+
+ static getSession(sessionId: string): SecureAuthEnv | undefined {
+ return this.activeSessions.get(sessionId);
+ }
+
+ static destroySession(sessionId: string): void {
+ this.activeSessions.delete(sessionId);
+ }
+
+ static cleanup(): void {
+ this.activeSessions.clear();
+ }
+}
+
+/**
+ * Rate limiting for auth attempts to prevent abuse
+ */
+export class AuthRateLimiter {
+ private attempts = new Map<string, { count: number; lastAttempt: number }>();
+
+ constructor(
+ private maxAttempts = 5,
+ private windowMs = 60000
+ ) {}
+
+ canAttempt(identifier: string): boolean {
+ const now = Date.now();
+ const record = this.attempts.get(identifier);
+
+ if (!record || now - record.lastAttempt > this.windowMs) {
+ this.attempts.set(identifier, { count: 1, lastAttempt: now });
+ return true;
+ }
+
+ if (record.count >= this.maxAttempts) {
+ return false;
+ }
+
+ record.count++;
+ record.lastAttempt = now;
+ return true;
+ }
+
+ getRemainingAttempts(identifier: string): number {
+ const record = this.attempts.get(identifier);
+ if (!record) return this.maxAttempts;
+ return Math.max(0, this.maxAttempts - record.count);
+ }
+
+ getResetTime(identifier: string): Date | null {
+ const record = this.attempts.get(identifier);
+ if (!record) return null;
+ return new Date(record.lastAttempt + this.windowMs);
+ }
+}
diff --git a/apps/server/src/lib/auth.ts b/apps/server/src/lib/auth.ts
index 5f24b319..0a4b5389 100644
--- a/apps/server/src/lib/auth.ts
+++ b/apps/server/src/lib/auth.ts
@@ -12,6 +12,9 @@ import type { Request, Response, NextFunction } from 'express';
import crypto from 'crypto';
import path from 'path';
import * as secureFs from './secure-fs.js';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('Auth');
const DATA_DIR = process.env.DATA_DIR || './data';
const API_KEY_FILE = path.join(DATA_DIR, '.api-key');
@@ -61,11 +64,11 @@ function loadSessions(): void {
}
if (loadedCount > 0 || expiredCount > 0) {
- console.log(`[Auth] Loaded ${loadedCount} sessions (${expiredCount} expired)`);
+ logger.info(`Loaded ${loadedCount} sessions (${expiredCount} expired)`);
}
}
} catch (error) {
- console.warn('[Auth] Error loading sessions:', error);
+ logger.warn('Error loading sessions:', error);
}
}
@@ -81,7 +84,7 @@ async function saveSessions(): Promise {
mode: 0o600,
});
} catch (error) {
- console.error('[Auth] Failed to save sessions:', error);
+ logger.error('Failed to save sessions:', error);
}
}
@@ -95,7 +98,7 @@ loadSessions();
function ensureApiKey(): string {
// First check environment variable (Electron passes it this way)
if (process.env.AUTOMAKER_API_KEY) {
- console.log('[Auth] Using API key from environment variable');
+ logger.info('Using API key from environment variable');
return process.env.AUTOMAKER_API_KEY;
}
@@ -104,12 +107,12 @@ function ensureApiKey(): string {
if (secureFs.existsSync(API_KEY_FILE)) {
const key = (secureFs.readFileSync(API_KEY_FILE, 'utf-8') as string).trim();
if (key) {
- console.log('[Auth] Loaded API key from file');
+ logger.info('Loaded API key from file');
return key;
}
}
} catch (error) {
- console.warn('[Auth] Error reading API key file:', error);
+ logger.warn('Error reading API key file:', error);
}
// Generate new key
@@ -117,9 +120,9 @@ function ensureApiKey(): string {
try {
secureFs.mkdirSync(path.dirname(API_KEY_FILE), { recursive: true });
secureFs.writeFileSync(API_KEY_FILE, newKey, { encoding: 'utf-8', mode: 0o600 });
- console.log('[Auth] Generated new API key');
+ logger.info('Generated new API key');
} catch (error) {
- console.error('[Auth] Failed to save API key:', error);
+ logger.error('Failed to save API key:', error);
}
return newKey;
}
@@ -129,7 +132,7 @@ const API_KEY = ensureApiKey();
// Print API key to console for web mode users (unless suppressed for production logging)
if (process.env.AUTOMAKER_HIDE_API_KEY !== 'true') {
- console.log(`
+ logger.info(`
╔═══════════════════════════════════════════════════════════════════════╗
║ 🔐 API Key for Web Mode Authentication ║
╠═══════════════════════════════════════════════════════════════════════╣
@@ -142,7 +145,7 @@ if (process.env.AUTOMAKER_HIDE_API_KEY !== 'true') {
╚═══════════════════════════════════════════════════════════════════════╝
`);
} else {
- console.log('[Auth] API key banner hidden (AUTOMAKER_HIDE_API_KEY=true)');
+ logger.info('API key banner hidden (AUTOMAKER_HIDE_API_KEY=true)');
}
/**
@@ -177,7 +180,7 @@ export function validateSession(token: string): boolean {
if (Date.now() > session.expiresAt) {
validSessions.delete(token);
// Fire-and-forget: persist removal asynchronously
- saveSessions().catch((err) => console.error('[Auth] Error saving sessions:', err));
+ saveSessions().catch((err) => logger.error('Error saving sessions:', err));
return false;
}
@@ -259,7 +262,7 @@ export function getSessionCookieOptions(): {
return {
httpOnly: true, // JavaScript cannot access this cookie
secure: process.env.NODE_ENV === 'production', // HTTPS only in production
- sameSite: 'strict', // Only sent for same-site requests (CSRF protection)
+ sameSite: 'lax', // Sent for same-site requests and top-level navigations, but not cross-origin fetch/XHR
maxAge: SESSION_MAX_AGE_MS,
path: '/',
};
diff --git a/apps/server/src/lib/cli-detection.ts b/apps/server/src/lib/cli-detection.ts
new file mode 100644
index 00000000..eba4c68a
--- /dev/null
+++ b/apps/server/src/lib/cli-detection.ts
@@ -0,0 +1,447 @@
+/**
+ * Unified CLI Detection Framework
+ *
+ * Provides consistent CLI detection and management across all providers
+ */
+
+import { spawn, execSync } from 'child_process';
+import * as fs from 'fs';
+import * as path from 'path';
+import * as os from 'os';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('CliDetection');
+
+export interface CliInfo {
+ name: string;
+ command: string;
+ version?: string;
+ path?: string;
+ installed: boolean;
+ authenticated: boolean;
+ authMethod: 'cli' | 'api_key' | 'none';
+ platform?: string;
+ architectures?: string[];
+}
+
+export interface CliDetectionOptions {
+ timeout?: number;
+ includeWsl?: boolean;
+ wslDistribution?: string;
+}
+
+export interface CliDetectionResult {
+ cli: CliInfo;
+ detected: boolean;
+ issues: string[];
+}
+
+export interface UnifiedCliDetection {
+ claude?: CliDetectionResult;
+ codex?: CliDetectionResult;
+ cursor?: CliDetectionResult;
+}
+
+/**
+ * CLI Configuration for different providers
+ */
+const CLI_CONFIGS = {
+ claude: {
+ name: 'Claude CLI',
+ commands: ['claude'],
+ versionArgs: ['--version'],
+ installCommands: {
+ darwin: 'brew install anthropics/claude/claude',
+ linux: 'curl -fsSL https://claude.ai/install.sh | sh',
+ win32: 'iwr https://claude.ai/install.ps1 -UseBasicParsing | iex',
+ },
+ },
+ codex: {
+ name: 'Codex CLI',
+ commands: ['codex', 'openai'],
+ versionArgs: ['--version'],
+ installCommands: {
+ darwin: 'npm install -g @openai/codex-cli',
+ linux: 'npm install -g @openai/codex-cli',
+ win32: 'npm install -g @openai/codex-cli',
+ },
+ },
+ cursor: {
+ name: 'Cursor CLI',
+ commands: ['cursor-agent', 'cursor'],
+ versionArgs: ['--version'],
+ installCommands: {
+ darwin: 'brew install cursor/cursor/cursor-agent',
+ linux: 'curl -fsSL https://cursor.sh/install.sh | sh',
+ win32: 'iwr https://cursor.sh/install.ps1 -UseBasicParsing | iex',
+ },
+ },
+} as const;
+
+/**
+ * Detect if a CLI is installed and available
+ */
+export async function detectCli(
+ provider: keyof typeof CLI_CONFIGS,
+ options: CliDetectionOptions = {}
+): Promise<CliDetectionResult> {
+ const config = CLI_CONFIGS[provider];
+ const { timeout = 5000, includeWsl = false, wslDistribution } = options;
+ const issues: string[] = [];
+
+ const cliInfo: CliInfo = {
+ name: config.name,
+ command: '',
+ installed: false,
+ authenticated: false,
+ authMethod: 'none',
+ };
+
+ try {
+ // Find the command in PATH
+ const command = await findCommand([...config.commands]);
+ if (command) {
+ cliInfo.command = command;
+ }
+
+ if (!cliInfo.command) {
+ issues.push(`${config.name} not found in PATH`);
+ return { cli: cliInfo, detected: false, issues };
+ }
+
+ cliInfo.path = cliInfo.command;
+ cliInfo.installed = true;
+
+ // Get version
+ try {
+ cliInfo.version = await getCliVersion(cliInfo.command, [...config.versionArgs], timeout);
+ } catch (error) {
+ issues.push(`Failed to get ${config.name} version: ${error}`);
+ }
+
+ // Check authentication
+ cliInfo.authMethod = await checkCliAuth(provider, cliInfo.command);
+ cliInfo.authenticated = cliInfo.authMethod !== 'none';
+
+ return { cli: cliInfo, detected: true, issues };
+ } catch (error) {
+ issues.push(`Error detecting ${config.name}: ${error}`);
+ return { cli: cliInfo, detected: false, issues };
+ }
+}
+
+/**
+ * Detect all CLIs in the system
+ */
+export async function detectAllCLis(
+ options: CliDetectionOptions = {}
+): Promise<UnifiedCliDetection> {
+ const results: UnifiedCliDetection = {};
+
+ // Detect all providers in parallel
+ const providers = Object.keys(CLI_CONFIGS) as Array<keyof typeof CLI_CONFIGS>;
+ const detectionPromises = providers.map(async (provider) => {
+ const result = await detectCli(provider, options);
+ return { provider, result };
+ });
+
+ const detections = await Promise.all(detectionPromises);
+
+ for (const { provider, result } of detections) {
+ results[provider] = result;
+ }
+
+ return results;
+}
+
+/**
+ * Find the first available command from a list of alternatives
+ */
+export async function findCommand(commands: string[]): Promise<string | null> {
+ for (const command of commands) {
+ try {
+ const whichCommand = process.platform === 'win32' ? 'where' : 'which';
+ const result = execSync(`${whichCommand} ${command}`, {
+ encoding: 'utf8',
+ timeout: 2000,
+ }).trim();
+
+ if (result) {
+ return result.split('\n')[0]; // Take first result on Windows
+ }
+ } catch {
+ // Command not found, try next
+ }
+ }
+ return null;
+}
+
+/**
+ * Get CLI version
+ */
+export async function getCliVersion(
+ command: string,
+ args: string[],
+ timeout: number = 5000
+): Promise<string> {
+ return new Promise((resolve, reject) => {
+ const child = spawn(command, args, {
+ stdio: 'pipe',
+ timeout,
+ });
+
+ let stdout = '';
+ let stderr = '';
+
+ child.stdout?.on('data', (data) => {
+ stdout += data.toString();
+ });
+
+ child.stderr?.on('data', (data) => {
+ stderr += data.toString();
+ });
+
+ child.on('close', (code) => {
+ if (code === 0 && stdout) {
+ resolve(stdout.trim());
+ } else if (stderr) {
+ reject(stderr.trim());
+ } else {
+ reject(`Command exited with code ${code}`);
+ }
+ });
+
+ child.on('error', reject);
+ });
+}
+
+/**
+ * Check authentication status for a CLI
+ */
+export async function checkCliAuth(
+ provider: keyof typeof CLI_CONFIGS,
+ command: string
+): Promise<'cli' | 'api_key' | 'none'> {
+ try {
+ switch (provider) {
+ case 'claude':
+ return await checkClaudeAuth(command);
+ case 'codex':
+ return await checkCodexAuth(command);
+ case 'cursor':
+ return await checkCursorAuth(command);
+ default:
+ return 'none';
+ }
+ } catch {
+ return 'none';
+ }
+}
+
+/**
+ * Check Claude CLI authentication
+ */
+async function checkClaudeAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
+ try {
+ // Check for environment variable
+ if (process.env.ANTHROPIC_API_KEY) {
+ return 'api_key';
+ }
+
+ // Try running a simple command to check CLI auth
+ const result = await getCliVersion(command, ['--version'], 3000);
+ if (result) {
+ return 'cli'; // If version works, assume CLI is authenticated
+ }
+ } catch {
+ // Version command might work even without auth, so we need a better check
+ }
+
+ // Try a more specific auth check
+ return new Promise((resolve) => {
+ const child = spawn(command, ['whoami'], {
+ stdio: 'pipe',
+ timeout: 3000,
+ });
+
+ let stdout = '';
+ let stderr = '';
+
+ child.stdout?.on('data', (data) => {
+ stdout += data.toString();
+ });
+
+ child.stderr?.on('data', (data) => {
+ stderr += data.toString();
+ });
+
+ child.on('close', (code) => {
+ if (code === 0 && stdout && !stderr.includes('not authenticated')) {
+ resolve('cli');
+ } else {
+ resolve('none');
+ }
+ });
+
+ child.on('error', () => {
+ resolve('none');
+ });
+ });
+}
+
+/**
+ * Check Codex CLI authentication
+ */
+async function checkCodexAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
+ // Check for environment variable
+ if (process.env.OPENAI_API_KEY) {
+ return 'api_key';
+ }
+
+ try {
+ // Try a simple auth check
+ const result = await getCliVersion(command, ['--version'], 3000);
+ if (result) {
+ return 'cli';
+ }
+ } catch {
+ // Version check failed
+ }
+
+ return 'none';
+}
+
+/**
+ * Check Cursor CLI authentication
+ */
+async function checkCursorAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
+ // Check for environment variable
+ if (process.env.CURSOR_API_KEY) {
+ return 'api_key';
+ }
+
+ // Check for credentials files
+ const credentialPaths = [
+ path.join(os.homedir(), '.cursor', 'credentials.json'),
+ path.join(os.homedir(), '.config', 'cursor', 'credentials.json'),
+ path.join(os.homedir(), '.cursor', 'auth.json'),
+ path.join(os.homedir(), '.config', 'cursor', 'auth.json'),
+ ];
+
+ for (const credPath of credentialPaths) {
+ try {
+ if (fs.existsSync(credPath)) {
+ const content = fs.readFileSync(credPath, 'utf8');
+ const creds = JSON.parse(content);
+ if (creds.accessToken || creds.token || creds.apiKey) {
+ return 'cli';
+ }
+ }
+ } catch {
+ // Invalid credentials file
+ }
+ }
+
+ // Try a simple command
+ try {
+ const result = await getCliVersion(command, ['--version'], 3000);
+ if (result) {
+ return 'cli';
+ }
+ } catch {
+ // Version check failed
+ }
+
+ return 'none';
+}
+
+/**
+ * Get installation instructions for a provider
+ */
+export function getInstallInstructions(
+ provider: keyof typeof CLI_CONFIGS,
+ platform: NodeJS.Platform = process.platform
+): string {
+ const config = CLI_CONFIGS[provider];
+ const command = config.installCommands[platform as keyof typeof config.installCommands];
+
+ if (!command) {
+ return `No installation instructions available for ${provider} on ${platform}`;
+ }
+
+ return command;
+}
+
+/**
+ * Get platform-specific CLI paths and versions
+ */
+export function getPlatformCliPaths(provider: keyof typeof CLI_CONFIGS): string[] {
+ const config = CLI_CONFIGS[provider];
+ const platform = process.platform;
+
+ switch (platform) {
+ case 'darwin':
+ return [
+ `/usr/local/bin/${config.commands[0]}`,
+ `/opt/homebrew/bin/${config.commands[0]}`,
+ path.join(os.homedir(), '.local', 'bin', config.commands[0]),
+ ];
+
+ case 'linux':
+ return [
+ `/usr/bin/${config.commands[0]}`,
+ `/usr/local/bin/${config.commands[0]}`,
+ path.join(os.homedir(), '.local', 'bin', config.commands[0]),
+ path.join(os.homedir(), '.npm', 'global', 'bin', config.commands[0]),
+ ];
+
+ case 'win32':
+ return [
+ path.join(
+ os.homedir(),
+ 'AppData',
+ 'Local',
+ 'Programs',
+ config.commands[0],
+ `${config.commands[0]}.exe`
+ ),
+ path.join(process.env.ProgramFiles || '', config.commands[0], `${config.commands[0]}.exe`),
+ path.join(
+ process.env.ProgramFiles || '',
+ config.commands[0],
+ 'bin',
+ `${config.commands[0]}.exe`
+ ),
+ ];
+
+ default:
+ return [];
+ }
+}
+
+/**
+ * Validate CLI installation
+ */
+export function validateCliInstallation(cliInfo: CliInfo): {
+ valid: boolean;
+ issues: string[];
+} {
+ const issues: string[] = [];
+
+ if (!cliInfo.installed) {
+ issues.push('CLI is not installed');
+ }
+
+ if (cliInfo.installed && !cliInfo.version) {
+ issues.push('Could not determine CLI version');
+ }
+
+ if (cliInfo.installed && cliInfo.authMethod === 'none') {
+ issues.push('CLI is not authenticated');
+ }
+
+ return {
+ valid: issues.length === 0,
+ issues,
+ };
+}
diff --git a/apps/server/src/lib/codex-auth.ts b/apps/server/src/lib/codex-auth.ts
new file mode 100644
index 00000000..94fadc8c
--- /dev/null
+++ b/apps/server/src/lib/codex-auth.ts
@@ -0,0 +1,68 @@
+/**
+ * Shared utility for checking Codex CLI authentication status
+ *
+ * Uses 'codex login status' command to verify authentication.
+ * Never assumes authenticated - only returns true if CLI confirms.
+ */
+
+import { spawnProcess } from '@automaker/platform';
+import { findCodexCliPath } from '@automaker/platform';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('CodexAuth');
+
// Bare command name for the Codex CLI (fallback when no resolved path is used).
const CODEX_COMMAND = 'codex';
// Environment variable whose presence is reported as API-key authentication.
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';

/** Result of a Codex CLI authentication check. */
export interface CodexAuthCheckResult {
  // True only when the CLI itself confirms an active login.
  authenticated: boolean;
  // How authentication was established: env API key, CLI login session, or none.
  method: 'api_key_env' | 'cli_authenticated' | 'none';
}
+
+/**
+ * Check Codex authentication status using 'codex login status' command
+ *
+ * @param cliPath Optional CLI path. If not provided, will attempt to find it.
+ * @returns Authentication status and method
+ */
+export async function checkCodexAuthentication(
+ cliPath?: string | null
+): Promise {
+ const resolvedCliPath = cliPath || (await findCodexCliPath());
+ const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
+
+ // If CLI is not installed, cannot be authenticated
+ if (!resolvedCliPath) {
+ logger.info('CLI not found');
+ return { authenticated: false, method: 'none' };
+ }
+
+ try {
+ const result = await spawnProcess({
+ command: resolvedCliPath || CODEX_COMMAND,
+ args: ['login', 'status'],
+ cwd: process.cwd(),
+ env: {
+ ...process.env,
+ TERM: 'dumb', // Avoid interactive output
+ },
+ });
+
+ // Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
+ const combinedOutput = (result.stdout + result.stderr).toLowerCase();
+ const isLoggedIn = combinedOutput.includes('logged in');
+
+ if (result.exitCode === 0 && isLoggedIn) {
+ // Determine auth method based on what we know
+ const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
+ logger.info(`✓ Authenticated (${method})`);
+ return { authenticated: true, method };
+ }
+
+ logger.info('Not authenticated');
+ return { authenticated: false, method: 'none' };
+ } catch (error) {
+ logger.error('Failed to check authentication:', error);
+ return { authenticated: false, method: 'none' };
+ }
+}
diff --git a/apps/server/src/lib/error-handler.ts b/apps/server/src/lib/error-handler.ts
new file mode 100644
index 00000000..770f26a2
--- /dev/null
+++ b/apps/server/src/lib/error-handler.ts
@@ -0,0 +1,414 @@
+/**
+ * Unified Error Handling System for CLI Providers
+ *
+ * Provides consistent error classification, user-friendly messages, and debugging support
+ * across all AI providers (Claude, Codex, Cursor)
+ */
+
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('ErrorHandler');
+
+export enum ErrorType {
+ AUTHENTICATION = 'authentication',
+ BILLING = 'billing',
+ RATE_LIMIT = 'rate_limit',
+ NETWORK = 'network',
+ TIMEOUT = 'timeout',
+ VALIDATION = 'validation',
+ PERMISSION = 'permission',
+ CLI_NOT_FOUND = 'cli_not_found',
+ CLI_NOT_INSTALLED = 'cli_not_installed',
+ MODEL_NOT_SUPPORTED = 'model_not_supported',
+ INVALID_REQUEST = 'invalid_request',
+ SERVER_ERROR = 'server_error',
+ UNKNOWN = 'unknown',
+}
+
+export enum ErrorSeverity {
+ LOW = 'low',
+ MEDIUM = 'medium',
+ HIGH = 'high',
+ CRITICAL = 'critical',
+}
+
+export interface ErrorClassification {
+ type: ErrorType;
+ severity: ErrorSeverity;
+ userMessage: string;
+ technicalMessage: string;
+ suggestedAction?: string;
+ retryable: boolean;
+ provider?: string;
+ context?: Record;
+}
+
+export interface ErrorPattern {
+ type: ErrorType;
+ severity: ErrorSeverity;
+ patterns: RegExp[];
+ userMessage: string;
+ suggestedAction?: string;
+ retryable: boolean;
+}
+
/**
 * Error patterns for different types of errors.
 *
 * Matched in declaration order by classifyError (first matching regex wins),
 * so earlier entries take precedence over later ones.
 *
 * NOTE(review): the NETWORK entry includes /timeout/i and appears before the
 * TIMEOUT entry, so timeout-like messages classify as NETWORK — confirm this
 * ordering is intentional.
 * NOTE(review): bare numeric patterns such as /429/i and /500/i match those
 * digits anywhere in the text and can misfire on unrelated numbers.
 */
const ERROR_PATTERNS: ErrorPattern[] = [
  // Authentication errors
  {
    type: ErrorType.AUTHENTICATION,
    severity: ErrorSeverity.HIGH,
    patterns: [
      /unauthorized/i,
      /authentication.*fail/i,
      /invalid_api_key/i,
      /invalid api key/i,
      /not authenticated/i,
      /please.*log/i,
      /token.*revoked/i,
      /oauth.*error/i,
      /credentials.*invalid/i,
    ],
    userMessage: 'Authentication failed. Please check your API key or login credentials.',
    suggestedAction:
      "Verify your API key is correct and hasn't expired, or run the CLI login command.",
    retryable: false,
  },

  // Billing errors
  {
    type: ErrorType.BILLING,
    severity: ErrorSeverity.HIGH,
    patterns: [
      /credit.*balance.*low/i,
      /insufficient.*credit/i,
      /billing.*issue/i,
      /payment.*required/i,
      /usage.*exceeded/i,
      /quota.*exceeded/i,
      /add.*credit/i,
    ],
    userMessage: 'Account has insufficient credits or billing issues.',
    suggestedAction: 'Please add credits to your account or check your billing settings.',
    retryable: false,
  },

  // Rate limit errors
  {
    type: ErrorType.RATE_LIMIT,
    severity: ErrorSeverity.MEDIUM,
    patterns: [
      /rate.*limit/i,
      /too.*many.*request/i,
      /limit.*reached/i,
      /try.*later/i,
      /429/i,
      /reset.*time/i,
      /upgrade.*plan/i,
    ],
    userMessage: 'Rate limit reached. Please wait before trying again.',
    suggestedAction: 'Wait a few minutes before retrying, or consider upgrading your plan.',
    retryable: true,
  },

  // Network errors (note: also captures /timeout/ before the TIMEOUT entry below)
  {
    type: ErrorType.NETWORK,
    severity: ErrorSeverity.MEDIUM,
    patterns: [/network/i, /connection/i, /dns/i, /timeout/i, /econnrefused/i, /enotfound/i],
    userMessage: 'Network connection issue.',
    suggestedAction: 'Check your internet connection and try again.',
    retryable: true,
  },

  // Timeout errors (reached only when none of the NETWORK patterns matched)
  {
    type: ErrorType.TIMEOUT,
    severity: ErrorSeverity.MEDIUM,
    patterns: [/timeout/i, /aborted/i, /time.*out/i],
    userMessage: 'Operation timed out.',
    suggestedAction: 'Try again with a simpler request or check your connection.',
    retryable: true,
  },

  // Permission errors
  {
    type: ErrorType.PERMISSION,
    severity: ErrorSeverity.HIGH,
    patterns: [/permission.*denied/i, /access.*denied/i, /forbidden/i, /403/i, /not.*authorized/i],
    userMessage: 'Permission denied.',
    suggestedAction: 'Check if you have the required permissions for this operation.',
    retryable: false,
  },

  // CLI not found
  {
    type: ErrorType.CLI_NOT_FOUND,
    severity: ErrorSeverity.HIGH,
    patterns: [/command not found/i, /not recognized/i, /not.*installed/i, /ENOENT/i],
    userMessage: 'CLI tool not found.',
    suggestedAction: "Please install the required CLI tool and ensure it's in your PATH.",
    retryable: false,
  },

  // Model not supported
  {
    type: ErrorType.MODEL_NOT_SUPPORTED,
    severity: ErrorSeverity.HIGH,
    patterns: [/model.*not.*support/i, /unknown.*model/i, /invalid.*model/i],
    userMessage: 'Model not supported.',
    suggestedAction: 'Check available models and use a supported one.',
    retryable: false,
  },

  // Server errors
  {
    type: ErrorType.SERVER_ERROR,
    severity: ErrorSeverity.HIGH,
    patterns: [/internal.*server/i, /server.*error/i, /500/i, /502/i, /503/i, /504/i],
    userMessage: 'Server error occurred.',
    suggestedAction: 'Try again in a few minutes or contact support if the issue persists.',
    retryable: true,
  },
];
+
+/**
+ * Classify an error into a specific type with user-friendly message
+ */
+export function classifyError(
+ error: unknown,
+ provider?: string,
+ context?: Record
+): ErrorClassification {
+ const errorText = getErrorText(error);
+
+ // Try to match against known patterns
+ for (const pattern of ERROR_PATTERNS) {
+ for (const regex of pattern.patterns) {
+ if (regex.test(errorText)) {
+ return {
+ type: pattern.type,
+ severity: pattern.severity,
+ userMessage: pattern.userMessage,
+ technicalMessage: errorText,
+ suggestedAction: pattern.suggestedAction,
+ retryable: pattern.retryable,
+ provider,
+ context,
+ };
+ }
+ }
+ }
+
+ // Unknown error
+ return {
+ type: ErrorType.UNKNOWN,
+ severity: ErrorSeverity.MEDIUM,
+ userMessage: 'An unexpected error occurred.',
+ technicalMessage: errorText,
+ suggestedAction: 'Please try again or contact support if the issue persists.',
+ retryable: true,
+ provider,
+ context,
+ };
+}
+
+/**
+ * Get a user-friendly error message
+ */
+export function getUserFriendlyErrorMessage(error: unknown, provider?: string): string {
+ const classification = classifyError(error, provider);
+
+ let message = classification.userMessage;
+
+ if (classification.suggestedAction) {
+ message += ` ${classification.suggestedAction}`;
+ }
+
+ // Add provider-specific context if available
+ if (provider) {
+ message = `[${provider.toUpperCase()}] ${message}`;
+ }
+
+ return message;
+}
+
+/**
+ * Check if an error is retryable
+ */
+export function isRetryableError(error: unknown): boolean {
+ const classification = classifyError(error);
+ return classification.retryable;
+}
+
+/**
+ * Check if an error is authentication-related
+ */
+export function isAuthenticationError(error: unknown): boolean {
+ const classification = classifyError(error);
+ return classification.type === ErrorType.AUTHENTICATION;
+}
+
+/**
+ * Check if an error is billing-related
+ */
+export function isBillingError(error: unknown): boolean {
+ const classification = classifyError(error);
+ return classification.type === ErrorType.BILLING;
+}
+
+/**
+ * Check if an error is rate limit related
+ */
+export function isRateLimitError(error: unknown): boolean {
+ const classification = classifyError(error);
+ return classification.type === ErrorType.RATE_LIMIT;
+}
+
+/**
+ * Get error text from various error types
+ */
+function getErrorText(error: unknown): string {
+ if (typeof error === 'string') {
+ return error;
+ }
+
+ if (error instanceof Error) {
+ return error.message;
+ }
+
+ if (typeof error === 'object' && error !== null) {
+ // Handle structured error objects
+ const errorObj = error as any;
+
+ if (errorObj.message) {
+ return errorObj.message;
+ }
+
+ if (errorObj.error?.message) {
+ return errorObj.error.message;
+ }
+
+ if (errorObj.error) {
+ return typeof errorObj.error === 'string' ? errorObj.error : JSON.stringify(errorObj.error);
+ }
+
+ return JSON.stringify(error);
+ }
+
+ return String(error);
+}
+
+/**
+ * Create a standardized error response
+ */
+export function createErrorResponse(
+ error: unknown,
+ provider?: string,
+ context?: Record
+): {
+ success: false;
+ error: string;
+ errorType: ErrorType;
+ severity: ErrorSeverity;
+ retryable: boolean;
+ suggestedAction?: string;
+} {
+ const classification = classifyError(error, provider, context);
+
+ return {
+ success: false,
+ error: classification.userMessage,
+ errorType: classification.type,
+ severity: classification.severity,
+ retryable: classification.retryable,
+ suggestedAction: classification.suggestedAction,
+ };
+}
+
+/**
+ * Log error with full context
+ */
+export function logError(
+ error: unknown,
+ provider?: string,
+ operation?: string,
+ additionalContext?: Record
+): void {
+ const classification = classifyError(error, provider, {
+ operation,
+ ...additionalContext,
+ });
+
+ logger.error(`Error in ${provider || 'unknown'}${operation ? ` during ${operation}` : ''}`, {
+ type: classification.type,
+ severity: classification.severity,
+ message: classification.userMessage,
+ technicalMessage: classification.technicalMessage,
+ retryable: classification.retryable,
+ suggestedAction: classification.suggestedAction,
+ context: classification.context,
+ });
+}
+
+/**
+ * Provider-specific error handlers
+ */
+export const ProviderErrorHandler = {
+ claude: {
+ classify: (error: unknown) => classifyError(error, 'claude'),
+ getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'claude'),
+ isAuth: (error: unknown) => isAuthenticationError(error),
+ isBilling: (error: unknown) => isBillingError(error),
+ isRateLimit: (error: unknown) => isRateLimitError(error),
+ },
+
+ codex: {
+ classify: (error: unknown) => classifyError(error, 'codex'),
+ getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'codex'),
+ isAuth: (error: unknown) => isAuthenticationError(error),
+ isBilling: (error: unknown) => isBillingError(error),
+ isRateLimit: (error: unknown) => isRateLimitError(error),
+ },
+
+ cursor: {
+ classify: (error: unknown) => classifyError(error, 'cursor'),
+ getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'cursor'),
+ isAuth: (error: unknown) => isAuthenticationError(error),
+ isBilling: (error: unknown) => isBillingError(error),
+ isRateLimit: (error: unknown) => isRateLimitError(error),
+ },
+};
+
+/**
+ * Create a retry handler for retryable errors
+ */
+export function createRetryHandler(maxRetries: number = 3, baseDelay: number = 1000) {
+ return async function (
+ operation: () => Promise,
+ shouldRetry: (error: unknown) => boolean = isRetryableError
+ ): Promise {
+ let lastError: unknown;
+
+ for (let attempt = 0; attempt <= maxRetries; attempt++) {
+ try {
+ return await operation();
+ } catch (error) {
+ lastError = error;
+
+ if (attempt === maxRetries || !shouldRetry(error)) {
+ throw error;
+ }
+
+ // Exponential backoff with jitter
+ const delay = baseDelay * Math.pow(2, attempt) + Math.random() * 1000;
+ logger.debug(`Retrying operation in ${delay}ms (attempt ${attempt + 1}/${maxRetries})`);
+ await new Promise((resolve) => setTimeout(resolve, delay));
+ }
+ }
+
+ throw lastError;
+ };
+}
diff --git a/apps/server/src/lib/events.ts b/apps/server/src/lib/events.ts
index c7bd1512..7f224c4e 100644
--- a/apps/server/src/lib/events.ts
+++ b/apps/server/src/lib/events.ts
@@ -3,6 +3,9 @@
*/
import type { EventType, EventCallback } from '@automaker/types';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('Events');
// Re-export event types from shared package
export type { EventType, EventCallback };
@@ -21,7 +24,7 @@ export function createEventEmitter(): EventEmitter {
try {
callback(type, payload);
} catch (error) {
- console.error('Error in event subscriber:', error);
+ logger.error('Error in event subscriber:', error);
}
}
},
diff --git a/apps/server/src/lib/json-extractor.ts b/apps/server/src/lib/json-extractor.ts
new file mode 100644
index 00000000..a1a97dd8
--- /dev/null
+++ b/apps/server/src/lib/json-extractor.ts
@@ -0,0 +1,211 @@
+/**
+ * JSON Extraction Utilities
+ *
+ * Robust JSON extraction from AI responses that may contain markdown,
+ * code blocks, or other text mixed with JSON content.
+ *
+ * Used by various routes that parse structured output from Cursor or
+ * Claude responses when structured output is not available.
+ */
+
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('JsonExtractor');
+
/**
 * Logger interface for optional custom logging
 */
export interface JsonExtractorLogger {
  // Called by extractJson for each strategy attempted and on success/failure.
  debug: (message: string, ...args: unknown[]) => void;
  // Optional; not currently invoked by extractJson.
  warn?: (message: string, ...args: unknown[]) => void;
}

/**
 * Options for JSON extraction
 */
export interface ExtractJsonOptions {
  /** Custom logger (defaults to internal logger) */
  logger?: JsonExtractorLogger;
  /** Required key that must be present in the extracted JSON */
  requiredKey?: string;
  /** Whether the required key's value must be an array */
  requireArray?: boolean;
}
+
+/**
+ * Extract JSON from response text using multiple strategies.
+ *
+ * Strategies tried in order:
+ * 1. JSON in ```json code block
+ * 2. JSON in ``` code block (no language)
+ * 3. Find JSON object by matching braces (starting with requiredKey if specified)
+ * 4. Find any JSON object by matching braces
+ * 5. Parse entire response as JSON
+ *
+ * @param responseText - The raw response text that may contain JSON
+ * @param options - Optional extraction options
+ * @returns Parsed JSON object or null if extraction fails
+ */
+export function extractJson>(
+ responseText: string,
+ options: ExtractJsonOptions = {}
+): T | null {
+ const log = options.logger || logger;
+ const requiredKey = options.requiredKey;
+ const requireArray = options.requireArray ?? false;
+
+ /**
+ * Validate that the result has the required key/structure
+ */
+ const validateResult = (result: unknown): result is T => {
+ if (!result || typeof result !== 'object') return false;
+ if (requiredKey) {
+ const obj = result as Record;
+ if (!(requiredKey in obj)) return false;
+ if (requireArray && !Array.isArray(obj[requiredKey])) return false;
+ }
+ return true;
+ };
+
+ /**
+ * Find matching closing brace by counting brackets
+ */
+ const findMatchingBrace = (text: string, startIdx: number): number => {
+ let depth = 0;
+ for (let i = startIdx; i < text.length; i++) {
+ if (text[i] === '{') depth++;
+ if (text[i] === '}') {
+ depth--;
+ if (depth === 0) {
+ return i + 1;
+ }
+ }
+ }
+ return -1;
+ };
+
+ const strategies = [
+ // Strategy 1: JSON in ```json code block
+ () => {
+ const match = responseText.match(/```json\s*([\s\S]*?)```/);
+ if (match) {
+ log.debug('Extracting JSON from ```json code block');
+ return JSON.parse(match[1].trim());
+ }
+ return null;
+ },
+
+ // Strategy 2: JSON in ``` code block (no language specified)
+ () => {
+ const match = responseText.match(/```\s*([\s\S]*?)```/);
+ if (match) {
+ const content = match[1].trim();
+ // Only try if it looks like JSON (starts with { or [)
+ if (content.startsWith('{') || content.startsWith('[')) {
+ log.debug('Extracting JSON from ``` code block');
+ return JSON.parse(content);
+ }
+ }
+ return null;
+ },
+
+ // Strategy 3: Find JSON object containing the required key (if specified)
+ () => {
+ if (!requiredKey) return null;
+
+ const searchPattern = `{"${requiredKey}"`;
+ const startIdx = responseText.indexOf(searchPattern);
+ if (startIdx === -1) return null;
+
+ const endIdx = findMatchingBrace(responseText, startIdx);
+ if (endIdx > startIdx) {
+ log.debug(`Extracting JSON with required key "${requiredKey}"`);
+ return JSON.parse(responseText.slice(startIdx, endIdx));
+ }
+ return null;
+ },
+
+ // Strategy 4: Find any JSON object by matching braces
+ () => {
+ const startIdx = responseText.indexOf('{');
+ if (startIdx === -1) return null;
+
+ const endIdx = findMatchingBrace(responseText, startIdx);
+ if (endIdx > startIdx) {
+ log.debug('Extracting JSON by brace matching');
+ return JSON.parse(responseText.slice(startIdx, endIdx));
+ }
+ return null;
+ },
+
+ // Strategy 5: Find JSON using first { to last } (may be less accurate)
+ () => {
+ const firstBrace = responseText.indexOf('{');
+ const lastBrace = responseText.lastIndexOf('}');
+ if (firstBrace !== -1 && lastBrace > firstBrace) {
+ log.debug('Extracting JSON from first { to last }');
+ return JSON.parse(responseText.slice(firstBrace, lastBrace + 1));
+ }
+ return null;
+ },
+
+ // Strategy 6: Try parsing the entire response as JSON
+ () => {
+ const trimmed = responseText.trim();
+ if (trimmed.startsWith('{') || trimmed.startsWith('[')) {
+ log.debug('Parsing entire response as JSON');
+ return JSON.parse(trimmed);
+ }
+ return null;
+ },
+ ];
+
+ for (const strategy of strategies) {
+ try {
+ const result = strategy();
+ if (validateResult(result)) {
+ log.debug('Successfully extracted JSON');
+ return result as T;
+ }
+ } catch {
+ // Strategy failed, try next
+ }
+ }
+
+ log.debug('Failed to extract JSON from response');
+ return null;
+}
+
+/**
+ * Extract JSON with a specific required key.
+ * Convenience wrapper around extractJson.
+ *
+ * @param responseText - The raw response text
+ * @param requiredKey - Key that must be present in the extracted JSON
+ * @param options - Additional options
+ * @returns Parsed JSON object or null
+ */
+export function extractJsonWithKey>(
+ responseText: string,
+ requiredKey: string,
+ options: Omit = {}
+): T | null {
+ return extractJson(responseText, { ...options, requiredKey });
+}
+
+/**
+ * Extract JSON that has a required array property.
+ * Useful for extracting responses like { "suggestions": [...] }
+ *
+ * @param responseText - The raw response text
+ * @param arrayKey - Key that must contain an array
+ * @param options - Additional options
+ * @returns Parsed JSON object or null
+ */
+export function extractJsonWithArray>(
+ responseText: string,
+ arrayKey: string,
+ options: Omit = {}
+): T | null {
+ return extractJson(responseText, { ...options, requiredKey: arrayKey, requireArray: true });
+}
diff --git a/apps/server/src/lib/permission-enforcer.ts b/apps/server/src/lib/permission-enforcer.ts
new file mode 100644
index 00000000..003608ee
--- /dev/null
+++ b/apps/server/src/lib/permission-enforcer.ts
@@ -0,0 +1,173 @@
+/**
+ * Permission enforcement utilities for Cursor provider
+ */
+
+import type { CursorCliConfigFile } from '@automaker/types';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('PermissionEnforcer');
+
/** Outcome of a permission check for a single tool call. */
export interface PermissionCheckResult {
  // True when the tool call may proceed.
  allowed: boolean;
  // Human-readable explanation, present only when the call was blocked.
  reason?: string;
}
+
+/**
+ * Check if a tool call is allowed based on permissions
+ */
+export function checkToolCallPermission(
+ toolCall: any,
+ permissions: CursorCliConfigFile | null
+): PermissionCheckResult {
+ if (!permissions || !permissions.permissions) {
+ // If no permissions are configured, allow everything (backward compatibility)
+ return { allowed: true };
+ }
+
+ const { allow = [], deny = [] } = permissions.permissions;
+
+ // Check shell tool calls
+ if (toolCall.shellToolCall?.args?.command) {
+ const command = toolCall.shellToolCall.args.command;
+ const toolName = `Shell(${extractCommandName(command)})`;
+
+ // Check deny list first (deny takes precedence)
+ for (const denyRule of deny) {
+ if (matchesRule(toolName, denyRule)) {
+ return {
+ allowed: false,
+ reason: `Operation blocked by permission rule: ${denyRule}`,
+ };
+ }
+ }
+
+ // Then check allow list
+ for (const allowRule of allow) {
+ if (matchesRule(toolName, allowRule)) {
+ return { allowed: true };
+ }
+ }
+
+ return {
+ allowed: false,
+ reason: `Operation not in allow list: ${toolName}`,
+ };
+ }
+
+ // Check read tool calls
+ if (toolCall.readToolCall?.args?.path) {
+ const path = toolCall.readToolCall.args.path;
+ const toolName = `Read(${path})`;
+
+ // Check deny list first
+ for (const denyRule of deny) {
+ if (matchesRule(toolName, denyRule)) {
+ return {
+ allowed: false,
+ reason: `Read operation blocked by permission rule: ${denyRule}`,
+ };
+ }
+ }
+
+ // Then check allow list
+ for (const allowRule of allow) {
+ if (matchesRule(toolName, allowRule)) {
+ return { allowed: true };
+ }
+ }
+
+ return {
+ allowed: false,
+ reason: `Read operation not in allow list: ${toolName}`,
+ };
+ }
+
+ // Check write tool calls
+ if (toolCall.writeToolCall?.args?.path) {
+ const path = toolCall.writeToolCall.args.path;
+ const toolName = `Write(${path})`;
+
+ // Check deny list first
+ for (const denyRule of deny) {
+ if (matchesRule(toolName, denyRule)) {
+ return {
+ allowed: false,
+ reason: `Write operation blocked by permission rule: ${denyRule}`,
+ };
+ }
+ }
+
+ // Then check allow list
+ for (const allowRule of allow) {
+ if (matchesRule(toolName, allowRule)) {
+ return { allowed: true };
+ }
+ }
+
+ return {
+ allowed: false,
+ reason: `Write operation not in allow list: ${toolName}`,
+ };
+ }
+
+ // For other tool types, allow by default for now
+ return { allowed: true };
+}
+
+/**
+ * Extract the base command name from a shell command
+ */
+function extractCommandName(command: string): string {
+ // Remove leading spaces and get the first word
+ const trimmed = command.trim();
+ const firstWord = trimmed.split(/\s+/)[0];
+ return firstWord || 'unknown';
+}
+
+/**
+ * Check if a tool name matches a permission rule
+ */
+function matchesRule(toolName: string, rule: string): boolean {
+ // Exact match
+ if (toolName === rule) {
+ return true;
+ }
+
+ // Wildcard patterns
+ if (rule.includes('*')) {
+ const regex = new RegExp(rule.replace(/\*/g, '.*'));
+ return regex.test(toolName);
+ }
+
+ // Prefix match for shell commands (e.g., "Shell(git)" matches "Shell(git status)")
+ if (rule.startsWith('Shell(') && toolName.startsWith('Shell(')) {
+ const ruleCommand = rule.slice(6, -1); // Remove "Shell(" and ")"
+ const toolCommand = extractCommandName(toolName.slice(6, -1)); // Remove "Shell(" and ")"
+ return toolCommand.startsWith(ruleCommand);
+ }
+
+ return false;
+}
+
+/**
+ * Log permission violations
+ */
+export function logPermissionViolation(toolCall: any, reason: string, sessionId?: string): void {
+ const sessionIdStr = sessionId ? ` [${sessionId}]` : '';
+
+ if (toolCall.shellToolCall?.args?.command) {
+ logger.warn(
+ `Permission violation${sessionIdStr}: Shell command blocked - ${toolCall.shellToolCall.args.command} (${reason})`
+ );
+ } else if (toolCall.readToolCall?.args?.path) {
+ logger.warn(
+ `Permission violation${sessionIdStr}: Read operation blocked - ${toolCall.readToolCall.args.path} (${reason})`
+ );
+ } else if (toolCall.writeToolCall?.args?.path) {
+ logger.warn(
+ `Permission violation${sessionIdStr}: Write operation blocked - ${toolCall.writeToolCall.args.path} (${reason})`
+ );
+ } else {
+ logger.warn(`Permission violation${sessionIdStr}: Tool call blocked (${reason})`, { toolCall });
+ }
+}
diff --git a/apps/server/src/lib/sdk-options.ts b/apps/server/src/lib/sdk-options.ts
index d9b78398..4d3e670f 100644
--- a/apps/server/src/lib/sdk-options.ts
+++ b/apps/server/src/lib/sdk-options.ts
@@ -16,12 +16,82 @@
*/
import type { Options } from '@anthropic-ai/claude-agent-sdk';
-import os from 'os';
import path from 'path';
import { resolveModelString } from '@automaker/model-resolver';
-import { DEFAULT_MODELS, CLAUDE_MODEL_MAP, type McpServerConfig } from '@automaker/types';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('SdkOptions');
+import {
+ DEFAULT_MODELS,
+ CLAUDE_MODEL_MAP,
+ type McpServerConfig,
+ type ThinkingLevel,
+ getThinkingTokenBudget,
+} from '@automaker/types';
import { isPathAllowed, PathNotAllowedError, getAllowedRootDirectory } from '@automaker/platform';
/**
 * Result of sandbox compatibility check.
 * Returned by checkSandboxCompatibility below.
 */
export interface SandboxCompatibilityResult {
  /** Whether sandbox mode can be enabled for this path */
  enabled: boolean;
  /** Optional message explaining why sandbox is disabled */
  message?: string;
}
+
+/**
+ * Check if a working directory is compatible with sandbox mode.
+ * Some paths (like cloud storage mounts) may not work with sandboxed execution.
+ *
+ * @param cwd - The working directory to check
+ * @param sandboxRequested - Whether sandbox mode was requested by settings
+ * @returns Object indicating if sandbox can be enabled and why not if disabled
+ */
+export function checkSandboxCompatibility(
+ cwd: string,
+ sandboxRequested: boolean
+): SandboxCompatibilityResult {
+ if (!sandboxRequested) {
+ return { enabled: false };
+ }
+
+ const resolvedCwd = path.resolve(cwd);
+
+ // Check for cloud storage paths that may not be compatible with sandbox
+ const cloudStoragePatterns = [
+ // macOS mounted volumes
+ /^\/Volumes\/GoogleDrive/i,
+ /^\/Volumes\/Dropbox/i,
+ /^\/Volumes\/OneDrive/i,
+ /^\/Volumes\/iCloud/i,
+ // macOS home directory
+ /^\/Users\/[^/]+\/Google Drive/i,
+ /^\/Users\/[^/]+\/Dropbox/i,
+ /^\/Users\/[^/]+\/OneDrive/i,
+ /^\/Users\/[^/]+\/Library\/Mobile Documents/i, // iCloud
+ // Linux home directory
+ /^\/home\/[^/]+\/Google Drive/i,
+ /^\/home\/[^/]+\/Dropbox/i,
+ /^\/home\/[^/]+\/OneDrive/i,
+ // Windows
+ /^C:\\Users\\[^\\]+\\Google Drive/i,
+ /^C:\\Users\\[^\\]+\\Dropbox/i,
+ /^C:\\Users\\[^\\]+\\OneDrive/i,
+ ];
+
+ for (const pattern of cloudStoragePatterns) {
+ if (pattern.test(resolvedCwd)) {
+ return {
+ enabled: false,
+ message: `Sandbox disabled: Cloud storage path detected (${resolvedCwd}). Sandbox mode may not work correctly with cloud-synced directories.`,
+ };
+ }
+ }
+
+ return { enabled: true };
+}
+
/**
* Validate that a working directory is allowed by ALLOWED_ROOT_DIRECTORY.
* This is the centralized security check for ALL AI model invocations.
@@ -48,128 +118,6 @@ export function validateWorkingDirectory(cwd: string): void {
}
}
-/**
- * Known cloud storage path patterns where sandbox mode is incompatible.
- *
- * The Claude CLI sandbox feature uses filesystem isolation that conflicts with
- * cloud storage providers' virtual filesystem implementations. This causes the
- * Claude process to exit with code 1 when sandbox is enabled for these paths.
- *
- * Affected providers (macOS paths):
- * - Dropbox: ~/Library/CloudStorage/Dropbox-*
- * - Google Drive: ~/Library/CloudStorage/GoogleDrive-*
- * - OneDrive: ~/Library/CloudStorage/OneDrive-*
- * - iCloud Drive: ~/Library/Mobile Documents/
- * - Box: ~/Library/CloudStorage/Box-*
- *
- * @see https://github.com/anthropics/claude-code/issues/XXX (TODO: file upstream issue)
- */
-
-/**
- * macOS-specific cloud storage patterns that appear under ~/Library/
- * These are specific enough to use with includes() safely.
- */
-const MACOS_CLOUD_STORAGE_PATTERNS = [
- '/Library/CloudStorage/', // Dropbox, Google Drive, OneDrive, Box on macOS
- '/Library/Mobile Documents/', // iCloud Drive on macOS
-] as const;
-
-/**
- * Generic cloud storage folder names that need to be anchored to the home directory
- * to avoid false positives (e.g., /home/user/my-project-about-dropbox/).
- */
-const HOME_ANCHORED_CLOUD_FOLDERS = [
- 'Google Drive', // Google Drive on some systems
- 'Dropbox', // Dropbox on Linux/alternative installs
- 'OneDrive', // OneDrive on Linux/alternative installs
-] as const;
-
-/**
- * Check if a path is within a cloud storage location.
- *
- * Cloud storage providers use virtual filesystem implementations that are
- * incompatible with the Claude CLI sandbox feature, causing process crashes.
- *
- * Uses two detection strategies:
- * 1. macOS-specific patterns (under ~/Library/) - checked via includes()
- * 2. Generic folder names - anchored to home directory to avoid false positives
- *
- * @param cwd - The working directory path to check
- * @returns true if the path is in a cloud storage location
- */
-export function isCloudStoragePath(cwd: string): boolean {
- const resolvedPath = path.resolve(cwd);
-
- // Check macOS-specific patterns (these are specific enough to use includes)
- if (MACOS_CLOUD_STORAGE_PATTERNS.some((pattern) => resolvedPath.includes(pattern))) {
- return true;
- }
-
- // Check home-anchored patterns to avoid false positives
- // e.g., /home/user/my-project-about-dropbox/ should NOT match
- const home = os.homedir();
- for (const folder of HOME_ANCHORED_CLOUD_FOLDERS) {
- const cloudPath = path.join(home, folder);
- // Check if resolved path starts with the cloud storage path followed by a separator
- // This ensures we match ~/Dropbox/project but not ~/Dropbox-archive or ~/my-dropbox-tool
- if (resolvedPath === cloudPath || resolvedPath.startsWith(cloudPath + path.sep)) {
- return true;
- }
- }
-
- return false;
-}
-
-/**
- * Result of sandbox compatibility check
- */
-export interface SandboxCheckResult {
- /** Whether sandbox should be enabled */
- enabled: boolean;
- /** If disabled, the reason why */
- disabledReason?: 'cloud_storage' | 'user_setting';
- /** Human-readable message for logging/UI */
- message?: string;
-}
-
-/**
- * Determine if sandbox mode should be enabled for a given configuration.
- *
- * Sandbox mode is automatically disabled for cloud storage paths because the
- * Claude CLI sandbox feature is incompatible with virtual filesystem
- * implementations used by cloud storage providers (Dropbox, Google Drive, etc.).
- *
- * @param cwd - The working directory
- * @param enableSandboxMode - User's sandbox mode setting
- * @returns SandboxCheckResult with enabled status and reason if disabled
- */
-export function checkSandboxCompatibility(
- cwd: string,
- enableSandboxMode?: boolean
-): SandboxCheckResult {
- // User has explicitly disabled sandbox mode
- if (enableSandboxMode === false) {
- return {
- enabled: false,
- disabledReason: 'user_setting',
- };
- }
-
- // Check for cloud storage incompatibility (applies when enabled or undefined)
- if (isCloudStoragePath(cwd)) {
- return {
- enabled: false,
- disabledReason: 'cloud_storage',
- message: `Sandbox mode auto-disabled: Project is in a cloud storage location (${cwd}). The Claude CLI sandbox feature is incompatible with cloud storage filesystems. To use sandbox mode, move your project to a local directory.`,
- };
- }
-
- // Sandbox is compatible and enabled (true or undefined defaults to enabled)
- return {
- enabled: true,
- };
-}
-
/**
* Tool presets for different use cases
*/
@@ -252,60 +200,51 @@ export function getModelForUseCase(
/**
* Base options that apply to all SDK calls
+ * AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
*/
 function getBaseOptions(): Partial<Options> {
return {
- permissionMode: 'acceptEdits',
+ permissionMode: 'bypassPermissions',
+ allowDangerouslySkipPermissions: true,
};
}
/**
- * MCP permission options result
+ * MCP options result
*/
-interface McpPermissionOptions {
- /** Whether tools should be restricted to a preset */
- shouldRestrictTools: boolean;
- /** Options to spread when MCP bypass is enabled */
- bypassOptions: Partial<Options>;
+interface McpOptions {
/** Options to spread for MCP servers */
 mcpServerOptions: Partial<Options>;
}
/**
* Build MCP-related options based on configuration.
- * Centralizes the logic for determining permission modes and tool restrictions
- * when MCP servers are configured.
*
* @param config - The SDK options config
- * @returns Object with MCP permission settings to spread into final options
+ * @returns Object with MCP server settings to spread into final options
*/
-function buildMcpOptions(config: CreateSdkOptionsConfig): McpPermissionOptions {
- const hasMcpServers = config.mcpServers && Object.keys(config.mcpServers).length > 0;
- // Default to true for autonomous workflow. Security is enforced when adding servers
- // via the security warning dialog that explains the risks.
- const mcpAutoApprove = config.mcpAutoApproveTools ?? true;
- const mcpUnrestricted = config.mcpUnrestrictedTools ?? true;
-
- // Determine if we should bypass permissions based on settings
- const shouldBypassPermissions = hasMcpServers && mcpAutoApprove;
- // Determine if we should restrict tools (only when no MCP or unrestricted is disabled)
- const shouldRestrictTools = !hasMcpServers || !mcpUnrestricted;
-
+function buildMcpOptions(config: CreateSdkOptionsConfig): McpOptions {
return {
- shouldRestrictTools,
- // Only include bypass options when MCP is configured and auto-approve is enabled
- bypassOptions: shouldBypassPermissions
- ? {
- permissionMode: 'bypassPermissions' as const,
- // Required flag when using bypassPermissions mode
- allowDangerouslySkipPermissions: true,
- }
- : {},
// Include MCP servers if configured
mcpServerOptions: config.mcpServers ? { mcpServers: config.mcpServers } : {},
};
}
+/**
+ * Build thinking options for SDK configuration.
+ * Converts ThinkingLevel to maxThinkingTokens for the Claude SDK.
+ *
+ * @param thinkingLevel - The thinking level to convert
+ * @returns Object with maxThinkingTokens if thinking is enabled
+ */
+function buildThinkingOptions(thinkingLevel?: ThinkingLevel): Partial<Options> {
+ const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
+ logger.debug(
+ `buildThinkingOptions: thinkingLevel="${thinkingLevel}" -> maxThinkingTokens=${maxThinkingTokens}`
+ );
+ return maxThinkingTokens ? { maxThinkingTokens } : {};
+}
+
/**
* Build system prompt configuration based on autoLoadClaudeMd setting.
* When autoLoadClaudeMd is true:
@@ -387,17 +326,11 @@ export interface CreateSdkOptionsConfig {
/** Enable auto-loading of CLAUDE.md files via SDK's settingSources */
autoLoadClaudeMd?: boolean;
- /** Enable sandbox mode for bash command isolation */
- enableSandboxMode?: boolean;
-
/** MCP servers to make available to the agent */
 mcpServers?: Record<string, McpServerConfig>;
- /** Auto-approve MCP tool calls without permission prompts */
- mcpAutoApproveTools?: boolean;
-
- /** Allow unrestricted tools when MCP servers are enabled */
- mcpUnrestrictedTools?: boolean;
+ /** Extended thinking level for Claude models */
+ thinkingLevel?: ThinkingLevel;
}
// Re-export MCP types from @automaker/types for convenience
@@ -424,6 +357,9 @@ export function createSpecGenerationOptions(config: CreateSdkOptionsConfig): Opt
// Build CLAUDE.md auto-loading options if enabled
const claudeMdOptions = buildClaudeMdOptions(config);
+ // Build thinking options
+ const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
+
return {
...getBaseOptions(),
// Override permissionMode - spec generation only needs read-only tools
@@ -435,6 +371,7 @@ export function createSpecGenerationOptions(config: CreateSdkOptionsConfig): Opt
cwd: config.cwd,
allowedTools: [...TOOL_PRESETS.specGeneration],
...claudeMdOptions,
+ ...thinkingOptions,
...(config.abortController && { abortController: config.abortController }),
...(config.outputFormat && { outputFormat: config.outputFormat }),
};
@@ -456,6 +393,9 @@ export function createFeatureGenerationOptions(config: CreateSdkOptionsConfig):
// Build CLAUDE.md auto-loading options if enabled
const claudeMdOptions = buildClaudeMdOptions(config);
+ // Build thinking options
+ const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
+
return {
...getBaseOptions(),
// Override permissionMode - feature generation only needs read-only tools
@@ -465,6 +405,7 @@ export function createFeatureGenerationOptions(config: CreateSdkOptionsConfig):
cwd: config.cwd,
allowedTools: [...TOOL_PRESETS.readOnly],
...claudeMdOptions,
+ ...thinkingOptions,
...(config.abortController && { abortController: config.abortController }),
};
}
@@ -485,6 +426,9 @@ export function createSuggestionsOptions(config: CreateSdkOptionsConfig): Option
// Build CLAUDE.md auto-loading options if enabled
const claudeMdOptions = buildClaudeMdOptions(config);
+ // Build thinking options
+ const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
+
return {
...getBaseOptions(),
model: getModelForUseCase('suggestions', config.model),
@@ -492,6 +436,7 @@ export function createSuggestionsOptions(config: CreateSdkOptionsConfig): Option
cwd: config.cwd,
allowedTools: [...TOOL_PRESETS.readOnly],
...claudeMdOptions,
+ ...thinkingOptions,
...(config.abortController && { abortController: config.abortController }),
...(config.outputFormat && { outputFormat: config.outputFormat }),
};
@@ -504,7 +449,6 @@ export function createSuggestionsOptions(config: CreateSdkOptionsConfig): Option
* - Full tool access for code modification
* - Standard turns for interactive sessions
* - Model priority: explicit model > session model > chat default
- * - Sandbox mode controlled by enableSandboxMode setting (auto-disabled for cloud storage)
* - When autoLoadClaudeMd is true, uses preset mode and settingSources for CLAUDE.md loading
*/
export function createChatOptions(config: CreateSdkOptionsConfig): Options {
@@ -520,25 +464,17 @@ export function createChatOptions(config: CreateSdkOptionsConfig): Options {
// Build MCP-related options
const mcpOptions = buildMcpOptions(config);
- // Check sandbox compatibility (auto-disables for cloud storage paths)
- const sandboxCheck = checkSandboxCompatibility(config.cwd, config.enableSandboxMode);
+ // Build thinking options
+ const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
return {
...getBaseOptions(),
model: getModelForUseCase('chat', effectiveModel),
maxTurns: MAX_TURNS.standard,
cwd: config.cwd,
- // Only restrict tools if no MCP servers configured or unrestricted is disabled
- ...(mcpOptions.shouldRestrictTools && { allowedTools: [...TOOL_PRESETS.chat] }),
- // Apply MCP bypass options if configured
- ...mcpOptions.bypassOptions,
- ...(sandboxCheck.enabled && {
- sandbox: {
- enabled: true,
- autoAllowBashIfSandboxed: true,
- },
- }),
+ allowedTools: [...TOOL_PRESETS.chat],
...claudeMdOptions,
+ ...thinkingOptions,
...(config.abortController && { abortController: config.abortController }),
...mcpOptions.mcpServerOptions,
};
@@ -551,7 +487,6 @@ export function createChatOptions(config: CreateSdkOptionsConfig): Options {
* - Full tool access for code modification and implementation
* - Extended turns for thorough feature implementation
* - Uses default model (can be overridden)
- * - Sandbox mode controlled by enableSandboxMode setting (auto-disabled for cloud storage)
* - When autoLoadClaudeMd is true, uses preset mode and settingSources for CLAUDE.md loading
*/
export function createAutoModeOptions(config: CreateSdkOptionsConfig): Options {
@@ -564,25 +499,17 @@ export function createAutoModeOptions(config: CreateSdkOptionsConfig): Options {
// Build MCP-related options
const mcpOptions = buildMcpOptions(config);
- // Check sandbox compatibility (auto-disables for cloud storage paths)
- const sandboxCheck = checkSandboxCompatibility(config.cwd, config.enableSandboxMode);
+ // Build thinking options
+ const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
return {
...getBaseOptions(),
model: getModelForUseCase('auto', config.model),
maxTurns: MAX_TURNS.maximum,
cwd: config.cwd,
- // Only restrict tools if no MCP servers configured or unrestricted is disabled
- ...(mcpOptions.shouldRestrictTools && { allowedTools: [...TOOL_PRESETS.fullAccess] }),
- // Apply MCP bypass options if configured
- ...mcpOptions.bypassOptions,
- ...(sandboxCheck.enabled && {
- sandbox: {
- enabled: true,
- autoAllowBashIfSandboxed: true,
- },
- }),
+ allowedTools: [...TOOL_PRESETS.fullAccess],
...claudeMdOptions,
+ ...thinkingOptions,
...(config.abortController && { abortController: config.abortController }),
...mcpOptions.mcpServerOptions,
};
@@ -598,7 +525,6 @@ export function createCustomOptions(
config: CreateSdkOptionsConfig & {
maxTurns?: number;
allowedTools?: readonly string[];
- sandbox?: { enabled: boolean; autoAllowBashIfSandboxed?: boolean };
}
): Options {
// Validate working directory before creating options
@@ -610,23 +536,22 @@ export function createCustomOptions(
// Build MCP-related options
const mcpOptions = buildMcpOptions(config);
- // For custom options: use explicit allowedTools if provided, otherwise use preset based on MCP settings
+ // Build thinking options
+ const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
+
+ // For custom options: use explicit allowedTools if provided, otherwise default to readOnly
const effectiveAllowedTools = config.allowedTools
? [...config.allowedTools]
- : mcpOptions.shouldRestrictTools
- ? [...TOOL_PRESETS.readOnly]
- : undefined;
+ : [...TOOL_PRESETS.readOnly];
return {
...getBaseOptions(),
model: getModelForUseCase('default', config.model),
maxTurns: config.maxTurns ?? MAX_TURNS.maximum,
cwd: config.cwd,
- ...(effectiveAllowedTools && { allowedTools: effectiveAllowedTools }),
- ...(config.sandbox && { sandbox: config.sandbox }),
- // Apply MCP bypass options if configured
- ...mcpOptions.bypassOptions,
+ allowedTools: effectiveAllowedTools,
...claudeMdOptions,
+ ...thinkingOptions,
...(config.abortController && { abortController: config.abortController }),
...mcpOptions.mcpServerOptions,
};
diff --git a/apps/server/src/lib/settings-helpers.ts b/apps/server/src/lib/settings-helpers.ts
index b6e86ff2..da3c08fe 100644
--- a/apps/server/src/lib/settings-helpers.ts
+++ b/apps/server/src/lib/settings-helpers.ts
@@ -55,34 +55,6 @@ export async function getAutoLoadClaudeMdSetting(
}
}
-/**
- * Get the enableSandboxMode setting from global settings.
- * Returns false if settings service is not available.
- *
- * @param settingsService - Optional settings service instance
- * @param logPrefix - Prefix for log messages (e.g., '[AgentService]')
- * @returns Promise resolving to the enableSandboxMode setting value
- */
-export async function getEnableSandboxModeSetting(
- settingsService?: SettingsService | null,
- logPrefix = '[SettingsHelper]'
-): Promise<boolean> {
- if (!settingsService) {
- logger.info(`${logPrefix} SettingsService not available, sandbox mode disabled`);
- return false;
- }
-
- try {
- const globalSettings = await settingsService.getGlobalSettings();
- const result = globalSettings.enableSandboxMode ?? false;
- logger.info(`${logPrefix} enableSandboxMode from global settings: ${result}`);
- return result;
- } catch (error) {
- logger.error(`${logPrefix} Failed to load enableSandboxMode setting:`, error);
- throw error;
- }
-}
-
/**
* Filters out CLAUDE.md from context files when autoLoadClaudeMd is enabled
* and rebuilds the formatted prompt without it.
@@ -191,41 +163,6 @@ export async function getMCPServersFromSettings(
}
}
-/**
- * Get MCP permission settings from global settings.
- *
- * @param settingsService - Optional settings service instance
- * @param logPrefix - Prefix for log messages (e.g., '[AgentService]')
- * @returns Promise resolving to MCP permission settings
- */
-export async function getMCPPermissionSettings(
- settingsService?: SettingsService | null,
- logPrefix = '[SettingsHelper]'
-): Promise<{ mcpAutoApproveTools: boolean; mcpUnrestrictedTools: boolean }> {
- // Default to true for autonomous workflow. Security is enforced when adding servers
- // via the security warning dialog that explains the risks.
- const defaults = { mcpAutoApproveTools: true, mcpUnrestrictedTools: true };
-
- if (!settingsService) {
- return defaults;
- }
-
- try {
- const globalSettings = await settingsService.getGlobalSettings();
- const result = {
- mcpAutoApproveTools: globalSettings.mcpAutoApproveTools ?? true,
- mcpUnrestrictedTools: globalSettings.mcpUnrestrictedTools ?? true,
- };
- logger.info(
- `${logPrefix} MCP permission settings: autoApprove=${result.mcpAutoApproveTools}, unrestricted=${result.mcpUnrestrictedTools}`
- );
- return result;
- } catch (error) {
- logger.error(`${logPrefix} Failed to load MCP permission settings:`, error);
- return defaults;
- }
-}
-
/**
* Convert a settings MCPServerConfig to SDK McpServerConfig format.
* Validates required fields and throws informative errors if missing.
@@ -304,3 +241,83 @@ export async function getPromptCustomization(
enhancement: mergeEnhancementPrompts(customization.enhancement),
};
}
+
+/**
+ * Get Skills configuration from settings.
+ * Returns configuration for enabling skills and which sources to load from.
+ *
+ * @param settingsService - Settings service instance
+ * @returns Skills configuration with enabled state, sources, and tool inclusion flag
+ */
+export async function getSkillsConfiguration(settingsService: SettingsService): Promise<{
+ enabled: boolean;
+ sources: Array<'user' | 'project'>;
+ shouldIncludeInTools: boolean;
+}> {
+ const settings = await settingsService.getGlobalSettings();
+ const enabled = settings.enableSkills ?? true; // Default enabled
+ const sources = settings.skillsSources ?? ['user', 'project']; // Default both sources
+
+ return {
+ enabled,
+ sources,
+ shouldIncludeInTools: enabled && sources.length > 0,
+ };
+}
+
+/**
+ * Get Subagents configuration from settings.
+ * Returns configuration for enabling subagents and which sources to load from.
+ *
+ * @param settingsService - Settings service instance
+ * @returns Subagents configuration with enabled state, sources, and tool inclusion flag
+ */
+export async function getSubagentsConfiguration(settingsService: SettingsService): Promise<{
+ enabled: boolean;
+ sources: Array<'user' | 'project'>;
+ shouldIncludeInTools: boolean;
+}> {
+ const settings = await settingsService.getGlobalSettings();
+ const enabled = settings.enableSubagents ?? true; // Default enabled
+ const sources = settings.subagentsSources ?? ['user', 'project']; // Default both sources
+
+ return {
+ enabled,
+ sources,
+ shouldIncludeInTools: enabled && sources.length > 0,
+ };
+}
+
+/**
+ * Get custom subagents from settings, merging global and project-level definitions.
+ * Project-level subagents take precedence over global ones with the same name.
+ *
+ * @param settingsService - Settings service instance
+ * @param projectPath - Path to the project for loading project-specific subagents
+ * @returns Record of agent names to definitions, or undefined if none configured
+ */
+export async function getCustomSubagents(
+ settingsService: SettingsService,
+ projectPath?: string
+): Promise<Record<string, AgentDefinition> | undefined> {
+ // Get global subagents
+ const globalSettings = await settingsService.getGlobalSettings();
+ const globalSubagents = globalSettings.customSubagents || {};
+
+ // If no project path, return only global subagents
+ if (!projectPath) {
+ return Object.keys(globalSubagents).length > 0 ? globalSubagents : undefined;
+ }
+
+ // Get project-specific subagents
+ const projectSettings = await settingsService.getProjectSettings(projectPath);
+ const projectSubagents = projectSettings.customSubagents || {};
+
+ // Merge: project-level takes precedence
+ const merged = {
+ ...globalSubagents,
+ ...projectSubagents,
+ };
+
+ return Object.keys(merged).length > 0 ? merged : undefined;
+}
diff --git a/apps/server/src/lib/version.ts b/apps/server/src/lib/version.ts
index 61e182e3..93d88d48 100644
--- a/apps/server/src/lib/version.ts
+++ b/apps/server/src/lib/version.ts
@@ -5,6 +5,9 @@
import { readFileSync } from 'fs';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('Version');
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -27,7 +30,7 @@ export function getVersion(): string {
cachedVersion = version;
return version;
} catch (error) {
- console.warn('Failed to read version from package.json:', error);
+ logger.warn('Failed to read version from package.json:', error);
return '0.0.0';
}
}
diff --git a/apps/server/src/lib/worktree-metadata.ts b/apps/server/src/lib/worktree-metadata.ts
index edeadc5b..3f7ea60d 100644
--- a/apps/server/src/lib/worktree-metadata.ts
+++ b/apps/server/src/lib/worktree-metadata.ts
@@ -21,6 +21,12 @@ export interface WorktreeMetadata {
branch: string;
createdAt: string;
pr?: WorktreePRInfo;
+ /** Whether the init script has been executed for this worktree */
+ initScriptRan?: boolean;
+ /** Status of the init script execution */
+ initScriptStatus?: 'running' | 'success' | 'failed';
+ /** Error message if init script failed */
+ initScriptError?: string;
}
/**
diff --git a/apps/server/src/providers/claude-provider.ts b/apps/server/src/providers/claude-provider.ts
index 33494535..ecdd46af 100644
--- a/apps/server/src/providers/claude-provider.ts
+++ b/apps/server/src/providers/claude-provider.ts
@@ -7,7 +7,10 @@
import { query, type Options } from '@anthropic-ai/claude-agent-sdk';
import { BaseProvider } from './base-provider.js';
-import { classifyError, getUserFriendlyErrorMessage } from '@automaker/utils';
+import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
+
+const logger = createLogger('ClaudeProvider');
+import { getThinkingTokenBudget, validateBareModelId } from '@automaker/types';
import type {
ExecuteOptions,
ProviderMessage,
@@ -50,6 +53,10 @@ export class ClaudeProvider extends BaseProvider {
* Execute a query using Claude Agent SDK
*/
 async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
+ // Validate that model doesn't have a provider prefix
+ // AgentService should strip prefixes before passing to providers
+ validateBareModelId(options.model, 'ClaudeProvider');
+
const {
prompt,
model,
@@ -60,24 +67,13 @@ export class ClaudeProvider extends BaseProvider {
abortController,
conversationHistory,
sdkSessionId,
+ thinkingLevel,
} = options;
+ // Convert thinking level to token budget
+ const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
+
// Build Claude SDK options
- // MCP permission logic - determines how to handle tool permissions when MCP servers are configured.
- // This logic mirrors buildMcpOptions() in sdk-options.ts but is applied here since
- // the provider is the final point where SDK options are constructed.
- const hasMcpServers = options.mcpServers && Object.keys(options.mcpServers).length > 0;
- // Default to true for autonomous workflow. Security is enforced when adding servers
- // via the security warning dialog that explains the risks.
- const mcpAutoApprove = options.mcpAutoApproveTools ?? true;
- const mcpUnrestricted = options.mcpUnrestrictedTools ?? true;
- const defaultTools = ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'];
-
- // Determine permission mode based on settings
- const shouldBypassPermissions = hasMcpServers && mcpAutoApprove;
- // Determine if we should restrict tools (only when no MCP or unrestricted is disabled)
- const shouldRestrictTools = !hasMcpServers || !mcpUnrestricted;
-
const sdkOptions: Options = {
model,
systemPrompt,
@@ -85,13 +81,11 @@ export class ClaudeProvider extends BaseProvider {
cwd,
// Pass only explicitly allowed environment variables to SDK
env: buildEnv(),
- // Only restrict tools if explicitly set OR (no MCP / unrestricted disabled)
- ...(allowedTools && shouldRestrictTools && { allowedTools }),
- ...(!allowedTools && shouldRestrictTools && { allowedTools: defaultTools }),
- // When MCP servers are configured and auto-approve is enabled, use bypassPermissions
- permissionMode: shouldBypassPermissions ? 'bypassPermissions' : 'default',
- // Required when using bypassPermissions mode
- ...(shouldBypassPermissions && { allowDangerouslySkipPermissions: true }),
+ // Pass through allowedTools if provided by caller (decided by sdk-options.ts)
+ ...(allowedTools && { allowedTools }),
+ // AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
+ permissionMode: 'bypassPermissions',
+ allowDangerouslySkipPermissions: true,
abortController,
// Resume existing SDK session if we have a session ID
...(sdkSessionId && conversationHistory && conversationHistory.length > 0
@@ -99,10 +93,14 @@ export class ClaudeProvider extends BaseProvider {
: {}),
// Forward settingSources for CLAUDE.md file loading
...(options.settingSources && { settingSources: options.settingSources }),
- // Forward sandbox configuration
- ...(options.sandbox && { sandbox: options.sandbox }),
// Forward MCP servers configuration
...(options.mcpServers && { mcpServers: options.mcpServers }),
+ // Extended thinking configuration
+ ...(maxThinkingTokens && { maxThinkingTokens }),
+ // Subagents configuration for specialized task delegation
+ ...(options.agents && { agents: options.agents }),
+ // Pass through outputFormat for structured JSON outputs
+ ...(options.outputFormat && { outputFormat: options.outputFormat }),
};
// Build prompt payload
@@ -140,7 +138,7 @@ export class ClaudeProvider extends BaseProvider {
const errorInfo = classifyError(error);
const userMessage = getUserFriendlyErrorMessage(error);
- console.error('[ClaudeProvider] executeQuery() error during execution:', {
+ logger.error('executeQuery() error during execution:', {
type: errorInfo.type,
message: errorInfo.message,
isRateLimit: errorInfo.isRateLimit,
diff --git a/apps/server/src/providers/cli-provider.ts b/apps/server/src/providers/cli-provider.ts
new file mode 100644
index 00000000..7e0599f9
--- /dev/null
+++ b/apps/server/src/providers/cli-provider.ts
@@ -0,0 +1,558 @@
+/**
+ * CliProvider - Abstract base class for CLI-based AI providers
+ *
+ * Provides common infrastructure for CLI tools that spawn subprocesses
+ * and stream JSONL output. Handles:
+ * - Platform-specific CLI detection (PATH, common locations)
+ * - Windows execution strategies (WSL, npx, direct, cmd)
+ * - JSONL subprocess spawning and streaming
+ * - Error mapping infrastructure
+ *
+ * @example
+ * ```typescript
+ * class CursorProvider extends CliProvider {
+ * getCliName(): string { return 'cursor-agent'; }
+ * getSpawnConfig(): CliSpawnConfig {
+ * return {
+ * windowsStrategy: 'wsl',
+ * commonPaths: {
+ * linux: ['~/.local/bin/cursor-agent'],
+ * darwin: ['~/.local/bin/cursor-agent'],
+ * }
+ * };
+ * }
+ * // ... implement abstract methods
+ * }
+ * ```
+ */
+
+import { execSync } from 'child_process';
+import * as fs from 'fs';
+import * as path from 'path';
+import * as os from 'os';
+import { BaseProvider } from './base-provider.js';
+import type { ProviderConfig, ExecuteOptions, ProviderMessage } from './types.js';
+import {
+ spawnJSONLProcess,
+ type SubprocessOptions,
+ isWslAvailable,
+ findCliInWsl,
+ createWslCommand,
+ windowsToWslPath,
+ type WslCliResult,
+} from '@automaker/platform';
+import { createLogger, isAbortError } from '@automaker/utils';
+
+/**
+ * Spawn strategy for CLI tools on Windows
+ *
+ * Different CLI tools require different execution strategies:
+ * - 'wsl': Requires WSL, CLI only available on Linux/macOS (e.g., cursor-agent)
+ * - 'npx': Installed globally via npm/npx, use `npx ` to run
+ * - 'direct': Native Windows binary, can spawn directly
+ * - 'cmd': Windows batch file (.cmd/.bat), needs cmd.exe shell
+ */
+export type SpawnStrategy = 'wsl' | 'npx' | 'direct' | 'cmd';
+
+/**
+ * Configuration for CLI tool spawning
+ */
+export interface CliSpawnConfig {
+ /** How to spawn on Windows */
+ windowsStrategy: SpawnStrategy;
+
+ /** NPX package name (required if windowsStrategy is 'npx') */
+ npxPackage?: string;
+
+ /** Preferred WSL distribution (if windowsStrategy is 'wsl') */
+ wslDistribution?: string;
+
+ /**
+ * Common installation paths per platform
+ * Use ~ for home directory (will be expanded)
+ * Keys: 'linux', 'darwin', 'win32'
+ */
+ commonPaths: Partial<Record<'linux' | 'darwin' | 'win32', string[]>>;
+
+ /** Version check command (defaults to --version) */
+ versionCommand?: string;
+}
+
+/**
+ * CLI error information for consistent error handling
+ */
+export interface CliErrorInfo {
+ code: string;
+ message: string;
+ recoverable: boolean;
+ suggestion?: string;
+}
+
+/**
+ * Detection result from CLI path finding
+ */
+export interface CliDetectionResult {
+ /** Path to the CLI (or 'npx' for npx strategy) */
+ cliPath: string | null;
+ /** Whether using WSL mode */
+ useWsl: boolean;
+ /** WSL path if using WSL */
+ wslCliPath?: string;
+ /** WSL distribution if using WSL */
+ wslDistribution?: string;
+ /** Detected strategy used */
+ strategy: SpawnStrategy | 'native';
+}
+
+// Create logger for CLI operations
+const cliLogger = createLogger('CliProvider');
+
+/**
+ * Abstract base class for CLI-based providers
+ *
+ * Subclasses must implement:
+ * - getCliName(): CLI executable name
+ * - getSpawnConfig(): Platform-specific spawn configuration
+ * - buildCliArgs(): Convert ExecuteOptions to CLI arguments
+ * - normalizeEvent(): Convert CLI output to ProviderMessage
+ */
+export abstract class CliProvider extends BaseProvider {
+ // CLI detection results (cached after first detection)
+ protected cliPath: string | null = null;
+ protected useWsl: boolean = false;
+ protected wslCliPath: string | null = null;
+ protected wslDistribution: string | undefined = undefined;
+ protected detectedStrategy: SpawnStrategy | 'native' = 'native';
+
+ // NPX args (used when strategy is 'npx')
+ protected npxArgs: string[] = [];
+
+ constructor(config: ProviderConfig = {}) {
+ super(config);
+ // Detection happens lazily on first use
+ }
+
+ // ==========================================================================
+ // Abstract methods - must be implemented by subclasses
+ // ==========================================================================
+
+ /**
+ * Get the CLI executable name (e.g., 'cursor-agent', 'aider')
+ */
+ abstract getCliName(): string;
+
+ /**
+ * Get spawn configuration for this CLI
+ */
+ abstract getSpawnConfig(): CliSpawnConfig;
+
+ /**
+ * Build CLI arguments from execution options
+ * @param options Execution options
+ * @returns Array of CLI arguments
+ */
+ abstract buildCliArgs(options: ExecuteOptions): string[];
+
+ /**
+ * Normalize a raw CLI event to ProviderMessage format
+ * @param event Raw event from CLI JSONL output
+ * @returns Normalized ProviderMessage or null to skip
+ */
+ abstract normalizeEvent(event: unknown): ProviderMessage | null;
+
+ // ==========================================================================
+ // Optional overrides
+ // ==========================================================================
+
+ /**
+ * Map CLI stderr/exit code to error info
+ * Override to provide CLI-specific error mapping
+ */
+ protected mapError(stderr: string, exitCode: number | null): CliErrorInfo {
+ const lower = stderr.toLowerCase();
+
+ // Common authentication errors
+ if (
+ lower.includes('not authenticated') ||
+ lower.includes('please log in') ||
+ lower.includes('unauthorized')
+ ) {
+ return {
+ code: 'NOT_AUTHENTICATED',
+ message: `${this.getCliName()} is not authenticated`,
+ recoverable: true,
+ suggestion: `Run "${this.getCliName()} login" to authenticate`,
+ };
+ }
+
+ // Rate limiting
+ if (
+ lower.includes('rate limit') ||
+ lower.includes('too many requests') ||
+ lower.includes('429')
+ ) {
+ return {
+ code: 'RATE_LIMITED',
+ message: 'API rate limit exceeded',
+ recoverable: true,
+ suggestion: 'Wait a few minutes and try again',
+ };
+ }
+
+ // Network errors
+ if (
+ lower.includes('network') ||
+ lower.includes('connection') ||
+ lower.includes('econnrefused') ||
+ lower.includes('timeout')
+ ) {
+ return {
+ code: 'NETWORK_ERROR',
+ message: 'Network connection error',
+ recoverable: true,
+ suggestion: 'Check your internet connection and try again',
+ };
+ }
+
+ // Process killed
+ if (exitCode === 137 || lower.includes('killed') || lower.includes('sigterm')) {
+ return {
+ code: 'PROCESS_CRASHED',
+ message: 'Process was terminated',
+ recoverable: true,
+ suggestion: 'The process may have run out of memory. Try a simpler task.',
+ };
+ }
+
+ // Generic error
+ return {
+ code: 'UNKNOWN_ERROR',
+ message: stderr || `Process exited with code ${exitCode}`,
+ recoverable: false,
+ };
+ }
+
+ /**
+ * Get installation instructions for this CLI
+ * Override to provide CLI-specific instructions
+ */
+ protected getInstallInstructions(): string {
+ const cliName = this.getCliName();
+ const config = this.getSpawnConfig();
+
+ if (process.platform === 'win32') {
+ switch (config.windowsStrategy) {
+ case 'wsl':
+ return `${cliName} requires WSL on Windows. Install WSL, then run inside WSL to install.`;
+ case 'npx':
+ return `Install with: npm install -g ${config.npxPackage || cliName}`;
+ case 'cmd':
+ case 'direct':
+ return `${cliName} is not installed. Check the documentation for installation instructions.`;
+ }
+ }
+
+ return `${cliName} is not installed. Check the documentation for installation instructions.`;
+ }
+
+ // ==========================================================================
+ // CLI Detection
+ // ==========================================================================
+
+ /**
+ * Expand ~ to home directory in path
+ */
+ private expandPath(p: string): string {
+ if (p.startsWith('~')) {
+ return path.join(os.homedir(), p.slice(1));
+ }
+ return p;
+ }
+
+ /**
+ * Find CLI in PATH using 'which' (Unix) or 'where' (Windows)
+ */
+ private findCliInPath(): string | null {
+ const cliName = this.getCliName();
+
+ try {
+ const command = process.platform === 'win32' ? 'where' : 'which';
+ const result = execSync(`${command} ${cliName}`, {
+ encoding: 'utf8',
+ timeout: 5000,
+ stdio: ['pipe', 'pipe', 'pipe'],
+ windowsHide: true,
+ })
+ .trim()
+ .split('\n')[0];
+
+ if (result && fs.existsSync(result)) {
+ cliLogger.debug(`Found ${cliName} in PATH: ${result}`);
+ return result;
+ }
+ } catch {
+ // Not in PATH
+ }
+
+ return null;
+ }
+
+ /**
+ * Find CLI in common installation paths for current platform
+ */
+ private findCliInCommonPaths(): string | null {
+ const config = this.getSpawnConfig();
+ const cliName = this.getCliName();
+ const platform = process.platform as 'linux' | 'darwin' | 'win32';
+ const paths = config.commonPaths[platform] || [];
+
+ for (const p of paths) {
+ const expandedPath = this.expandPath(p);
+ if (fs.existsSync(expandedPath)) {
+ cliLogger.debug(`Found ${cliName} at: ${expandedPath}`);
+ return expandedPath;
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Detect the CLI installation using the appropriate strategy.
+ *
+ * On Windows the strategy comes from spawn config: 'wsl' delegates to a
+ * CLI installed inside WSL, 'npx' defers resolution to npx at spawn time,
+ * and 'direct'/'cmd' look for a native binary (PATH first, then common
+ * install paths). On Linux/macOS resolution is always native (PATH, then
+ * common paths). A null cliPath in the result means "not found".
+ */
+ protected detectCli(): CliDetectionResult {
+ const config = this.getSpawnConfig();
+ const cliName = this.getCliName();
+ const wslLogger = (msg: string) => cliLogger.debug(msg);
+
+ // Windows - use configured strategy
+ if (process.platform === 'win32') {
+ switch (config.windowsStrategy) {
+ case 'wsl': {
+ // Check WSL for CLI
+ if (isWslAvailable({ logger: wslLogger })) {
+ const wslResult: WslCliResult | null = findCliInWsl(cliName, {
+ logger: wslLogger,
+ distribution: config.wslDistribution,
+ });
+ if (wslResult) {
+ cliLogger.debug(
+ `Using ${cliName} via WSL (${wslResult.distribution || 'default'}): ${wslResult.wslPath}`
+ );
+ // wsl.exe is the spawn target; the actual CLI path lives in wslCliPath.
+ return {
+ cliPath: 'wsl.exe',
+ useWsl: true,
+ wslCliPath: wslResult.wslPath,
+ wslDistribution: wslResult.distribution,
+ strategy: 'wsl',
+ };
+ }
+ }
+ cliLogger.debug(`${cliName} not found (WSL not available or CLI not installed in WSL)`);
+ return { cliPath: null, useWsl: false, strategy: 'wsl' };
+ }
+
+ case 'npx': {
+ // For npx, we don't need to find the CLI, just return npx
+ cliLogger.debug(`Using ${cliName} via npx (package: ${config.npxPackage})`);
+ return {
+ cliPath: 'npx',
+ useWsl: false,
+ strategy: 'npx',
+ };
+ }
+
+ case 'direct':
+ case 'cmd': {
+ // Native Windows - check PATH and common paths
+ const pathResult = this.findCliInPath();
+ if (pathResult) {
+ return { cliPath: pathResult, useWsl: false, strategy: config.windowsStrategy };
+ }
+
+ const commonResult = this.findCliInCommonPaths();
+ if (commonResult) {
+ return { cliPath: commonResult, useWsl: false, strategy: config.windowsStrategy };
+ }
+
+ cliLogger.debug(`${cliName} not found on Windows`);
+ return { cliPath: null, useWsl: false, strategy: config.windowsStrategy };
+ }
+ }
+ }
+
+ // Linux/macOS - native execution: PATH lookup first, then common install dirs
+ const pathResult = this.findCliInPath();
+ if (pathResult) {
+ return { cliPath: pathResult, useWsl: false, strategy: 'native' };
+ }
+
+ const commonResult = this.findCliInCommonPaths();
+ if (commonResult) {
+ return { cliPath: commonResult, useWsl: false, strategy: 'native' };
+ }
+
+ cliLogger.debug(`${cliName} not found`);
+ return { cliPath: null, useWsl: false, strategy: 'native' };
+ }
+
+ /**
+ * Ensure CLI detection has run (lazy initialization).
+ *
+ * Guard: detection is treated as "already done" when a cliPath exists OR
+ * the strategy differs from 'native'. NOTE(review): this assumes
+ * detectedStrategy is initialized to 'native'; a native detection that
+ * found nothing leaves cliPath null and strategy 'native', so detection
+ * re-runs on every call in that case - confirm this is intended.
+ */
+ protected ensureCliDetected(): void {
+ if (this.cliPath !== null || this.detectedStrategy !== 'native') {
+ return; // Already detected
+ }
+
+ const result = this.detectCli();
+ this.cliPath = result.cliPath;
+ this.useWsl = result.useWsl;
+ this.wslCliPath = result.wslCliPath || null;
+ this.wslDistribution = result.wslDistribution;
+ this.detectedStrategy = result.strategy;
+
+ // Set up npx args if using npx strategy
+ const config = this.getSpawnConfig();
+ if (result.strategy === 'npx' && config.npxPackage) {
+ this.npxArgs = [config.npxPackage];
+ }
+ }
+
+ /**
+ * Check if CLI is installed
+ */
+ async isInstalled(): Promise {
+ this.ensureCliDetected();
+ return this.cliPath !== null;
+ }
+
+ // ==========================================================================
+ // Subprocess Spawning
+ // ==========================================================================
+
+  /**
+   * Build subprocess options for the detected execution strategy.
+   *
+   * - WSL: spawn wsl.exe with `--cd` pointing at the WSL-translated cwd.
+   * - npx: spawn npx with the configured package prepended to the CLI args.
+   * - direct: spawn the resolved CLI binary as-is.
+   *
+   * @param options - Execution options (cwd, abort controller, ...)
+   * @param cliArgs - Arguments destined for the CLI itself
+   * @throws When no CLI installation was detected.
+   */
+  protected buildSubprocessOptions(options: ExecuteOptions, cliArgs: string[]): SubprocessOptions {
+    this.ensureCliDetected();
+
+    if (!this.cliPath) {
+      throw new Error(`${this.getCliName()} CLI not found. ${this.getInstallInstructions()}`);
+    }
+
+    const cwd = options.cwd || process.cwd();
+
+    // process.env values are `string | undefined`; drop undefined entries so
+    // the child receives a clean string-to-string map.
+    const filteredEnv: Record<string, string> = {};
+    for (const [key, value] of Object.entries(process.env)) {
+      if (value !== undefined) {
+        filteredEnv[key] = value;
+      }
+    }
+
+    // WSL strategy
+    if (this.useWsl && this.wslCliPath) {
+      const wslCwd = windowsToWslPath(cwd);
+      const wslCmd = createWslCommand(this.wslCliPath, cliArgs, {
+        distribution: this.wslDistribution,
+      });
+
+      // Add --cd flag to change directory inside WSL
+      let args: string[];
+      if (this.wslDistribution) {
+        args = ['-d', this.wslDistribution, '--cd', wslCwd, this.wslCliPath, ...cliArgs];
+      } else {
+        args = ['--cd', wslCwd, this.wslCliPath, ...cliArgs];
+      }
+
+      cliLogger.debug(`WSL spawn: ${wslCmd.command} ${args.slice(0, 6).join(' ')}...`);
+
+      return {
+        command: wslCmd.command,
+        args,
+        cwd, // Windows cwd for spawn
+        env: filteredEnv,
+        abortController: options.abortController,
+        timeout: 120000, // CLI operations may take longer
+      };
+    }
+
+    // NPX strategy
+    if (this.detectedStrategy === 'npx') {
+      const allArgs = [...this.npxArgs, ...cliArgs];
+      cliLogger.debug(`NPX spawn: npx ${allArgs.slice(0, 6).join(' ')}...`);
+
+      return {
+        command: 'npx',
+        args: allArgs,
+        cwd,
+        env: filteredEnv,
+        abortController: options.abortController,
+        timeout: 120000,
+      };
+    }
+
+    // Direct strategy (native Unix or Windows direct/cmd)
+    cliLogger.debug(`Direct spawn: ${this.cliPath} ${cliArgs.slice(0, 6).join(' ')}...`);
+
+    return {
+      command: this.cliPath,
+      args: cliArgs,
+      cwd,
+      env: filteredEnv,
+      abortController: options.abortController,
+      timeout: 120000,
+    };
+  }
+
+ /**
+ * Execute a query using the CLI with JSONL streaming.
+ *
+ * This is a default implementation that:
+ * 1. Builds CLI args from options
+ * 2. Spawns the subprocess with appropriate strategy
+ * 3. Streams and normalizes events
+ *
+ * Aborts end the stream silently. Failures carrying a `stderr` property
+ * are mapped through mapError() into an Error enriched with CliErrorInfo
+ * fields (code/recoverable/suggestion); other errors are rethrown as-is.
+ * Subclasses can override for custom behavior.
+ *
+ * NOTE(review): the AsyncGenerator return type lost its type arguments
+ * (presumably AsyncGenerator<ProviderMessage>) - confirm and restore.
+ */
+ async *executeQuery(options: ExecuteOptions): AsyncGenerator {
+ this.ensureCliDetected();
+
+ if (!this.cliPath) {
+ throw new Error(`${this.getCliName()} CLI not found. ${this.getInstallInstructions()}`);
+ }
+
+ const cliArgs = this.buildCliArgs(options);
+ const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
+
+ try {
+ for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
+ const normalized = this.normalizeEvent(rawEvent);
+ if (normalized) {
+ yield normalized;
+ }
+ }
+ } catch (error) {
+ if (isAbortError(error)) {
+ cliLogger.debug('Query aborted');
+ return;
+ }
+
+ // Map CLI errors
+ if (error instanceof Error && 'stderr' in error) {
+ const errorInfo = this.mapError(
+ (error as { stderr?: string }).stderr || error.message,
+ (error as { exitCode?: number | null }).exitCode ?? null
+ );
+
+ const cliError = new Error(errorInfo.message) as Error & CliErrorInfo;
+ cliError.code = errorInfo.code;
+ cliError.recoverable = errorInfo.recoverable;
+ cliError.suggestion = errorInfo.suggestion;
+ throw cliError;
+ }
+
+ throw error;
+ }
+ }
+}
diff --git a/apps/server/src/providers/codex-config-manager.ts b/apps/server/src/providers/codex-config-manager.ts
new file mode 100644
index 00000000..33031c4a
--- /dev/null
+++ b/apps/server/src/providers/codex-config-manager.ts
@@ -0,0 +1,85 @@
+/**
+ * Codex Config Manager - Writes MCP server configuration for Codex CLI
+ */
+
+import path from 'path';
+import type { McpServerConfig } from '@automaker/types';
+import * as secureFs from '../lib/secure-fs.js';
+
+// Project-relative location of the Codex CLI config file and the TOML table
+// that holds MCP server definitions: <cwd>/.codex/config.toml -> [mcp_servers.*]
+const CODEX_CONFIG_DIR = '.codex';
+const CODEX_CONFIG_FILENAME = 'config.toml';
+const CODEX_MCP_SECTION = 'mcp_servers';
+
+/**
+ * Render a string as a TOML basic (double-quoted) string. JSON string
+ * escaping produces output that is also a valid TOML basic string.
+ */
+function formatTomlString(value: string): string {
+  const quoted = JSON.stringify(value);
+  return quoted;
+}
+
+/**
+ * Render a list of strings as a TOML inline array, e.g. ["a", "b"].
+ */
+function formatTomlArray(values: string[]): string {
+  const parts: string[] = [];
+  for (const item of values) {
+    parts.push(formatTomlString(item));
+  }
+  return `[${parts.join(', ')}]`;
+}
+
+/**
+ * Render a string map as a TOML inline table, e.g. { FOO = "bar" }.
+ *
+ * Restores the stripped `Record<string, string>` parameter type. Keys that
+ * are not valid TOML bare keys (A-Za-z0-9_-) are emitted quoted; previously
+ * they were written bare, producing unparseable TOML for keys containing
+ * dots, spaces, etc.
+ */
+function formatTomlInlineTable(values: Record<string, string>): string {
+  const entries = Object.entries(values).map(([key, value]) => {
+    const tomlKey = /^[A-Za-z0-9_-]+$/.test(key) ? key : formatTomlKey(key);
+    return `${tomlKey} = ${formatTomlString(value)}`;
+  });
+  return `{ ${entries.join(', ')} }`;
+}
+
+/**
+ * Quote a TOML key as a basic string. Uses JSON quoting, which escapes both
+ * double quotes AND backslashes; the previous implementation only escaped
+ * quotes, so a key containing `\` produced invalid TOML.
+ */
+function formatTomlKey(key: string): string {
+  return JSON.stringify(key);
+}
+
+/**
+ * Render one MCP server definition as TOML lines: a [mcp_servers."name"]
+ * header followed by whichever of type/command/args/env/url/headers the
+ * config provides. Field order is fixed so output is deterministic.
+ * The 'in' guards narrow the McpServerConfig union before property access.
+ */
+function buildServerBlock(name: string, server: McpServerConfig): string[] {
+ const lines: string[] = [];
+ const section = `${CODEX_MCP_SECTION}.${formatTomlKey(name)}`;
+ lines.push(`[${section}]`);
+
+ if (server.type) {
+ lines.push(`type = ${formatTomlString(server.type)}`);
+ }
+
+ if ('command' in server && server.command) {
+ lines.push(`command = ${formatTomlString(server.command)}`);
+ }
+
+ if ('args' in server && server.args && server.args.length > 0) {
+ lines.push(`args = ${formatTomlArray(server.args)}`);
+ }
+
+ if ('env' in server && server.env && Object.keys(server.env).length > 0) {
+ lines.push(`env = ${formatTomlInlineTable(server.env)}`);
+ }
+
+ if ('url' in server && server.url) {
+ lines.push(`url = ${formatTomlString(server.url)}`);
+ }
+
+ if ('headers' in server && server.headers && Object.keys(server.headers).length > 0) {
+ lines.push(`headers = ${formatTomlInlineTable(server.headers)}`);
+ }
+
+ return lines;
+}
+}
+
+export class CodexConfigManager {
+ async configureMcpServers(
+ cwd: string,
+ mcpServers: Record
+ ): Promise {
+ const configDir = path.join(cwd, CODEX_CONFIG_DIR);
+ const configPath = path.join(configDir, CODEX_CONFIG_FILENAME);
+
+ await secureFs.mkdir(configDir, { recursive: true });
+
+ const blocks: string[] = [];
+ for (const [name, server] of Object.entries(mcpServers)) {
+ blocks.push(...buildServerBlock(name, server), '');
+ }
+
+ const content = blocks.join('\n').trim();
+ if (content) {
+ await secureFs.writeFile(configPath, content + '\n', 'utf-8');
+ }
+ }
+}
diff --git a/apps/server/src/providers/codex-models.ts b/apps/server/src/providers/codex-models.ts
new file mode 100644
index 00000000..141d5355
--- /dev/null
+++ b/apps/server/src/providers/codex-models.ts
@@ -0,0 +1,111 @@
+/**
+ * Codex Model Definitions
+ *
+ * Official Codex CLI models as documented at https://developers.openai.com/codex/models/
+ */
+
+import { CODEX_MODEL_MAP } from '@automaker/types';
+import type { ModelDefinition } from './types.js';
+
+// Context-window and max-output-token capacities shared by the model
+// definitions below (values in tokens).
+const CONTEXT_WINDOW_256K = 256000;
+const CONTEXT_WINDOW_128K = 128000;
+const MAX_OUTPUT_32K = 32000;
+const MAX_OUTPUT_16K = 16000;
+
+/**
+ * All available Codex models with their specifications.
+ * Based on https://developers.openai.com/codex/models/
+ *
+ * NOTE(review): the `as const` on each `tier` entry is redundant - the
+ * ModelDefinition[] annotation already fixes the property's type.
+ */
+export const CODEX_MODELS: ModelDefinition[] = [
+ // ========== Recommended Codex Models ==========
+ {
+ id: CODEX_MODEL_MAP.gpt52Codex,
+ name: 'GPT-5.2-Codex',
+ modelString: CODEX_MODEL_MAP.gpt52Codex,
+ provider: 'openai',
+ description:
+ 'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).',
+ contextWindow: CONTEXT_WINDOW_256K,
+ maxOutputTokens: MAX_OUTPUT_32K,
+ supportsVision: true,
+ supportsTools: true,
+ tier: 'premium' as const,
+ default: true,
+ hasReasoning: true,
+ },
+ {
+ id: CODEX_MODEL_MAP.gpt51CodexMax,
+ name: 'GPT-5.1-Codex-Max',
+ modelString: CODEX_MODEL_MAP.gpt51CodexMax,
+ provider: 'openai',
+ description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
+ contextWindow: CONTEXT_WINDOW_256K,
+ maxOutputTokens: MAX_OUTPUT_32K,
+ supportsVision: true,
+ supportsTools: true,
+ tier: 'premium' as const,
+ hasReasoning: true,
+ },
+ {
+ id: CODEX_MODEL_MAP.gpt51CodexMini,
+ name: 'GPT-5.1-Codex-Mini',
+ modelString: CODEX_MODEL_MAP.gpt51CodexMini,
+ provider: 'openai',
+ description: 'Smaller, more cost-effective version for faster workflows.',
+ contextWindow: CONTEXT_WINDOW_128K,
+ maxOutputTokens: MAX_OUTPUT_16K,
+ supportsVision: true,
+ supportsTools: true,
+ tier: 'basic' as const,
+ hasReasoning: false,
+ },
+
+ // ========== General-Purpose GPT Models ==========
+ {
+ id: CODEX_MODEL_MAP.gpt52,
+ name: 'GPT-5.2',
+ modelString: CODEX_MODEL_MAP.gpt52,
+ provider: 'openai',
+ description: 'Best general agentic model for tasks across industries and domains.',
+ contextWindow: CONTEXT_WINDOW_256K,
+ maxOutputTokens: MAX_OUTPUT_32K,
+ supportsVision: true,
+ supportsTools: true,
+ tier: 'standard' as const,
+ hasReasoning: true,
+ },
+ {
+ id: CODEX_MODEL_MAP.gpt51,
+ name: 'GPT-5.1',
+ modelString: CODEX_MODEL_MAP.gpt51,
+ provider: 'openai',
+ description: 'Great for coding and agentic tasks across domains.',
+ contextWindow: CONTEXT_WINDOW_256K,
+ maxOutputTokens: MAX_OUTPUT_32K,
+ supportsVision: true,
+ supportsTools: true,
+ tier: 'standard' as const,
+ hasReasoning: true,
+ },
+];
+
+/**
+ * Look up a model definition by its id or by its raw model string.
+ *
+ * @returns The matching definition, or undefined when unknown.
+ */
+export function getCodexModelById(modelId: string): ModelDefinition | undefined {
+  for (const model of CODEX_MODELS) {
+    if (model.id === modelId || model.modelString === modelId) {
+      return model;
+    }
+  }
+  return undefined;
+}
+
+/**
+ * All Codex models that expose reasoning output.
+ */
+export function getReasoningModels(): ModelDefinition[] {
+  const reasoningModels: ModelDefinition[] = [];
+  for (const model of CODEX_MODELS) {
+    if (model.hasReasoning) {
+      reasoningModels.push(model);
+    }
+  }
+  return reasoningModels;
+}
+
+/**
+ * All Codex models belonging to the given pricing tier.
+ */
+export function getModelsByTier(tier: 'premium' | 'standard' | 'basic'): ModelDefinition[] {
+  const matches: ModelDefinition[] = [];
+  for (const model of CODEX_MODELS) {
+    if (model.tier === tier) {
+      matches.push(model);
+    }
+  }
+  return matches;
+}
diff --git a/apps/server/src/providers/codex-provider.ts b/apps/server/src/providers/codex-provider.ts
new file mode 100644
index 00000000..2e3962a0
--- /dev/null
+++ b/apps/server/src/providers/codex-provider.ts
@@ -0,0 +1,1082 @@
+/**
+ * Codex Provider - Executes queries using Codex CLI
+ *
+ * Spawns the Codex CLI and converts JSONL output into ProviderMessage format.
+ */
+
+import path from 'path';
+import { BaseProvider } from './base-provider.js';
+import {
+ spawnJSONLProcess,
+ spawnProcess,
+ findCodexCliPath,
+ getCodexAuthIndicators,
+ secureFs,
+ getDataDirectory,
+ getCodexConfigDir,
+} from '@automaker/platform';
+import { checkCodexAuthentication } from '../lib/codex-auth.js';
+import {
+ formatHistoryAsText,
+ extractTextFromContent,
+ classifyError,
+ getUserFriendlyErrorMessage,
+ createLogger,
+} from '@automaker/utils';
+import type {
+ ExecuteOptions,
+ ProviderMessage,
+ InstallationStatus,
+ ModelDefinition,
+} from './types.js';
+import {
+ CODEX_MODEL_MAP,
+ supportsReasoningEffort,
+ validateBareModelId,
+ type CodexApprovalPolicy,
+ type CodexSandboxMode,
+ type CodexAuthStatus,
+} from '@automaker/types';
+import { CodexConfigManager } from './codex-config-manager.js';
+import { executeCodexSdkQuery } from './codex-sdk-client.js';
+import {
+ resolveCodexToolCall,
+ extractCodexTodoItems,
+ getCodexTodoToolName,
+} from './codex-tool-mapping.js';
+import { SettingsService } from '../services/settings-service.js';
+import { checkSandboxCompatibility } from '../lib/sdk-options.js';
+import { CODEX_MODELS } from './codex-models.js';
+
+// --- Codex CLI invocation: subcommands and flags ---------------------------
+const CODEX_COMMAND = 'codex';
+const CODEX_EXEC_SUBCOMMAND = 'exec';
+const CODEX_JSON_FLAG = '--json';
+const CODEX_MODEL_FLAG = '--model';
+const CODEX_VERSION_FLAG = '--version';
+const CODEX_SANDBOX_FLAG = '--sandbox';
+const CODEX_APPROVAL_FLAG = '--ask-for-approval';
+const CODEX_SEARCH_FLAG = '--search';
+const CODEX_OUTPUT_SCHEMA_FLAG = '--output-schema';
+const CODEX_CONFIG_FLAG = '--config';
+const CODEX_IMAGE_FLAG = '--image';
+const CODEX_ADD_DIR_FLAG = '--add-dir';
+const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check';
+const CODEX_RESUME_FLAG = 'resume';
+const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';
+const CODEX_YOLO_FLAG = '--dangerously-bypass-approvals-and-sandbox';
+const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
+// --- Execution modes and user-facing error messages ------------------------
+const CODEX_EXECUTION_MODE_CLI = 'cli';
+const CODEX_EXECUTION_MODE_SDK = 'sdk';
+const ERROR_CODEX_CLI_REQUIRED =
+ 'Codex CLI is required for tool-enabled requests. Please install Codex CLI and run `codex login`.';
+const ERROR_CODEX_AUTH_REQUIRED = "Codex authentication is required. Please run 'codex login'.";
+const ERROR_CODEX_SDK_AUTH_REQUIRED = 'OpenAI API key required for Codex SDK execution.';
+
+// Raw JSONL event and item type names emitted by the Codex CLI stream.
+const CODEX_EVENT_TYPES = {
+ itemCompleted: 'item.completed',
+ itemStarted: 'item.started',
+ itemUpdated: 'item.updated',
+ turnCompleted: 'turn.completed',
+ error: 'error',
+} as const;
+
+const CODEX_ITEM_TYPES = {
+ reasoning: 'reasoning',
+ agentMessage: 'agent_message',
+ commandExecution: 'command_execution',
+ todoList: 'todo_list',
+} as const;
+
+// --- Prompt assembly, instructions loading, and defaults -------------------
+const SYSTEM_PROMPT_LABEL = 'System instructions';
+const HISTORY_HEADER = 'Current request:\n';
+const TEXT_ENCODING = 'utf-8';
+const DEFAULT_TIMEOUT_MS = 30000;
+const CONTEXT_WINDOW_256K = 256000;
+const MAX_OUTPUT_32K = 32000;
+const MAX_OUTPUT_16K = 16000;
+const SYSTEM_PROMPT_SEPARATOR = '\n\n';
+const CODEX_INSTRUCTIONS_DIR = '.codex';
+const CODEX_INSTRUCTIONS_SECTION = 'Codex Project Instructions';
+const CODEX_INSTRUCTIONS_PATH_LABEL = 'Path';
+const CODEX_INSTRUCTIONS_SOURCE_LABEL = 'Source';
+const CODEX_INSTRUCTIONS_USER_SOURCE = 'User instructions';
+const CODEX_INSTRUCTIONS_PROJECT_SOURCE = 'Project instructions';
+const CODEX_USER_INSTRUCTIONS_FILE = 'AGENTS.md';
+const CODEX_PROJECT_INSTRUCTIONS_FILES = ['AGENTS.md'] as const;
+const CODEX_SETTINGS_DIR_FALLBACK = './data';
+const DEFAULT_CODEX_AUTO_LOAD_AGENTS = false;
+const DEFAULT_CODEX_SANDBOX_MODE: CodexSandboxMode = 'workspace-write';
+const DEFAULT_CODEX_APPROVAL_POLICY: CodexApprovalPolicy = 'on-request';
+// --- Tool-use correlation and event field lookup keys ----------------------
+const TOOL_USE_ID_PREFIX = 'codex-tool-';
+const ITEM_ID_KEYS = ['id', 'item_id', 'call_id', 'tool_use_id', 'command_id'] as const;
+const EVENT_ID_KEYS = ['id', 'event_id', 'request_id'] as const;
+const COMMAND_OUTPUT_FIELDS = ['output', 'stdout', 'stderr', 'result'] as const;
+const COMMAND_OUTPUT_SEPARATOR = '\n';
+const OUTPUT_SCHEMA_FILENAME = 'output-schema.json';
+const OUTPUT_SCHEMA_INDENT_SPACES = 2;
+const IMAGE_TEMP_DIR = '.codex-images';
+const IMAGE_FILE_PREFIX = 'image-';
+const IMAGE_FILE_EXT = '.png';
+const DEFAULT_ALLOWED_TOOLS = [
+ 'Read',
+ 'Write',
+ 'Edit',
+ 'Glob',
+ 'Grep',
+ 'Bash',
+ 'WebSearch',
+ 'WebFetch',
+] as const;
+const SEARCH_TOOL_NAMES = new Set(['WebSearch', 'WebFetch']);
+const MIN_MAX_TURNS = 1;
+const CONFIG_KEY_MAX_TURNS = 'max_turns';
+// Labels for the constraints section injected into the system prompt.
+const CONSTRAINTS_SECTION_TITLE = 'Codex Execution Constraints';
+const CONSTRAINTS_MAX_TURNS_LABEL = 'Max turns';
+const CONSTRAINTS_ALLOWED_TOOLS_LABEL = 'Allowed tools';
+const CONSTRAINTS_OUTPUT_SCHEMA_LABEL = 'Output format';
+const CONSTRAINTS_SESSION_ID_LABEL = 'Session ID';
+const CONSTRAINTS_NO_TOOLS_VALUE = 'none';
+const CONSTRAINTS_OUTPUT_SCHEMA_VALUE = 'Respond with JSON that matches the provided schema.';
+
+// How a query will be executed: via the OpenAI SDK or the Codex CLI.
+type CodexExecutionMode = typeof CODEX_EXECUTION_MODE_CLI | typeof CODEX_EXECUTION_MODE_SDK;
+type CodexExecutionPlan = {
+ mode: CodexExecutionMode;
+ cliPath: string | null;
+};
+
+// Allow-list of environment variables forwarded to the Codex subprocess.
+const ALLOWED_ENV_VARS = [
+ OPENAI_API_KEY_ENV,
+ 'PATH',
+ 'HOME',
+ 'SHELL',
+ 'TERM',
+ 'USER',
+ 'LANG',
+ 'LC_ALL',
+];
+
+/**
+ * Build a minimal, allow-listed environment for the Codex subprocess.
+ * Only ALLOWED_ENV_VARS entries are forwarded; empty values are dropped.
+ * Restores the stripped `Record<string, string>` type arguments.
+ */
+function buildEnv(): Record<string, string> {
+  const env: Record<string, string> = {};
+  for (const key of ALLOWED_ENV_VARS) {
+    const value = process.env[key];
+    if (value) {
+      env[key] = value;
+    }
+  }
+  return env;
+}
+
+/** True when the request carries at least one MCP server definition. */
+function hasMcpServersConfigured(options: ExecuteOptions): boolean {
+  const servers = options.mcpServers;
+  if (!servers) {
+    return false;
+  }
+  return Object.keys(servers).length > 0;
+}
+
+/** True when the caller explicitly requested an empty tool set. */
+function isNoToolsRequested(options: ExecuteOptions): boolean {
+  const tools = options.allowedTools;
+  return Array.isArray(tools) && tools.length === 0;
+}
+
+/** SDK execution is only possible for tool-less requests with no MCP servers. */
+function isSdkEligible(options: ExecuteOptions): boolean {
+  if (!isNoToolsRequested(options)) {
+    return false;
+  }
+  return !hasMcpServersConfigured(options);
+}
+
+/**
+ * Decide whether to run via the OpenAI SDK or the Codex CLI.
+ *
+ * SDK mode is chosen for SDK-eligible (tool-less, MCP-free) requests when
+ * OPENAI_API_KEY is set; otherwise the CLI must be present and authenticated
+ * (OAuth token, stored API key, or OPENAI_API_KEY). Restores the stripped
+ * `Promise<CodexExecutionPlan>` return type argument.
+ *
+ * @throws When neither a usable SDK key nor an authenticated CLI exists.
+ */
+async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
+  const cliPath = await findCodexCliPath();
+  const authIndicators = await getCodexAuthIndicators();
+  const hasApiKey = Boolean(process.env[OPENAI_API_KEY_ENV]);
+  const cliAuthenticated = authIndicators.hasOAuthToken || authIndicators.hasApiKey || hasApiKey;
+  const sdkEligible = isSdkEligible(options);
+  const cliAvailable = Boolean(cliPath);
+
+  if (sdkEligible) {
+    if (hasApiKey) {
+      return { mode: CODEX_EXECUTION_MODE_SDK, cliPath };
+    }
+    // SDK-eligible but no API key: fall through to the CLI when it exists.
+    if (!cliAvailable) {
+      throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
+    }
+  }
+
+  if (!cliAvailable) {
+    throw new Error(ERROR_CODEX_CLI_REQUIRED);
+  }
+
+  if (!cliAuthenticated) {
+    throw new Error(ERROR_CODEX_AUTH_REQUIRED);
+  }
+
+  return { mode: CODEX_EXECUTION_MODE_CLI, cliPath };
+}
+
+/**
+ * Read the event type from a raw Codex JSONL event; accepts either the
+ * `type` field or the legacy `event` field. Restores the stripped
+ * `Record<string, unknown>` parameter type.
+ */
+function getEventType(event: Record<string, unknown>): string | null {
+  if (typeof event.type === 'string') {
+    return event.type;
+  }
+  if (typeof event.event === 'string') {
+    return event.event;
+  }
+  return null;
+}
+
+/**
+ * Best-effort extraction of human-readable text from an arbitrary Codex
+ * payload: strings pass through, arrays are recursively flattened and
+ * newline-joined, and objects yield their first string-valued
+ * text/content/message field.
+ */
+function extractText(value: unknown): string | null {
+  if (typeof value === 'string') {
+    return value;
+  }
+  if (Array.isArray(value)) {
+    const parts: string[] = [];
+    for (const entry of value) {
+      const text = extractText(entry);
+      if (text) {
+        parts.push(text);
+      }
+    }
+    return parts.join('\n');
+  }
+  if (value && typeof value === 'object') {
+    const record = value as Record<string, unknown>;
+    for (const field of ['text', 'content', 'message']) {
+      const candidate = record[field];
+      if (typeof candidate === 'string') {
+        return candidate;
+      }
+    }
+  }
+  return null;
+}
+
+/**
+ * Extract the command string from a command_execution item, checking
+ * command, then input, then content. Empty results map to null.
+ */
+function extractCommandText(item: Record<string, unknown>): string | null {
+  const text = extractText(item.command ?? item.input ?? item.content);
+  return text ? text : null;
+}
+
+/**
+ * Collect command output from the known output fields (output, stdout,
+ * stderr, result), de-duplicating repeated values since some events mirror
+ * the same text into several fields.
+ *
+ * Restores the stripped `Record<string, unknown>` parameter type and
+ * replaces the O(n^2) indexOf-based dedupe with a Set (same first-occurrence
+ * ordering).
+ */
+function extractCommandOutput(item: Record<string, unknown>): string | null {
+  const outputs: string[] = [];
+  for (const field of COMMAND_OUTPUT_FIELDS) {
+    const text = extractText(item[field]);
+    if (text) {
+      outputs.push(text);
+    }
+  }
+
+  if (outputs.length === 0) {
+    return null;
+  }
+
+  // Set iteration preserves insertion order, matching the old filter result.
+  const uniqueOutputs = [...new Set(outputs)];
+  return uniqueOutputs.join(COMMAND_OUTPUT_SEPARATOR);
+}
+
+/**
+ * Read the item type from a Codex item; accepts `type` or the alternate
+ * `kind` field. Restores the stripped `Record<string, unknown>` type.
+ */
+function extractItemType(item: Record<string, unknown>): string | null {
+  if (typeof item.type === 'string') {
+    return item.type;
+  }
+  if (typeof item.kind === 'string') {
+    return item.kind;
+  }
+  return null;
+}
+
+/**
+ * Normalize the systemPrompt option to a plain string: strings pass
+ * through, objects may carry the text in an `append` field, everything
+ * else resolves to null. Restores the stripped `Record<string, unknown>`
+ * type; the redundant `!== null` check (already excluded by the falsy
+ * guard) is dropped.
+ */
+function resolveSystemPrompt(systemPrompt?: unknown): string | null {
+  if (!systemPrompt) {
+    return null;
+  }
+  if (typeof systemPrompt === 'string') {
+    return systemPrompt;
+  }
+  if (typeof systemPrompt === 'object') {
+    const record = systemPrompt as Record<string, unknown>;
+    if (typeof record.append === 'string') {
+      return record.append;
+    }
+  }
+  return null;
+}
+
+function buildCombinedPrompt(options: ExecuteOptions, systemPromptText?: string | null): string {
+ const promptText =
+ typeof options.prompt === 'string' ? options.prompt : extractTextFromContent(options.prompt);
+ const historyText = options.conversationHistory
+ ? formatHistoryAsText(options.conversationHistory)
+ : '';
+ const resolvedSystemPrompt = systemPromptText ?? resolveSystemPrompt(options.systemPrompt);
+
+ const systemSection = resolvedSystemPrompt
+ ? `${SYSTEM_PROMPT_LABEL}:\n${resolvedSystemPrompt}\n\n`
+ : '';
+
+ return `${historyText}${systemSection}${HISTORY_HEADER}${promptText}`;
+}
+
+/** Render a config override value for the CLI `--config key=value` form. */
+function formatConfigValue(value: string | number | boolean): string {
+  return `${value}`;
+}
+
+/** Expand override entries into repeated `--config key=value` CLI args. */
+function buildConfigOverrides(
+  overrides: Array<{ key: string; value: string | number | boolean }>
+): string[] {
+  return overrides.flatMap(({ key, value }) => [
+    CODEX_CONFIG_FLAG,
+    `${key}=${formatConfigValue(value)}`,
+  ]);
+}
+
+/**
+ * Normalize a maxTurns option to a positive integer, or null when it is
+ * absent, non-numeric, non-finite, or below MIN_MAX_TURNS.
+ * (Number.isFinite already rejects NaN, so the separate isNaN check was
+ * redundant and has been removed.)
+ */
+function resolveMaxTurns(maxTurns?: number): number | null {
+  if (typeof maxTurns !== 'number' || !Number.isFinite(maxTurns)) {
+    return null;
+  }
+  const normalized = Math.floor(maxTurns);
+  return normalized >= MIN_MAX_TURNS ? normalized : null;
+}
+
+/**
+ * Decide whether web search should be enabled: true when any effective tool
+ * (the allowed list when restricted, otherwise the defaults) is a search tool.
+ */
+function resolveSearchEnabled(allowedTools: string[], restrictTools: boolean): boolean {
+  const effectiveTools = restrictTools ? allowedTools : Array.from(DEFAULT_ALLOWED_TOOLS);
+  for (const tool of effectiveTools) {
+    if (SEARCH_TOOL_NAMES.has(tool)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+/**
+ * Build the "Codex Execution Constraints" markdown section injected into the
+ * system prompt: max turns, the allowed tool list (only when restricted), a
+ * JSON-output reminder, and the session id. Returns null when no constraint
+ * applies so callers can skip the section entirely.
+ */
+function buildCodexConstraintsPrompt(
+ options: ExecuteOptions,
+ config: {
+ allowedTools: string[];
+ restrictTools: boolean;
+ maxTurns: number | null;
+ hasOutputSchema: boolean;
+ }
+): string | null {
+ const lines: string[] = [];
+
+ if (config.maxTurns !== null) {
+ lines.push(`${CONSTRAINTS_MAX_TURNS_LABEL}: ${config.maxTurns}`);
+ }
+
+ if (config.restrictTools) {
+ const allowed =
+ config.allowedTools.length > 0 ? config.allowedTools.join(', ') : CONSTRAINTS_NO_TOOLS_VALUE;
+ lines.push(`${CONSTRAINTS_ALLOWED_TOOLS_LABEL}: ${allowed}`);
+ }
+
+ if (config.hasOutputSchema) {
+ lines.push(`${CONSTRAINTS_OUTPUT_SCHEMA_LABEL}: ${CONSTRAINTS_OUTPUT_SCHEMA_VALUE}`);
+ }
+
+ if (options.sdkSessionId) {
+ lines.push(`${CONSTRAINTS_SESSION_ID_LABEL}: ${options.sdkSessionId}`);
+ }
+
+ if (lines.length === 0) {
+ return null;
+ }
+
+ return `## ${CONSTRAINTS_SECTION_TITLE}\n${lines.map((line) => `- ${line}`).join('\n')}`;
+}
+
+async function writeOutputSchemaFile(
+ cwd: string,
+ outputFormat?: ExecuteOptions['outputFormat']
+): Promise {
+ if (!outputFormat || outputFormat.type !== 'json_schema') {
+ return null;
+ }
+ if (!outputFormat.schema || typeof outputFormat.schema !== 'object') {
+ throw new Error('Codex output schema must be a JSON object.');
+ }
+
+ const schemaDir = path.join(cwd, CODEX_INSTRUCTIONS_DIR);
+ await secureFs.mkdir(schemaDir, { recursive: true });
+ const schemaPath = path.join(schemaDir, OUTPUT_SCHEMA_FILENAME);
+ const schemaContent = JSON.stringify(outputFormat.schema, null, OUTPUT_SCHEMA_INDENT_SPACES);
+ await secureFs.writeFile(schemaPath, schemaContent, TEXT_ENCODING);
+ return schemaPath;
+}
+
+/**
+ * Minimal shape of an image content block in a multimodal prompt: base64
+ * `data` plus its `media_type`. Only `source.data` is consumed downstream
+ * here (writeImageFiles); `source.type` is carried but not inspected.
+ */
+type ImageBlock = {
+ type: 'image';
+ source: {
+ type: string;
+ media_type: string;
+ data: string;
+ };
+};
+
+/**
+ * Collect image content blocks from a multimodal prompt. String and
+ * non-array prompts yield no images; a candidate must be an 'image' block
+ * whose source carries both `data` and `media_type` to be accepted.
+ */
+function extractImageBlocks(prompt: ExecuteOptions['prompt']): ImageBlock[] {
+ if (typeof prompt === 'string') {
+ return [];
+ }
+ if (!Array.isArray(prompt)) {
+ return [];
+ }
+
+ const images: ImageBlock[] = [];
+ for (const block of prompt) {
+ // Structural check stands in for a type guard on the untyped block union.
+ if (
+ block &&
+ typeof block === 'object' &&
+ 'type' in block &&
+ block.type === 'image' &&
+ 'source' in block &&
+ block.source &&
+ typeof block.source === 'object' &&
+ 'data' in block.source &&
+ 'media_type' in block.source
+ ) {
+ images.push(block as ImageBlock);
+ }
+ }
+ return images;
+}
+
+/**
+ * Write base64 image blocks to `<cwd>/.codex/.codex-images/` and return the
+ * file paths (handed to the CLI via --image). Restores the stripped
+ * `Promise<string[]>` return type argument.
+ *
+ * NOTE(review): files always get the `.png` extension regardless of
+ * source.media_type - confirm callers only supply PNGs.
+ */
+async function writeImageFiles(cwd: string, imageBlocks: ImageBlock[]): Promise<string[]> {
+  if (imageBlocks.length === 0) {
+    return [];
+  }
+
+  const imageDir = path.join(cwd, CODEX_INSTRUCTIONS_DIR, IMAGE_TEMP_DIR);
+  await secureFs.mkdir(imageDir, { recursive: true });
+
+  const imagePaths: string[] = [];
+  for (let i = 0; i < imageBlocks.length; i++) {
+    const imageBlock = imageBlocks[i];
+    // Timestamp + index keeps names unique within a single request.
+    const imageName = `${IMAGE_FILE_PREFIX}${Date.now()}-${i}${IMAGE_FILE_EXT}`;
+    const imagePath = path.join(imageDir, imageName);
+
+    // Convert base64 to buffer
+    const imageData = Buffer.from(imageBlock.source.data, 'base64');
+    await secureFs.writeFile(imagePath, imageData);
+    imagePaths.push(imagePath);
+  }
+
+  return imagePaths;
+}
+
+/**
+ * Coerce an id-like value to a non-empty string: strings are trimmed
+ * (empty -> null) and finite numbers are stringified; anything else
+ * yields null.
+ */
+function normalizeIdentifier(value: unknown): string | null {
+  if (typeof value === 'string') {
+    const trimmed = value.trim();
+    return trimmed.length > 0 ? trimmed : null;
+  }
+  if (typeof value === 'number' && Number.isFinite(value)) {
+    return `${value}`;
+  }
+  return null;
+}
+
+/**
+ * Return the first normalizable identifier among `keys` in `record`, or
+ * null when none matches. Restores the stripped `Record<string, unknown>`
+ * parameter type.
+ */
+function getIdentifierFromRecord(
+  record: Record<string, unknown>,
+  keys: readonly string[]
+): string | null {
+  for (const key of keys) {
+    const id = normalizeIdentifier(record[key]);
+    if (id) {
+      return id;
+    }
+  }
+  return null;
+}
+
+/**
+ * Identify an item by its own id fields, falling back to the enclosing
+ * event's id fields. Restores the stripped `Record<string, unknown>`
+ * parameter types.
+ */
+function getItemIdentifier(
+  event: Record<string, unknown>,
+  item: Record<string, unknown>
+): string | null {
+  return (
+    getIdentifierFromRecord(item, ITEM_ID_KEYS) ?? getIdentifierFromRecord(event, EVENT_ID_KEYS)
+  );
+}
+
+/**
+ * Correlates Codex tool-use start/completion events so each pair shares a
+ * stable synthetic tool_use id. Items carrying an identifier are matched by
+ * id; items without one are matched FIFO via the anonymous queue (assumes
+ * anonymous starts/completions arrive in order - TODO confirm against the
+ * CLI event stream). Restores the stripped `Map<string, string>` and
+ * `Record<string, unknown>` type arguments.
+ */
+class CodexToolUseTracker {
+  private readonly toolUseIdsByItem = new Map<string, string>();
+  private readonly anonymousToolUses: string[] = [];
+  private sequence = 0;
+
+  /** Register a started tool use and return its synthetic id. */
+  register(event: Record<string, unknown>, item: Record<string, unknown>): string {
+    const itemId = getItemIdentifier(event, item);
+    const toolUseId = this.nextToolUseId();
+    if (itemId) {
+      this.toolUseIdsByItem.set(itemId, toolUseId);
+    } else {
+      this.anonymousToolUses.push(toolUseId);
+    }
+    return toolUseId;
+  }
+
+  /**
+   * Resolve (and consume) the id registered for a completed tool use,
+   * falling back to the oldest anonymous id; null when nothing matches.
+   */
+  resolve(event: Record<string, unknown>, item: Record<string, unknown>): string | null {
+    const itemId = getItemIdentifier(event, item);
+    if (itemId) {
+      const toolUseId = this.toolUseIdsByItem.get(itemId);
+      if (toolUseId) {
+        this.toolUseIdsByItem.delete(itemId);
+        return toolUseId;
+      }
+    }
+
+    if (this.anonymousToolUses.length > 0) {
+      return this.anonymousToolUses.shift() ?? null;
+    }
+
+    return null;
+  }
+
+  /** Monotonically increasing synthetic id: codex-tool-1, codex-tool-2, ... */
+  private nextToolUseId(): string {
+    this.sequence += 1;
+    return `${TOOL_USE_ID_PREFIX}${this.sequence}`;
+  }
+}
+
+/**
+ * Effective Codex CLI settings after merging built-in defaults, stored
+ * global settings, and per-request overrides (see loadCodexCliSettings).
+ */
+type CodexCliSettings = {
+ autoLoadAgents: boolean;
+ sandboxMode: CodexSandboxMode;
+ approvalPolicy: CodexApprovalPolicy;
+ enableWebSearch: boolean;
+ enableImages: boolean;
+ additionalDirs: string[];
+ threadId?: string;
+};
+
+/**
+ * Resolve the directory backing SettingsService: the platform data
+ * directory when available, then DATA_DIR, then the ./data fallback.
+ */
+function getCodexSettingsDir(): string {
+  const configured = getDataDirectory() ?? process.env.DATA_DIR;
+  if (configured) {
+    return path.resolve(configured);
+  }
+  return path.resolve(CODEX_SETTINGS_DIR_FALLBACK);
+}
+
+async function loadCodexCliSettings(
+ overrides?: ExecuteOptions['codexSettings']
+): Promise {
+ const defaults: CodexCliSettings = {
+ autoLoadAgents: DEFAULT_CODEX_AUTO_LOAD_AGENTS,
+ sandboxMode: DEFAULT_CODEX_SANDBOX_MODE,
+ approvalPolicy: DEFAULT_CODEX_APPROVAL_POLICY,
+ enableWebSearch: false,
+ enableImages: true,
+ additionalDirs: [],
+ threadId: undefined,
+ };
+
+ try {
+ const settingsService = new SettingsService(getCodexSettingsDir());
+ const settings = await settingsService.getGlobalSettings();
+ const resolved: CodexCliSettings = {
+ autoLoadAgents: settings.codexAutoLoadAgents ?? defaults.autoLoadAgents,
+ sandboxMode: settings.codexSandboxMode ?? defaults.sandboxMode,
+ approvalPolicy: settings.codexApprovalPolicy ?? defaults.approvalPolicy,
+ enableWebSearch: settings.codexEnableWebSearch ?? defaults.enableWebSearch,
+ enableImages: settings.codexEnableImages ?? defaults.enableImages,
+ additionalDirs: settings.codexAdditionalDirs ?? defaults.additionalDirs,
+ threadId: settings.codexThreadId,
+ };
+
+ if (!overrides) {
+ return resolved;
+ }
+
+ return {
+ autoLoadAgents: overrides.autoLoadAgents ?? resolved.autoLoadAgents,
+ sandboxMode: overrides.sandboxMode ?? resolved.sandboxMode,
+ approvalPolicy: overrides.approvalPolicy ?? resolved.approvalPolicy,
+ enableWebSearch: overrides.enableWebSearch ?? resolved.enableWebSearch,
+ enableImages: overrides.enableImages ?? resolved.enableImages,
+ additionalDirs: overrides.additionalDirs ?? resolved.additionalDirs,
+ threadId: overrides.threadId ?? resolved.threadId,
+ };
+ } catch {
+ return {
+ autoLoadAgents: overrides?.autoLoadAgents ?? defaults.autoLoadAgents,
+ sandboxMode: overrides?.sandboxMode ?? defaults.sandboxMode,
+ approvalPolicy: overrides?.approvalPolicy ?? defaults.approvalPolicy,
+ enableWebSearch: overrides?.enableWebSearch ?? defaults.enableWebSearch,
+ enableImages: overrides?.enableImages ?? defaults.enableImages,
+ additionalDirs: overrides?.additionalDirs ?? defaults.additionalDirs,
+ threadId: overrides?.threadId ?? defaults.threadId,
+ };
+ }
+}
+
+/**
+ * Render one loaded instruction file as a markdown section: a fixed header,
+ * source and path metadata, then the file content.
+ */
+function buildCodexInstructionsPrompt(
+  filePath: string,
+  content: string,
+  sourceLabel: string
+): string {
+  const header = `## ${CODEX_INSTRUCTIONS_SECTION}`;
+  const sourceLine = `**${CODEX_INSTRUCTIONS_SOURCE_LABEL}:** ${sourceLabel}`;
+  const pathLine = `**${CODEX_INSTRUCTIONS_PATH_LABEL}:** \`${filePath}\``;
+  return `${header}\n${sourceLine}\n${pathLine}\n\n${content}`;
+}
+
+/**
+ * Read and trim an instructions file; returns null when the file is
+ * missing, unreadable, or empty. Restores the stripped
+ * `Promise<string | null>` return type argument.
+ */
+async function readCodexInstructionFile(filePath: string): Promise<string | null> {
+  try {
+    const raw = await secureFs.readFile(filePath, TEXT_ENCODING);
+    const content = String(raw).trim();
+    return content.length > 0 ? content : null;
+  } catch {
+    // Absent or unreadable instruction files are expected - treat as "none".
+    return null;
+  }
+}
+
+/**
+ * Load user-level (Codex config dir AGENTS.md) and project-level
+ * (.codex/AGENTS.md) instruction files and join them into one markdown
+ * prompt section, dropping later sources whose content duplicates an
+ * earlier one. Restores the stripped `Promise<string | null>` and
+ * `Set<string>` type arguments.
+ *
+ * @returns Combined markdown, or null when disabled or nothing was found.
+ */
+async function loadCodexInstructions(cwd: string, enabled: boolean): Promise<string | null> {
+  if (!enabled) {
+    return null;
+  }
+
+  const sources: Array<{ path: string; content: string; sourceLabel: string }> = [];
+  const userInstructionsPath = path.join(getCodexConfigDir(), CODEX_USER_INSTRUCTIONS_FILE);
+  const userContent = await readCodexInstructionFile(userInstructionsPath);
+  if (userContent) {
+    sources.push({
+      path: userInstructionsPath,
+      content: userContent,
+      sourceLabel: CODEX_INSTRUCTIONS_USER_SOURCE,
+    });
+  }
+
+  for (const fileName of CODEX_PROJECT_INSTRUCTIONS_FILES) {
+    const projectPath = path.join(cwd, CODEX_INSTRUCTIONS_DIR, fileName);
+    const projectContent = await readCodexInstructionFile(projectPath);
+    if (projectContent) {
+      sources.push({
+        path: projectPath,
+        content: projectContent,
+        sourceLabel: CODEX_INSTRUCTIONS_PROJECT_SOURCE,
+      });
+    }
+  }
+
+  if (sources.length === 0) {
+    return null;
+  }
+
+  // De-duplicate by trimmed content, keeping the first occurrence.
+  const seen = new Set<string>();
+  const uniqueSources = sources.filter((source) => {
+    const normalized = source.content.trim();
+    if (seen.has(normalized)) {
+      return false;
+    }
+    seen.add(normalized);
+    return true;
+  });
+
+  return uniqueSources
+    .map((source) => buildCodexInstructionsPrompt(source.path, source.content, source.sourceLabel))
+    .join('\n\n');
+}
+
+// Module-scoped logger for CodexProvider diagnostics.
+const logger = createLogger('CodexProvider');
+
+export class CodexProvider extends BaseProvider {
+ /** Stable provider identifier for this implementation. */
+ getName(): string {
+ return 'codex';
+ }
+
+ async *executeQuery(options: ExecuteOptions): AsyncGenerator {
+ // Validate that model doesn't have a provider prefix
+ // AgentService should strip prefixes before passing to providers
+ validateBareModelId(options.model, 'CodexProvider');
+
+ try {
+ const mcpServers = options.mcpServers ?? {};
+ const hasMcpServers = Object.keys(mcpServers).length > 0;
+ const codexSettings = await loadCodexCliSettings(options.codexSettings);
+ const codexInstructions = await loadCodexInstructions(
+ options.cwd,
+ codexSettings.autoLoadAgents
+ );
+ const baseSystemPrompt = resolveSystemPrompt(options.systemPrompt);
+ const resolvedMaxTurns = resolveMaxTurns(options.maxTurns);
+ const resolvedAllowedTools = options.allowedTools ?? Array.from(DEFAULT_ALLOWED_TOOLS);
+ const restrictTools = !hasMcpServers || options.mcpUnrestrictedTools === false;
+ const wantsOutputSchema = Boolean(
+ options.outputFormat && options.outputFormat.type === 'json_schema'
+ );
+ const constraintsPrompt = buildCodexConstraintsPrompt(options, {
+ allowedTools: resolvedAllowedTools,
+ restrictTools,
+ maxTurns: resolvedMaxTurns,
+ hasOutputSchema: wantsOutputSchema,
+ });
+ const systemPromptParts = [codexInstructions, baseSystemPrompt, constraintsPrompt].filter(
+ (part): part is string => Boolean(part)
+ );
+ const combinedSystemPrompt = systemPromptParts.length
+ ? systemPromptParts.join(SYSTEM_PROMPT_SEPARATOR)
+ : null;
+
+ const executionPlan = await resolveCodexExecutionPlan(options);
+ if (executionPlan.mode === CODEX_EXECUTION_MODE_SDK) {
+ yield* executeCodexSdkQuery(options, combinedSystemPrompt);
+ return;
+ }
+
+ if (hasMcpServers) {
+ const configManager = new CodexConfigManager();
+ await configManager.configureMcpServers(options.cwd, options.mcpServers!);
+ }
+
+ const toolUseTracker = new CodexToolUseTracker();
+ const sandboxCheck = checkSandboxCompatibility(
+ options.cwd,
+ codexSettings.sandboxMode !== 'danger-full-access'
+ );
+ const resolvedSandboxMode = sandboxCheck.enabled
+ ? codexSettings.sandboxMode
+ : 'danger-full-access';
+ if (!sandboxCheck.enabled && sandboxCheck.message) {
+ console.warn(`[CodexProvider] ${sandboxCheck.message}`);
+ }
+ const searchEnabled =
+ codexSettings.enableWebSearch || resolveSearchEnabled(resolvedAllowedTools, restrictTools);
+ const outputSchemaPath = await writeOutputSchemaFile(options.cwd, options.outputFormat);
+ const imageBlocks = codexSettings.enableImages ? extractImageBlocks(options.prompt) : [];
+ const imagePaths = await writeImageFiles(options.cwd, imageBlocks);
+ const approvalPolicy =
+ hasMcpServers && options.mcpAutoApproveTools !== undefined
+ ? options.mcpAutoApproveTools
+ ? 'never'
+ : 'on-request'
+ : codexSettings.approvalPolicy;
+ const promptText = buildCombinedPrompt(options, combinedSystemPrompt);
+ const commandPath = executionPlan.cliPath || CODEX_COMMAND;
+
+ // Build config overrides for max turns and reasoning effort
+ const overrides: Array<{ key: string; value: string | number | boolean }> = [];
+ if (resolvedMaxTurns !== null) {
+ overrides.push({ key: CONFIG_KEY_MAX_TURNS, value: resolvedMaxTurns });
+ }
+
+ // Add reasoning effort if model supports it and reasoningEffort is specified
+ if (
+ options.reasoningEffort &&
+ supportsReasoningEffort(options.model) &&
+ options.reasoningEffort !== 'none'
+ ) {
+ overrides.push({ key: CODEX_REASONING_EFFORT_KEY, value: options.reasoningEffort });
+ }
+
+ // Add approval policy
+ overrides.push({ key: 'approval_policy', value: approvalPolicy });
+
+ // Add web search if enabled
+ if (searchEnabled) {
+ overrides.push({ key: 'features.web_search_request', value: true });
+ }
+
+ const configOverrides = buildConfigOverrides(overrides);
+ const preExecArgs: string[] = [];
+
+ // Add additional directories with write access
+ if (codexSettings.additionalDirs && codexSettings.additionalDirs.length > 0) {
+ for (const dir of codexSettings.additionalDirs) {
+ preExecArgs.push(CODEX_ADD_DIR_FLAG, dir);
+ }
+ }
+
+ // Model is already bare (no prefix) - validated by executeQuery
+ const args = [
+ CODEX_EXEC_SUBCOMMAND,
+ CODEX_YOLO_FLAG,
+ CODEX_SKIP_GIT_REPO_CHECK_FLAG,
+ ...preExecArgs,
+ CODEX_MODEL_FLAG,
+ options.model,
+ CODEX_JSON_FLAG,
+ '-', // Read prompt from stdin to avoid shell escaping issues
+ ];
+
+ const stream = spawnJSONLProcess({
+ command: commandPath,
+ args,
+ cwd: options.cwd,
+ env: buildEnv(),
+ abortController: options.abortController,
+ timeout: DEFAULT_TIMEOUT_MS,
+ stdinData: promptText, // Pass prompt via stdin
+ });
+
+ for await (const rawEvent of stream) {
+ const event = rawEvent as Record;
+ const eventType = getEventType(event);
+
+ // Track thread/session ID from events
+ const threadId = event.thread_id;
+ if (threadId && typeof threadId === 'string') {
+ this._lastSessionId = threadId;
+ }
+
+ if (eventType === CODEX_EVENT_TYPES.error) {
+ const errorText = extractText(event.error ?? event.message) || 'Codex CLI error';
+
+ // Enhance error message with helpful context
+ let enhancedError = errorText;
+ if (errorText.toLowerCase().includes('rate limit')) {
+ enhancedError = `${errorText}\n\nTip: You're being rate limited. Try reducing concurrent tasks or waiting a few minutes before retrying.`;
+ } else if (
+ errorText.toLowerCase().includes('authentication') ||
+ errorText.toLowerCase().includes('unauthorized')
+ ) {
+ enhancedError = `${errorText}\n\nTip: Check that your OPENAI_API_KEY is set correctly or run 'codex auth login' to authenticate.`;
+ } else if (
+ errorText.toLowerCase().includes('not found') ||
+ errorText.toLowerCase().includes('command not found')
+ ) {
+ enhancedError = `${errorText}\n\nTip: Make sure the Codex CLI is installed. Run 'npm install -g @openai/codex-cli' to install.`;
+ }
+
+ console.error('[CodexProvider] CLI error event:', { errorText, event });
+ yield { type: 'error', error: enhancedError };
+ continue;
+ }
+
+ if (eventType === CODEX_EVENT_TYPES.turnCompleted) {
+ const resultText = extractText(event.result) || undefined;
+ yield { type: 'result', subtype: 'success', result: resultText };
+ continue;
+ }
+
+ if (!eventType) {
+ const fallbackText = extractText(event);
+ if (fallbackText) {
+ yield {
+ type: 'assistant',
+ message: {
+ role: 'assistant',
+ content: [{ type: 'text', text: fallbackText }],
+ },
+ };
+ }
+ continue;
+ }
+
+ const item = (event.item ?? {}) as Record;
+ const itemType = extractItemType(item);
+
+ if (
+ eventType === CODEX_EVENT_TYPES.itemStarted &&
+ itemType === CODEX_ITEM_TYPES.commandExecution
+ ) {
+ const commandText = extractCommandText(item) || '';
+ const tool = resolveCodexToolCall(commandText);
+ const toolUseId = toolUseTracker.register(event, item);
+ yield {
+ type: 'assistant',
+ message: {
+ role: 'assistant',
+ content: [
+ {
+ type: 'tool_use',
+ name: tool.name,
+ input: tool.input,
+ tool_use_id: toolUseId,
+ },
+ ],
+ },
+ };
+ continue;
+ }
+
+ if (eventType === CODEX_EVENT_TYPES.itemUpdated && itemType === CODEX_ITEM_TYPES.todoList) {
+ const todos = extractCodexTodoItems(item);
+ if (todos) {
+ yield {
+ type: 'assistant',
+ message: {
+ role: 'assistant',
+ content: [
+ {
+ type: 'tool_use',
+ name: getCodexTodoToolName(),
+ input: { todos },
+ },
+ ],
+ },
+ };
+ } else {
+ const todoText = extractText(item) || '';
+ const formatted = todoText ? `Updated TODO list:\n${todoText}` : 'Updated TODO list';
+ yield {
+ type: 'assistant',
+ message: {
+ role: 'assistant',
+ content: [{ type: 'text', text: formatted }],
+ },
+ };
+ }
+ continue;
+ }
+
+ if (eventType === CODEX_EVENT_TYPES.itemCompleted) {
+ if (itemType === CODEX_ITEM_TYPES.reasoning) {
+ const thinkingText = extractText(item) || '';
+ yield {
+ type: 'assistant',
+ message: {
+ role: 'assistant',
+ content: [{ type: 'thinking', thinking: thinkingText }],
+ },
+ };
+ continue;
+ }
+
+ if (itemType === CODEX_ITEM_TYPES.commandExecution) {
+ const commandOutput =
+ extractCommandOutput(item) ?? extractCommandText(item) ?? extractText(item) ?? '';
+ if (commandOutput) {
+ const toolUseId = toolUseTracker.resolve(event, item);
+ const toolResultBlock: {
+ type: 'tool_result';
+ content: string;
+ tool_use_id?: string;
+ } = { type: 'tool_result', content: commandOutput };
+ if (toolUseId) {
+ toolResultBlock.tool_use_id = toolUseId;
+ }
+ yield {
+ type: 'assistant',
+ message: {
+ role: 'assistant',
+ content: [toolResultBlock],
+ },
+ };
+ }
+ continue;
+ }
+
+ const text = extractText(item) || extractText(event);
+ if (text) {
+ yield {
+ type: 'assistant',
+ message: {
+ role: 'assistant',
+ content: [{ type: 'text', text }],
+ },
+ };
+ }
+ }
+ }
+ } catch (error) {
+ const errorInfo = classifyError(error);
+ const userMessage = getUserFriendlyErrorMessage(error);
+ const enhancedMessage = errorInfo.isRateLimit
+ ? `${userMessage}\n\nTip: If you're rate limited, try reducing concurrent tasks or waiting a few minutes.`
+ : userMessage;
+
+ console.error('[CodexProvider] executeQuery() error:', {
+ type: errorInfo.type,
+ message: errorInfo.message,
+ isRateLimit: errorInfo.isRateLimit,
+ retryAfter: errorInfo.retryAfter,
+ stack: error instanceof Error ? error.stack : undefined,
+ });
+
+ yield { type: 'error', error: enhancedMessage };
+ }
+ }
+
+ async detectInstallation(): Promise {
+ const cliPath = await findCodexCliPath();
+ const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
+ const authIndicators = await getCodexAuthIndicators();
+ const installed = !!cliPath;
+
+ let version = '';
+ if (installed) {
+ try {
+ const result = await spawnProcess({
+ command: cliPath || CODEX_COMMAND,
+ args: [CODEX_VERSION_FLAG],
+ cwd: process.cwd(),
+ });
+ version = result.stdout.trim();
+ } catch (error) {
+ version = '';
+ }
+ }
+
+ // Determine auth status - always verify with CLI, never assume authenticated
+ const authCheck = await checkCodexAuthentication(cliPath);
+ const authenticated = authCheck.authenticated;
+
+ return {
+ installed,
+ path: cliPath || undefined,
+ version: version || undefined,
+ method: 'cli' as const, // Installation method
+ hasApiKey,
+ authenticated,
+ };
+ }
+
+ getAvailableModels(): ModelDefinition[] {
+ // Return all available Codex/OpenAI models
+ return CODEX_MODELS;
+ }
+
+ /**
+ * Check authentication status for Codex CLI
+ */
+ async checkAuth(): Promise {
+ const cliPath = await findCodexCliPath();
+ const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
+ const authIndicators = await getCodexAuthIndicators();
+
+ // Check for API key in environment
+ if (hasApiKey) {
+ return { authenticated: true, method: 'api_key' };
+ }
+
+ // Check for OAuth/token from Codex CLI
+ if (authIndicators.hasOAuthToken || authIndicators.hasApiKey) {
+ return { authenticated: true, method: 'oauth' };
+ }
+
+ // CLI is installed but not authenticated via indicators - try CLI command
+ if (cliPath) {
+ try {
+ // Try 'codex login status' first (same as checkCodexAuthentication)
+ const result = await spawnProcess({
+ command: cliPath || CODEX_COMMAND,
+ args: ['login', 'status'],
+ cwd: process.cwd(),
+ env: {
+ ...process.env,
+ TERM: 'dumb',
+ },
+ });
+
+ // Check both stdout and stderr - Codex CLI outputs to stderr
+ const combinedOutput = (result.stdout + result.stderr).toLowerCase();
+ const isLoggedIn = combinedOutput.includes('logged in');
+
+ if (result.exitCode === 0 && isLoggedIn) {
+ return { authenticated: true, method: 'oauth' };
+ }
+ } catch (error) {
+ logger.warn('Error running login status command during auth check:', error);
+ }
+ }
+
+ return { authenticated: false, method: 'none' };
+ }
+
+ /**
+ * Get the detected CLI path (public accessor for status endpoints)
+ */
+ async getCliPath(): Promise {
+ const path = await findCodexCliPath();
+ return path || null;
+ }
+
+ /**
+ * Get the last CLI session ID (for tracking across queries)
+ * This can be used to resume sessions in subsequent requests
+ */
+ getLastSessionId(): string | null {
+ return this._lastSessionId ?? null;
+ }
+
+ /**
+ * Set a session ID to use for CLI session resumption
+ */
+ setSessionId(sessionId: string | null): void {
+ this._lastSessionId = sessionId;
+ }
+
+ private _lastSessionId: string | null = null;
+}
diff --git a/apps/server/src/providers/codex-sdk-client.ts b/apps/server/src/providers/codex-sdk-client.ts
new file mode 100644
index 00000000..51f7c0d2
--- /dev/null
+++ b/apps/server/src/providers/codex-sdk-client.ts
@@ -0,0 +1,173 @@
+/**
+ * Codex SDK client - Executes Codex queries via official @openai/codex-sdk
+ *
+ * Used for programmatic control of Codex from within the application.
+ * Provides cleaner integration than spawning CLI processes.
+ */
+
+import { Codex } from '@openai/codex-sdk';
+import { formatHistoryAsText, classifyError, getUserFriendlyErrorMessage } from '@automaker/utils';
+import { supportsReasoningEffort } from '@automaker/types';
+import type { ExecuteOptions, ProviderMessage } from './types.js';
+
// Environment variable holding the OpenAI API key required by the SDK.
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
// Header placed before the current user request in the assembled prompt.
const SDK_HISTORY_HEADER = 'Current request:\n';
// Fallback text when the SDK returns no final response.
const DEFAULT_RESPONSE_TEXT = '';
// Label used when appending raw error details to a user-facing message.
const SDK_ERROR_DETAILS_LABEL = 'Details:';

// Loose shape of a single prompt content block (text or media source).
type PromptBlock = {
  type: string;
  text?: string;
  source?: {
    type?: string;
    media_type?: string;
    data?: string;
  };
};
+
+function resolveApiKey(): string {
+ const apiKey = process.env[OPENAI_API_KEY_ENV];
+ if (!apiKey) {
+ throw new Error('OPENAI_API_KEY is not set.');
+ }
+ return apiKey;
+}
+
+function normalizePromptBlocks(prompt: ExecuteOptions['prompt']): PromptBlock[] {
+ if (Array.isArray(prompt)) {
+ return prompt as PromptBlock[];
+ }
+ return [{ type: 'text', text: prompt }];
+}
+
+function buildPromptText(options: ExecuteOptions, systemPrompt: string | null): string {
+ const historyText =
+ options.conversationHistory && options.conversationHistory.length > 0
+ ? formatHistoryAsText(options.conversationHistory)
+ : '';
+
+ const promptBlocks = normalizePromptBlocks(options.prompt);
+ const promptTexts: string[] = [];
+
+ for (const block of promptBlocks) {
+ if (block.type === 'text' && typeof block.text === 'string' && block.text.trim()) {
+ promptTexts.push(block.text);
+ }
+ }
+
+ const promptContent = promptTexts.join('\n\n');
+ if (!promptContent.trim()) {
+ throw new Error('Codex SDK prompt is empty.');
+ }
+
+ const parts: string[] = [];
+ if (systemPrompt) {
+ parts.push(`System: ${systemPrompt}`);
+ }
+ if (historyText) {
+ parts.push(historyText);
+ }
+ parts.push(`${SDK_HISTORY_HEADER}${promptContent}`);
+
+ return parts.join('\n\n');
+}
+
+function buildSdkErrorMessage(rawMessage: string, userMessage: string): string {
+ if (!rawMessage) {
+ return userMessage;
+ }
+ if (!userMessage || rawMessage === userMessage) {
+ return rawMessage;
+ }
+ return `${userMessage}\n\n${SDK_ERROR_DETAILS_LABEL} ${rawMessage}`;
+}
+
+/**
+ * Execute a query using the official Codex SDK
+ *
+ * The SDK provides a cleaner interface than spawning CLI processes:
+ * - Handles authentication automatically
+ * - Provides TypeScript types
+ * - Supports thread management and resumption
+ * - Better error handling
+ */
+export async function* executeCodexSdkQuery(
+ options: ExecuteOptions,
+ systemPrompt: string | null
+): AsyncGenerator {
+ try {
+ const apiKey = resolveApiKey();
+ const codex = new Codex({ apiKey });
+
+ // Resume existing thread or start new one
+ let thread;
+ if (options.sdkSessionId) {
+ try {
+ thread = codex.resumeThread(options.sdkSessionId);
+ } catch {
+ // If resume fails, start a new thread
+ thread = codex.startThread();
+ }
+ } else {
+ thread = codex.startThread();
+ }
+
+ const promptText = buildPromptText(options, systemPrompt);
+
+ // Build run options with reasoning effort if supported
+ const runOptions: {
+ signal?: AbortSignal;
+ reasoning?: { effort: string };
+ } = {
+ signal: options.abortController?.signal,
+ };
+
+ // Add reasoning effort if model supports it and reasoningEffort is specified
+ if (
+ options.reasoningEffort &&
+ supportsReasoningEffort(options.model) &&
+ options.reasoningEffort !== 'none'
+ ) {
+ runOptions.reasoning = { effort: options.reasoningEffort };
+ }
+
+ // Run the query
+ const result = await thread.run(promptText, runOptions);
+
+ // Extract response text (from finalResponse property)
+ const outputText = result.finalResponse ?? DEFAULT_RESPONSE_TEXT;
+
+ // Get thread ID (may be null if not populated yet)
+ const threadId = thread.id ?? undefined;
+
+ // Yield assistant message
+ yield {
+ type: 'assistant',
+ session_id: threadId,
+ message: {
+ role: 'assistant',
+ content: [{ type: 'text', text: outputText }],
+ },
+ };
+
+ // Yield result
+ yield {
+ type: 'result',
+ subtype: 'success',
+ session_id: threadId,
+ result: outputText,
+ };
+ } catch (error) {
+ const errorInfo = classifyError(error);
+ const userMessage = getUserFriendlyErrorMessage(error);
+ const combinedMessage = buildSdkErrorMessage(errorInfo.message, userMessage);
+ console.error('[CodexSDK] executeQuery() error during execution:', {
+ type: errorInfo.type,
+ message: errorInfo.message,
+ isRateLimit: errorInfo.isRateLimit,
+ retryAfter: errorInfo.retryAfter,
+ stack: error instanceof Error ? error.stack : undefined,
+ });
+ yield { type: 'error', error: combinedMessage };
+ }
+}
diff --git a/apps/server/src/providers/codex-tool-mapping.ts b/apps/server/src/providers/codex-tool-mapping.ts
new file mode 100644
index 00000000..f951e0f0
--- /dev/null
+++ b/apps/server/src/providers/codex-tool-mapping.ts
@@ -0,0 +1,436 @@
/** Result of mapping a raw Codex shell command onto a named tool call. */
export type CodexToolResolution = {
  name: string;
  input: Record;
};

/** One TODO entry parsed from a Codex todo-list item. */
export type CodexTodoItem = {
  content: string;
  status: 'pending' | 'in_progress' | 'completed';
  // Optional in-progress display form, passed through when present.
  activeForm?: string;
};
+
// Canonical tool names emitted by resolveCodexToolCall().
const TOOL_NAME_BASH = 'Bash';
const TOOL_NAME_READ = 'Read';
const TOOL_NAME_EDIT = 'Edit';
const TOOL_NAME_WRITE = 'Write';
const TOOL_NAME_GREP = 'Grep';
const TOOL_NAME_GLOB = 'Glob';
const TOOL_NAME_TODO = 'TodoWrite';
const TOOL_NAME_DELETE = 'Delete';
const TOOL_NAME_LS = 'Ls';

// Keys used in tool-call input objects.
const INPUT_KEY_COMMAND = 'command';
const INPUT_KEY_FILE_PATH = 'file_path';
const INPUT_KEY_PATTERN = 'pattern';

// Shell wrappers (bash/sh -lc, cmd.exe /c, powershell -Command) whose inner
// command (capture group 1) is the command actually classified.
const SHELL_WRAPPER_PATTERNS = [
  /^\/bin\/bash\s+-lc\s+["']([\s\S]+)["']$/,
  /^bash\s+-lc\s+["']([\s\S]+)["']$/,
  /^\/bin\/sh\s+-lc\s+["']([\s\S]+)["']$/,
  /^sh\s+-lc\s+["']([\s\S]+)["']$/,
  /^cmd\.exe\s+\/c\s+["']?([\s\S]+)["']?$/i,
  /^powershell(?:\.exe)?\s+-Command\s+["']?([\s\S]+)["']?$/i,
  /^pwsh(?:\.exe)?\s+-Command\s+["']?([\s\S]+)["']?$/i,
] as const;

// Compound-command separators (&&, ||, ;) used to split segments.
const COMMAND_SEPARATOR_PATTERN = /\s*(?:&&|\|\||;)\s*/;
// Setup segments skipped when picking the "primary" segment of a command.
const SEGMENT_SKIP_PREFIXES = ['cd ', 'export ', 'set ', 'pushd '] as const;
// Leading wrapper executables stripped before classification.
const WRAPPER_COMMANDS = new Set(['sudo', 'env', 'command']);
// Command groups used to classify an executable into a tool name.
const READ_COMMANDS = new Set(['cat', 'sed', 'head', 'tail', 'less', 'more', 'bat', 'stat', 'wc']);
const SEARCH_COMMANDS = new Set(['rg', 'grep', 'ag', 'ack']);
// Note: 'ls' also appears in LIST_COMMANDS, which is checked first in
// resolveCodexToolCall, so plain `ls` maps to Ls rather than Glob.
const GLOB_COMMANDS = new Set(['ls', 'find', 'fd', 'tree']);
const DELETE_COMMANDS = new Set(['rm', 'del', 'erase', 'remove', 'unlink']);
const LIST_COMMANDS = new Set(['ls', 'dir', 'll', 'la']);
const WRITE_COMMANDS = new Set(['tee', 'touch', 'mkdir']);
// apply_patch envelopes are treated as Edit operations.
const APPLY_PATCH_COMMAND = 'apply_patch';
const APPLY_PATCH_PATTERN = /\bapply_patch\b/;
// Captures the file target of an output redirection (> or >>).
const REDIRECTION_TARGET_PATTERN = /(?:>>|>)\s*([^\s]+)/;
// Flags indicating an in-place sed edit.
const SED_IN_PLACE_FLAGS = new Set(['-i', '--in-place']);
// Broad match: any dash token containing an 'i' (e.g. -pi) counts as
// a perl in-place flag cluster.
const PERL_IN_PLACE_FLAG = /-.*i/;
// grep/rg flag classification for pattern extraction.
const SEARCH_PATTERN_FLAGS = new Set(['-e', '--regexp']);
const SEARCH_VALUE_FLAGS = new Set([
  '-g',
  '--glob',
  '--iglob',
  '--type',
  '--type-add',
  '--type-clear',
  '--encoding',
]);
const SEARCH_FILE_LIST_FLAGS = new Set(['--files']);
// Markdown-style todo line, e.g. "- [x] task".
// NOTE(review): parseTodoLines reads match.groups.status/.content, so this
// pattern is expected to declare named groups (?<status>…)/(?<content>…) —
// confirm the pattern text was not mangled in transit.
const TODO_LINE_PATTERN = /^[-*]\s*(?:\[(?[ x~])\]\s*)?(?.+)$/;
const TODO_STATUS_COMPLETED = 'completed';
const TODO_STATUS_IN_PROGRESS = 'in_progress';
const TODO_STATUS_PENDING = 'pending';
// apply_patch envelope markers that precede a file path.
const PATCH_FILE_MARKERS = [
  '*** Update File: ',
  '*** Add File: ',
  '*** Delete File: ',
  '*** Move to: ',
] as const;
+
+function stripShellWrapper(command: string): string {
+ const trimmed = command.trim();
+ for (const pattern of SHELL_WRAPPER_PATTERNS) {
+ const match = trimmed.match(pattern);
+ if (match && match[1]) {
+ return unescapeCommand(match[1].trim());
+ }
+ }
+ return trimmed;
+}
+
+function unescapeCommand(command: string): string {
+ return command.replace(/\\(["'])/g, '$1');
+}
+
+function extractPrimarySegment(command: string): string {
+ const segments = command
+ .split(COMMAND_SEPARATOR_PATTERN)
+ .map((segment) => segment.trim())
+ .filter(Boolean);
+
+ for (const segment of segments) {
+ const shouldSkip = SEGMENT_SKIP_PREFIXES.some((prefix) => segment.startsWith(prefix));
+ if (!shouldSkip) {
+ return segment;
+ }
+ }
+
+ return command.trim();
+}
+
+function tokenizeCommand(command: string): string[] {
+ const tokens: string[] = [];
+ let current = '';
+ let inSingleQuote = false;
+ let inDoubleQuote = false;
+ let isEscaped = false;
+
+ for (const char of command) {
+ if (isEscaped) {
+ current += char;
+ isEscaped = false;
+ continue;
+ }
+
+ if (char === '\\') {
+ isEscaped = true;
+ continue;
+ }
+
+ if (char === "'" && !inDoubleQuote) {
+ inSingleQuote = !inSingleQuote;
+ continue;
+ }
+
+ if (char === '"' && !inSingleQuote) {
+ inDoubleQuote = !inDoubleQuote;
+ continue;
+ }
+
+ if (!inSingleQuote && !inDoubleQuote && /\s/.test(char)) {
+ if (current) {
+ tokens.push(current);
+ current = '';
+ }
+ continue;
+ }
+
+ current += char;
+ }
+
+ if (current) {
+ tokens.push(current);
+ }
+
+ return tokens;
+}
+
+function stripWrapperTokens(tokens: string[]): string[] {
+ let index = 0;
+ while (index < tokens.length && WRAPPER_COMMANDS.has(tokens[index].toLowerCase())) {
+ index += 1;
+ }
+ return tokens.slice(index);
+}
+
+function extractFilePathFromTokens(tokens: string[]): string | null {
+ const candidates = tokens.slice(1).filter((token) => token && !token.startsWith('-'));
+ if (candidates.length === 0) return null;
+ return candidates[candidates.length - 1];
+}
+
+function extractSearchPattern(tokens: string[]): string | null {
+ const remaining = tokens.slice(1);
+
+ for (let index = 0; index < remaining.length; index += 1) {
+ const token = remaining[index];
+ if (token === '--') {
+ return remaining[index + 1] ?? null;
+ }
+ if (SEARCH_PATTERN_FLAGS.has(token)) {
+ return remaining[index + 1] ?? null;
+ }
+ if (SEARCH_VALUE_FLAGS.has(token)) {
+ index += 1;
+ continue;
+ }
+ if (token.startsWith('-')) {
+ continue;
+ }
+ return token;
+ }
+
+ return null;
+}
+
+function extractTeeTarget(tokens: string[]): string | null {
+ const teeIndex = tokens.findIndex((token) => token === 'tee');
+ if (teeIndex < 0) return null;
+ const candidate = tokens[teeIndex + 1];
+ return candidate && !candidate.startsWith('-') ? candidate : null;
+}
+
+function extractRedirectionTarget(command: string): string | null {
+ const match = command.match(REDIRECTION_TARGET_PATTERN);
+ return match?.[1] ?? null;
+}
+
+function extractFilePathFromDeleteTokens(tokens: string[]): string | null {
+ // rm file.txt or rm /path/to/file.txt
+ // Skip flags and get the first non-flag argument
+ for (let i = 1; i < tokens.length; i++) {
+ const token = tokens[i];
+ if (token && !token.startsWith('-')) {
+ return token;
+ }
+ }
+ return null;
+}
+
+function hasSedInPlaceFlag(tokens: string[]): boolean {
+ return tokens.some((token) => SED_IN_PLACE_FLAGS.has(token) || token.startsWith('-i'));
+}
+
+function hasPerlInPlaceFlag(tokens: string[]): boolean {
+ return tokens.some((token) => PERL_IN_PLACE_FLAG.test(token));
+}
+
+function extractPatchFilePath(command: string): string | null {
+ for (const marker of PATCH_FILE_MARKERS) {
+ const index = command.indexOf(marker);
+ if (index < 0) continue;
+ const start = index + marker.length;
+ const end = command.indexOf('\n', start);
+ const rawPath = (end === -1 ? command.slice(start) : command.slice(start, end)).trim();
+ if (rawPath) return rawPath;
+ }
+ return null;
+}
+
+function buildInputWithFilePath(filePath: string | null): Record {
+ return filePath ? { [INPUT_KEY_FILE_PATH]: filePath } : {};
+}
+
+function buildInputWithPattern(pattern: string | null): Record {
+ return pattern ? { [INPUT_KEY_PATTERN]: pattern } : {};
+}
+
+export function resolveCodexToolCall(command: string): CodexToolResolution {
+ const normalized = stripShellWrapper(command);
+ const primarySegment = extractPrimarySegment(normalized);
+ const tokens = stripWrapperTokens(tokenizeCommand(primarySegment));
+ const commandToken = tokens[0]?.toLowerCase() ?? '';
+
+ const redirectionTarget = extractRedirectionTarget(primarySegment);
+ if (redirectionTarget) {
+ return {
+ name: TOOL_NAME_WRITE,
+ input: buildInputWithFilePath(redirectionTarget),
+ };
+ }
+
+ if (commandToken === APPLY_PATCH_COMMAND || APPLY_PATCH_PATTERN.test(primarySegment)) {
+ return {
+ name: TOOL_NAME_EDIT,
+ input: buildInputWithFilePath(extractPatchFilePath(primarySegment)),
+ };
+ }
+
+ if (commandToken === 'sed' && hasSedInPlaceFlag(tokens)) {
+ return {
+ name: TOOL_NAME_EDIT,
+ input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
+ };
+ }
+
+ if (commandToken === 'perl' && hasPerlInPlaceFlag(tokens)) {
+ return {
+ name: TOOL_NAME_EDIT,
+ input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
+ };
+ }
+
+ if (WRITE_COMMANDS.has(commandToken)) {
+ const filePath =
+ commandToken === 'tee' ? extractTeeTarget(tokens) : extractFilePathFromTokens(tokens);
+ return {
+ name: TOOL_NAME_WRITE,
+ input: buildInputWithFilePath(filePath),
+ };
+ }
+
+ if (SEARCH_COMMANDS.has(commandToken)) {
+ if (tokens.some((token) => SEARCH_FILE_LIST_FLAGS.has(token))) {
+ return {
+ name: TOOL_NAME_GLOB,
+ input: buildInputWithPattern(extractFilePathFromTokens(tokens)),
+ };
+ }
+
+ return {
+ name: TOOL_NAME_GREP,
+ input: buildInputWithPattern(extractSearchPattern(tokens)),
+ };
+ }
+
+ // Handle Delete commands (rm, del, erase, remove, unlink)
+ if (DELETE_COMMANDS.has(commandToken)) {
+ // Skip if -r or -rf flags (recursive delete should go to Bash)
+ if (
+ tokens.some((token) => token === '-r' || token === '-rf' || token === '-f' || token === '-rf')
+ ) {
+ return {
+ name: TOOL_NAME_BASH,
+ input: { [INPUT_KEY_COMMAND]: normalized },
+ };
+ }
+ // Simple file deletion - extract the file path
+ const filePath = extractFilePathFromDeleteTokens(tokens);
+ if (filePath) {
+ return {
+ name: TOOL_NAME_DELETE,
+ input: { path: filePath },
+ };
+ }
+ // Fall back to bash if we can't determine the file path
+ return {
+ name: TOOL_NAME_BASH,
+ input: { [INPUT_KEY_COMMAND]: normalized },
+ };
+ }
+
+ // Handle simple Ls commands (just listing, not find/glob)
+ if (LIST_COMMANDS.has(commandToken)) {
+ const filePath = extractFilePathFromTokens(tokens);
+ return {
+ name: TOOL_NAME_LS,
+ input: { path: filePath || '.' },
+ };
+ }
+
+ if (GLOB_COMMANDS.has(commandToken)) {
+ return {
+ name: TOOL_NAME_GLOB,
+ input: buildInputWithPattern(extractFilePathFromTokens(tokens)),
+ };
+ }
+
+ if (READ_COMMANDS.has(commandToken)) {
+ return {
+ name: TOOL_NAME_READ,
+ input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
+ };
+ }
+
+ return {
+ name: TOOL_NAME_BASH,
+ input: { [INPUT_KEY_COMMAND]: normalized },
+ };
+}
+
+function parseTodoLines(lines: string[]): CodexTodoItem[] {
+ const todos: CodexTodoItem[] = [];
+
+ for (const line of lines) {
+ const match = line.match(TODO_LINE_PATTERN);
+ if (!match?.groups?.content) continue;
+
+ const statusToken = match.groups.status;
+ const status =
+ statusToken === 'x'
+ ? TODO_STATUS_COMPLETED
+ : statusToken === '~'
+ ? TODO_STATUS_IN_PROGRESS
+ : TODO_STATUS_PENDING;
+
+ todos.push({ content: match.groups.content.trim(), status });
+ }
+
+ return todos;
+}
+
+function extractTodoFromArray(value: unknown[]): CodexTodoItem[] {
+ return value
+ .map((entry) => {
+ if (typeof entry === 'string') {
+ return { content: entry, status: TODO_STATUS_PENDING };
+ }
+ if (entry && typeof entry === 'object') {
+ const record = entry as Record;
+ const content =
+ typeof record.content === 'string'
+ ? record.content
+ : typeof record.text === 'string'
+ ? record.text
+ : typeof record.title === 'string'
+ ? record.title
+ : null;
+ if (!content) return null;
+ const status =
+ record.status === TODO_STATUS_COMPLETED ||
+ record.status === TODO_STATUS_IN_PROGRESS ||
+ record.status === TODO_STATUS_PENDING
+ ? (record.status as CodexTodoItem['status'])
+ : TODO_STATUS_PENDING;
+ const activeForm = typeof record.activeForm === 'string' ? record.activeForm : undefined;
+ return { content, status, activeForm };
+ }
+ return null;
+ })
+ .filter((item): item is CodexTodoItem => Boolean(item));
+}
+
+export function extractCodexTodoItems(item: Record): CodexTodoItem[] | null {
+ const todosValue = item.todos;
+ if (Array.isArray(todosValue)) {
+ const todos = extractTodoFromArray(todosValue);
+ return todos.length > 0 ? todos : null;
+ }
+
+ const itemsValue = item.items;
+ if (Array.isArray(itemsValue)) {
+ const todos = extractTodoFromArray(itemsValue);
+ return todos.length > 0 ? todos : null;
+ }
+
+ const textValue =
+ typeof item.text === 'string'
+ ? item.text
+ : typeof item.content === 'string'
+ ? item.content
+ : null;
+ if (!textValue) return null;
+
+ const lines = textValue
+ .split('\n')
+ .map((line) => line.trim())
+ .filter(Boolean);
+ const todos = parseTodoLines(lines);
+ return todos.length > 0 ? todos : null;
+}
+
/** Name of the tool used to surface Codex todo-list updates (TodoWrite). */
export function getCodexTodoToolName(): string {
  return TOOL_NAME_TODO;
}
diff --git a/apps/server/src/providers/cursor-config-manager.ts b/apps/server/src/providers/cursor-config-manager.ts
new file mode 100644
index 00000000..aa57d2b6
--- /dev/null
+++ b/apps/server/src/providers/cursor-config-manager.ts
@@ -0,0 +1,197 @@
+/**
+ * Cursor CLI Configuration Manager
+ *
+ * Manages Cursor CLI configuration stored in .automaker/cursor-config.json
+ */
+
+import * as fs from 'fs';
+import * as path from 'path';
+import { getAllCursorModelIds, type CursorCliConfig, type CursorModelId } from '@automaker/types';
+import { createLogger } from '@automaker/utils';
+import { getAutomakerDir } from '@automaker/platform';
+
// Module-scoped logger; all messages from this file are tagged 'CursorConfigManager'.
const logger = createLogger('CursorConfigManager');
+
+/**
+ * Manages Cursor CLI configuration
+ * Config location: .automaker/cursor-config.json
+ */
+export class CursorConfigManager {
+ private configPath: string;
+ private config: CursorCliConfig;
+
+ constructor(projectPath: string) {
+ // Use getAutomakerDir for consistent path resolution
+ this.configPath = path.join(getAutomakerDir(projectPath), 'cursor-config.json');
+ this.config = this.loadConfig();
+ }
+
+ /**
+ * Load configuration from disk
+ */
+ private loadConfig(): CursorCliConfig {
+ try {
+ if (fs.existsSync(this.configPath)) {
+ const content = fs.readFileSync(this.configPath, 'utf8');
+ const parsed = JSON.parse(content) as CursorCliConfig;
+ logger.debug(`Loaded config from ${this.configPath}`);
+ return parsed;
+ }
+ } catch (error) {
+ logger.warn('Failed to load config:', error);
+ }
+
+ // Return default config with all available models
+ return {
+ defaultModel: 'auto',
+ models: getAllCursorModelIds(),
+ };
+ }
+
+ /**
+ * Save configuration to disk
+ */
+ private saveConfig(): void {
+ try {
+ const dir = path.dirname(this.configPath);
+ if (!fs.existsSync(dir)) {
+ fs.mkdirSync(dir, { recursive: true });
+ }
+ fs.writeFileSync(this.configPath, JSON.stringify(this.config, null, 2));
+ logger.debug('Config saved');
+ } catch (error) {
+ logger.error('Failed to save config:', error);
+ throw error;
+ }
+ }
+
+ /**
+ * Get the full configuration
+ */
+ getConfig(): CursorCliConfig {
+ return { ...this.config };
+ }
+
+ /**
+ * Get the default model
+ */
+ getDefaultModel(): CursorModelId {
+ return this.config.defaultModel || 'auto';
+ }
+
+ /**
+ * Set the default model
+ */
+ setDefaultModel(model: CursorModelId): void {
+ this.config.defaultModel = model;
+ this.saveConfig();
+ logger.info(`Default model set to: ${model}`);
+ }
+
+ /**
+ * Get enabled models
+ */
+ getEnabledModels(): CursorModelId[] {
+ return this.config.models || ['auto'];
+ }
+
+ /**
+ * Set enabled models
+ */
+ setEnabledModels(models: CursorModelId[]): void {
+ this.config.models = models;
+ this.saveConfig();
+ logger.info(`Enabled models updated: ${models.join(', ')}`);
+ }
+
+ /**
+ * Add a model to enabled list
+ */
+ addModel(model: CursorModelId): void {
+ if (!this.config.models) {
+ this.config.models = [];
+ }
+ if (!this.config.models.includes(model)) {
+ this.config.models.push(model);
+ this.saveConfig();
+ logger.info(`Model added: ${model}`);
+ }
+ }
+
+ /**
+ * Remove a model from enabled list
+ */
+ removeModel(model: CursorModelId): void {
+ if (this.config.models) {
+ this.config.models = this.config.models.filter((m) => m !== model);
+ this.saveConfig();
+ logger.info(`Model removed: ${model}`);
+ }
+ }
+
+ /**
+ * Check if a model is enabled
+ */
+ isModelEnabled(model: CursorModelId): boolean {
+ return this.config.models?.includes(model) ?? false;
+ }
+
+ /**
+ * Get MCP server configurations
+ */
+ getMcpServers(): string[] {
+ return this.config.mcpServers || [];
+ }
+
+ /**
+ * Set MCP server configurations
+ */
+ setMcpServers(servers: string[]): void {
+ this.config.mcpServers = servers;
+ this.saveConfig();
+ logger.info(`MCP servers updated: ${servers.join(', ')}`);
+ }
+
+ /**
+ * Get Cursor rules paths
+ */
+ getRules(): string[] {
+ return this.config.rules || [];
+ }
+
+ /**
+ * Set Cursor rules paths
+ */
+ setRules(rules: string[]): void {
+ this.config.rules = rules;
+ this.saveConfig();
+ logger.info(`Rules updated: ${rules.join(', ')}`);
+ }
+
+ /**
+ * Reset configuration to defaults
+ */
+ reset(): void {
+ this.config = {
+ defaultModel: 'auto',
+ models: getAllCursorModelIds(),
+ };
+ this.saveConfig();
+ logger.info('Config reset to defaults');
+ }
+
+ /**
+ * Check if config file exists
+ */
+ exists(): boolean {
+ return fs.existsSync(this.configPath);
+ }
+
+ /**
+ * Get the config file path
+ */
+ getConfigPath(): string {
+ return this.configPath;
+ }
+}
diff --git a/apps/server/src/providers/cursor-provider.ts b/apps/server/src/providers/cursor-provider.ts
new file mode 100644
index 00000000..6cefc279
--- /dev/null
+++ b/apps/server/src/providers/cursor-provider.ts
@@ -0,0 +1,1056 @@
+/**
+ * Cursor Provider - Executes queries using cursor-agent CLI
+ *
+ * Extends CliProvider with Cursor-specific:
+ * - Event normalization for Cursor's JSONL format
+ * - Text block deduplication (Cursor sends duplicates)
+ * - Session ID tracking
+ * - Versions directory detection
+ *
+ * Spawns the cursor-agent CLI with --output-format stream-json for streaming responses.
+ */
+
+import { execSync } from 'child_process';
+import * as fs from 'fs';
+import * as path from 'path';
+import * as os from 'os';
+import {
+ CliProvider,
+ type CliSpawnConfig,
+ type CliDetectionResult,
+ type CliErrorInfo,
+} from './cli-provider.js';
+import type {
+ ProviderConfig,
+ ExecuteOptions,
+ ProviderMessage,
+ InstallationStatus,
+ ModelDefinition,
+ ContentBlock,
+} from './types.js';
+import { validateBareModelId } from '@automaker/types';
+import { validateApiKey } from '../lib/auth-utils.js';
+import { getEffectivePermissions } from '../services/cursor-config-service.js';
+import {
+ type CursorStreamEvent,
+ type CursorSystemEvent,
+ type CursorAssistantEvent,
+ type CursorToolCallEvent,
+ type CursorResultEvent,
+ type CursorAuthStatus,
+ CURSOR_MODEL_MAP,
+} from '@automaker/types';
+import { createLogger, isAbortError } from '@automaker/utils';
+import { spawnJSONLProcess, execInWsl } from '@automaker/platform';
+
// Module-scoped logger; all messages from this file are tagged 'CursorProvider'.
const logger = createLogger('CursorProvider');
+
+// =============================================================================
+// Cursor Tool Handler Registry
+// =============================================================================
+
+/**
+ * Tool handler definition for mapping Cursor tool calls to normalized format
+ */
+interface CursorToolHandler {
+ /** The normalized tool name (e.g., 'Read', 'Write') */
+ name: string;
+ /** Extract and normalize input from Cursor's args format */
+ mapInput: (args: TArgs) => unknown;
+ /** Format the result content for display (optional) */
+ formatResult?: (result: TResult, args?: TArgs) => string;
+ /** Format rejected result (optional) */
+ formatRejected?: (reason: string) => string;
+}
+
+/**
+ * Registry of Cursor tool handlers
+ * Each handler knows how to normalize its specific tool call type
+ */
+const CURSOR_TOOL_HANDLERS: Record> = {
+ readToolCall: {
+ name: 'Read',
+ mapInput: (args: { path: string }) => ({ file_path: args.path }),
+ formatResult: (result: { content: string }) => result.content,
+ },
+
+ writeToolCall: {
+ name: 'Write',
+ mapInput: (args: { path: string; fileText: string }) => ({
+ file_path: args.path,
+ content: args.fileText,
+ }),
+ formatResult: (result: { linesCreated: number; path: string }) =>
+ `Wrote ${result.linesCreated} lines to ${result.path}`,
+ },
+
+ editToolCall: {
+ name: 'Edit',
+ mapInput: (args: { path: string; oldText?: string; newText?: string }) => ({
+ file_path: args.path,
+ old_string: args.oldText,
+ new_string: args.newText,
+ }),
+ formatResult: (_result: unknown, args?: { path: string }) => `Edited file: ${args?.path}`,
+ },
+
+ shellToolCall: {
+ name: 'Bash',
+ mapInput: (args: { command: string }) => ({ command: args.command }),
+ formatResult: (result: { exitCode: number; stdout?: string; stderr?: string }) => {
+ let content = `Exit code: ${result.exitCode}`;
+ if (result.stdout) content += `\n${result.stdout}`;
+ if (result.stderr) content += `\nStderr: ${result.stderr}`;
+ return content;
+ },
+ formatRejected: (reason: string) => `Rejected: ${reason}`,
+ },
+
+ deleteToolCall: {
+ name: 'Delete',
+ mapInput: (args: { path: string }) => ({ file_path: args.path }),
+ formatResult: (_result: unknown, args?: { path: string }) => `Deleted: ${args?.path}`,
+ formatRejected: (reason: string) => `Delete rejected: ${reason}`,
+ },
+
+ grepToolCall: {
+ name: 'Grep',
+ mapInput: (args: { pattern: string; path?: string }) => ({
+ pattern: args.pattern,
+ path: args.path,
+ }),
+ formatResult: (result: { matchedLines: number }) =>
+ `Found ${result.matchedLines} matching lines`,
+ },
+
+ lsToolCall: {
+ name: 'Ls',
+ mapInput: (args: { path: string }) => ({ path: args.path }),
+ formatResult: (result: { childrenFiles: number; childrenDirs: number }) =>
+ `Found ${result.childrenFiles} files, ${result.childrenDirs} directories`,
+ },
+
+ globToolCall: {
+ name: 'Glob',
+ mapInput: (args: { globPattern: string; targetDirectory?: string }) => ({
+ pattern: args.globPattern,
+ path: args.targetDirectory,
+ }),
+ formatResult: (result: { totalFiles: number }) => `Found ${result.totalFiles} matching files`,
+ },
+
+ semSearchToolCall: {
+ name: 'SemanticSearch',
+ mapInput: (args: { query: string; targetDirectories?: string[]; explanation?: string }) => ({
+ query: args.query,
+ targetDirectories: args.targetDirectories,
+ explanation: args.explanation,
+ }),
+ formatResult: (result: { results: string; codeResults?: unknown[] }) => {
+ const resultCount = result.codeResults?.length || 0;
+ return resultCount > 0
+ ? `Found ${resultCount} semantic search result(s)`
+ : result.results || 'No results found';
+ },
+ },
+
+ readLintsToolCall: {
+ name: 'ReadLints',
+ mapInput: (args: { paths: string[] }) => ({ paths: args.paths }),
+ formatResult: (result: { totalDiagnostics: number; totalFiles: number }) =>
+ `Found ${result.totalDiagnostics} diagnostic(s) in ${result.totalFiles} file(s)`,
+ },
+};
+
+/**
+ * Process a Cursor tool call using the handler registry
+ * Returns { toolName, toolInput } or null if tool type is unknown
+ */
+function processCursorToolCall(
+ toolCall: CursorToolCallEvent['tool_call']
+): { toolName: string; toolInput: unknown } | null {
+ // Check each registered handler
+ for (const [key, handler] of Object.entries(CURSOR_TOOL_HANDLERS)) {
+ const toolData = toolCall[key as keyof typeof toolCall] as { args?: unknown } | undefined;
+ if (toolData) {
+ // Skip if args not yet populated (partial streaming event)
+ if (!toolData.args) return null;
+ return {
+ toolName: handler.name,
+ toolInput: handler.mapInput(toolData.args),
+ };
+ }
+ }
+
+ // Handle generic function call (fallback)
+ if (toolCall.function) {
+ let toolInput: unknown;
+ try {
+ toolInput = JSON.parse(toolCall.function.arguments || '{}');
+ } catch {
+ toolInput = { raw: toolCall.function.arguments };
+ }
+ return {
+ toolName: toolCall.function.name,
+ toolInput,
+ };
+ }
+
+ return null;
+}
+
+/**
+ * Format the result content for a completed Cursor tool call
+ */
+function formatCursorToolResult(toolCall: CursorToolCallEvent['tool_call']): string {
+ for (const [key, handler] of Object.entries(CURSOR_TOOL_HANDLERS)) {
+ const toolData = toolCall[key as keyof typeof toolCall] as
+ | {
+ args?: unknown;
+ result?: { success?: unknown; rejected?: { reason: string } };
+ }
+ | undefined;
+
+ if (toolData?.result) {
+ if (toolData.result.success && handler.formatResult) {
+ return handler.formatResult(toolData.result.success, toolData.args);
+ }
+ if (toolData.result.rejected && handler.formatRejected) {
+ return handler.formatRejected(toolData.result.rejected.reason);
+ }
+ }
+ }
+
+ return '';
+}
+
+// =============================================================================
+// Error Codes
+// =============================================================================
+
/**
 * Cursor-specific error codes for detailed error handling.
 * Values are stable string identifiers surfaced to callers (see mapError);
 * do not rename without checking consumers.
 */
export enum CursorErrorCode {
  NOT_INSTALLED = 'CURSOR_NOT_INSTALLED',
  NOT_AUTHENTICATED = 'CURSOR_NOT_AUTHENTICATED',
  RATE_LIMITED = 'CURSOR_RATE_LIMITED',
  MODEL_UNAVAILABLE = 'CURSOR_MODEL_UNAVAILABLE',
  NETWORK_ERROR = 'CURSOR_NETWORK_ERROR',
  PROCESS_CRASHED = 'CURSOR_PROCESS_CRASHED',
  TIMEOUT = 'CURSOR_TIMEOUT',
  UNKNOWN = 'CURSOR_UNKNOWN_ERROR',
}
+
/** Error thrown by CursorProvider, carrying a machine-readable code and recovery hint. */
export interface CursorError extends Error {
  // Classifies the failure (see CursorErrorCode).
  code: CursorErrorCode;
  // True when retrying or user action (login, wait) may succeed.
  recoverable: boolean;
  // Human-readable remediation step, when one is known.
  suggestion?: string;
}
+
+/**
+ * CursorProvider - Integrates cursor-agent CLI as an AI provider
+ *
+ * Extends CliProvider with Cursor-specific behavior:
+ * - WSL required on Windows (cursor-agent has no native Windows build)
+ * - Versions directory detection for cursor-agent installations
+ * - Session ID tracking for conversation continuity
+ * - Text block deduplication (Cursor sends duplicate chunks)
+ */
+export class CursorProvider extends CliProvider {
+ /**
+ * Version data directory where cursor-agent stores versions
+ * The install script creates versioned folders like:
+ * ~/.local/share/cursor-agent/versions/2025.12.17-996666f/cursor-agent
+ */
+ private static VERSIONS_DIR = path.join(os.homedir(), '.local/share/cursor-agent/versions');
+
+ constructor(config: ProviderConfig = {}) {
+ super(config);
+ // Trigger CLI detection on construction (eager for Cursor)
+ this.ensureCliDetected();
+ }
+
+ // ==========================================================================
+ // CliProvider Abstract Method Implementations
+ // ==========================================================================
+
+ getName(): string {
+ return 'cursor';
+ }
+
+ getCliName(): string {
+ return 'cursor-agent';
+ }
+
+ getSpawnConfig(): CliSpawnConfig {
+ return {
+ windowsStrategy: 'wsl', // cursor-agent requires WSL on Windows
+ commonPaths: {
+ linux: [
+ path.join(os.homedir(), '.local/bin/cursor-agent'), // Primary symlink location
+ '/usr/local/bin/cursor-agent',
+ ],
+ darwin: [path.join(os.homedir(), '.local/bin/cursor-agent'), '/usr/local/bin/cursor-agent'],
+ // Windows paths are not used - we check for WSL installation instead
+ win32: [],
+ },
+ };
+ }
+
+ /**
+ * Extract prompt text from ExecuteOptions
+ * Used to pass prompt via stdin instead of CLI args to avoid shell escaping issues
+ */
+ private extractPromptText(options: ExecuteOptions): string {
+ if (typeof options.prompt === 'string') {
+ return options.prompt;
+ } else if (Array.isArray(options.prompt)) {
+ return options.prompt
+ .filter((p) => p.type === 'text' && p.text)
+ .map((p) => p.text)
+ .join('\n');
+ } else {
+ throw new Error('Invalid prompt format');
+ }
+ }
+
+ buildCliArgs(options: ExecuteOptions): string[] {
+ // Model is already bare (no prefix) - validated by executeQuery
+ const model = options.model || 'auto';
+
+ // Build CLI arguments for cursor-agent
+ // NOTE: Prompt is NOT included here - it's passed via stdin to avoid
+ // shell escaping issues when content contains $(), backticks, etc.
+ const cliArgs: string[] = [];
+
+ // If using Cursor IDE (cliPath is 'cursor' not 'cursor-agent'), add 'agent' subcommand
+ if (this.cliPath && !this.cliPath.includes('cursor-agent')) {
+ cliArgs.push('agent');
+ }
+
+ cliArgs.push(
+ '-p', // Print mode (non-interactive)
+ '--output-format',
+ 'stream-json',
+ '--stream-partial-output' // Real-time streaming
+ );
+
+ // Only add --force if NOT in read-only mode
+ // Without --force, Cursor CLI suggests changes but doesn't apply them
+ // With --force, Cursor CLI can actually edit files
+ if (!options.readOnly) {
+ cliArgs.push('--force');
+ }
+
+ // Add model if not auto
+ if (model !== 'auto') {
+ cliArgs.push('--model', model);
+ }
+
+ // Use '-' to indicate reading prompt from stdin
+ cliArgs.push('-');
+
+ return cliArgs;
+ }
+
+ /**
+ * Convert Cursor event to AutoMaker ProviderMessage format
+ * Made public as required by CliProvider abstract method
+ */
+ normalizeEvent(event: unknown): ProviderMessage | null {
+ const cursorEvent = event as CursorStreamEvent;
+
+ switch (cursorEvent.type) {
+ case 'system':
+ // System init - we capture session_id but don't yield a message
+ return null;
+
+ case 'user':
+ // User message - already handled by caller
+ return null;
+
+ case 'assistant': {
+ const assistantEvent = cursorEvent as CursorAssistantEvent;
+ return {
+ type: 'assistant',
+ session_id: assistantEvent.session_id,
+ message: {
+ role: 'assistant',
+ content: assistantEvent.message.content.map((c) => ({
+ type: 'text' as const,
+ text: c.text,
+ })),
+ },
+ };
+ }
+
+ case 'tool_call': {
+ const toolEvent = cursorEvent as CursorToolCallEvent;
+ const toolCall = toolEvent.tool_call;
+
+ // Use the tool handler registry to process the tool call
+ const processed = processCursorToolCall(toolCall);
+ if (!processed) {
+ // Log unrecognized tool call structure for debugging
+ const toolCallKeys = Object.keys(toolCall);
+ logger.warn(
+ `[UNHANDLED TOOL_CALL] Unknown tool call structure. Keys: ${toolCallKeys.join(', ')}. ` +
+ `Full tool_call: ${JSON.stringify(toolCall).substring(0, 500)}`
+ );
+ return null;
+ }
+
+ const { toolName, toolInput } = processed;
+
+ // For started events, emit tool_use
+ if (toolEvent.subtype === 'started') {
+ return {
+ type: 'assistant',
+ session_id: toolEvent.session_id,
+ message: {
+ role: 'assistant',
+ content: [
+ {
+ type: 'tool_use',
+ name: toolName,
+ tool_use_id: toolEvent.call_id,
+ input: toolInput,
+ },
+ ],
+ },
+ };
+ }
+
+ // For completed events, emit both tool_use and tool_result
+ if (toolEvent.subtype === 'completed') {
+ const resultContent = formatCursorToolResult(toolCall);
+
+ return {
+ type: 'assistant',
+ session_id: toolEvent.session_id,
+ message: {
+ role: 'assistant',
+ content: [
+ {
+ type: 'tool_use',
+ name: toolName,
+ tool_use_id: toolEvent.call_id,
+ input: toolInput,
+ },
+ {
+ type: 'tool_result',
+ tool_use_id: toolEvent.call_id,
+ content: resultContent,
+ },
+ ],
+ },
+ };
+ }
+
+ return null;
+ }
+
+ case 'result': {
+ const resultEvent = cursorEvent as CursorResultEvent;
+
+ if (resultEvent.is_error) {
+ return {
+ type: 'error',
+ session_id: resultEvent.session_id,
+ error: resultEvent.error || resultEvent.result || 'Unknown error',
+ };
+ }
+
+ return {
+ type: 'result',
+ subtype: 'success',
+ session_id: resultEvent.session_id,
+ result: resultEvent.result,
+ };
+ }
+
+ default:
+ return null;
+ }
+ }
+
+ // ==========================================================================
+ // CliProvider Overrides
+ // ==========================================================================
+
+ /**
+ * Override CLI detection to add Cursor-specific checks:
+ * 1. Versions directory for cursor-agent installations
+ * 2. Cursor IDE with 'cursor agent' subcommand support
+ */
+ protected detectCli(): CliDetectionResult {
+ // First try standard detection (PATH, common paths, WSL)
+ const result = super.detectCli();
+ if (result.cliPath) {
+ return result;
+ }
+
+ // Cursor-specific: Check versions directory for any installed version
+ // This handles cases where cursor-agent is installed but not in PATH
+ if (process.platform !== 'win32' && fs.existsSync(CursorProvider.VERSIONS_DIR)) {
+ try {
+ const versions = fs
+ .readdirSync(CursorProvider.VERSIONS_DIR)
+ .filter((v) => !v.startsWith('.'))
+ .sort()
+ .reverse(); // Most recent first
+
+ for (const version of versions) {
+ const versionPath = path.join(CursorProvider.VERSIONS_DIR, version, 'cursor-agent');
+ if (fs.existsSync(versionPath)) {
+ logger.debug(`Found cursor-agent version ${version} at: ${versionPath}`);
+ return {
+ cliPath: versionPath,
+ useWsl: false,
+ strategy: 'native',
+ };
+ }
+ }
+ } catch {
+ // Ignore directory read errors
+ }
+ }
+
+ // If cursor-agent not found, try to find 'cursor' IDE and use 'cursor agent' subcommand
+ // The Cursor IDE includes the agent as a subcommand: cursor agent
+ if (process.platform !== 'win32') {
+ const cursorPaths = [
+ '/usr/bin/cursor',
+ '/usr/local/bin/cursor',
+ path.join(os.homedir(), '.local/bin/cursor'),
+ '/opt/cursor/cursor',
+ ];
+
+ for (const cursorPath of cursorPaths) {
+ if (fs.existsSync(cursorPath)) {
+ // Verify cursor agent subcommand works
+ try {
+ execSync(`"${cursorPath}" agent --version`, {
+ encoding: 'utf8',
+ timeout: 5000,
+ stdio: 'pipe',
+ });
+ logger.debug(`Using cursor agent via Cursor IDE: ${cursorPath}`);
+ // Return cursor path but we'll use 'cursor agent' subcommand
+ return {
+ cliPath: cursorPath,
+ useWsl: false,
+ strategy: 'native',
+ };
+ } catch {
+ // cursor agent subcommand doesn't work, try next path
+ }
+ }
+ }
+ }
+
+ return result;
+ }
+
+ /**
+ * Override error mapping for Cursor-specific error codes
+ */
+ protected mapError(stderr: string, exitCode: number | null): CliErrorInfo {
+ const lower = stderr.toLowerCase();
+
+ if (
+ lower.includes('not authenticated') ||
+ lower.includes('please log in') ||
+ lower.includes('unauthorized')
+ ) {
+ return {
+ code: CursorErrorCode.NOT_AUTHENTICATED,
+ message: 'Cursor CLI is not authenticated',
+ recoverable: true,
+ suggestion: 'Run "cursor-agent login" to authenticate with your browser',
+ };
+ }
+
+ if (
+ lower.includes('rate limit') ||
+ lower.includes('too many requests') ||
+ lower.includes('429')
+ ) {
+ return {
+ code: CursorErrorCode.RATE_LIMITED,
+ message: 'Cursor API rate limit exceeded',
+ recoverable: true,
+ suggestion: 'Wait a few minutes and try again, or upgrade to Cursor Pro',
+ };
+ }
+
+ if (
+ lower.includes('model not available') ||
+ lower.includes('invalid model') ||
+ lower.includes('unknown model')
+ ) {
+ return {
+ code: CursorErrorCode.MODEL_UNAVAILABLE,
+ message: 'Requested model is not available',
+ recoverable: true,
+ suggestion: 'Try using "auto" mode or select a different model',
+ };
+ }
+
+ if (
+ lower.includes('network') ||
+ lower.includes('connection') ||
+ lower.includes('econnrefused') ||
+ lower.includes('timeout')
+ ) {
+ return {
+ code: CursorErrorCode.NETWORK_ERROR,
+ message: 'Network connection error',
+ recoverable: true,
+ suggestion: 'Check your internet connection and try again',
+ };
+ }
+
+ if (exitCode === 137 || lower.includes('killed') || lower.includes('sigterm')) {
+ return {
+ code: CursorErrorCode.PROCESS_CRASHED,
+ message: 'Cursor agent process was terminated',
+ recoverable: true,
+ suggestion: 'The process may have run out of memory. Try a simpler task.',
+ };
+ }
+
+ return {
+ code: CursorErrorCode.UNKNOWN,
+ message: stderr || `Cursor agent exited with code ${exitCode}`,
+ recoverable: false,
+ };
+ }
+
+ /**
+ * Override install instructions for Cursor-specific guidance
+ */
+ protected getInstallInstructions(): string {
+ if (process.platform === 'win32') {
+ return 'cursor-agent requires WSL on Windows. Install WSL, then run in WSL: curl https://cursor.com/install -fsS | bash';
+ }
+ return 'Install with: curl https://cursor.com/install -fsS | bash';
+ }
+
+ /**
+ * Execute a prompt using Cursor CLI with streaming
+ *
+ * Overrides base class to add:
+ * - Session ID tracking from system init events
+ * - Text block deduplication (Cursor sends duplicate chunks)
+ */
+ async *executeQuery(options: ExecuteOptions): AsyncGenerator {
+ this.ensureCliDetected();
+
+ // Validate that model doesn't have a provider prefix
+ // AgentService should strip prefixes before passing to providers
+ validateBareModelId(options.model, 'CursorProvider');
+
+ if (!this.cliPath) {
+ throw this.createError(
+ CursorErrorCode.NOT_INSTALLED,
+ 'Cursor CLI is not installed',
+ true,
+ this.getInstallInstructions()
+ );
+ }
+
+ // MCP servers are not yet supported by Cursor CLI - log warning but continue
+ if (options.mcpServers && Object.keys(options.mcpServers).length > 0) {
+ const serverCount = Object.keys(options.mcpServers).length;
+ logger.warn(
+ `MCP servers configured (${serverCount}) but not yet supported by Cursor CLI in AutoMaker. ` +
+ `MCP support for Cursor will be added in a future release. ` +
+ `The configured MCP servers will be ignored for this execution.`
+ );
+ }
+
+ // Extract prompt text to pass via stdin (avoids shell escaping issues)
+ const promptText = this.extractPromptText(options);
+
+ const cliArgs = this.buildCliArgs(options);
+ const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
+
+ // Pass prompt via stdin to avoid shell interpretation of special characters
+ // like $(), backticks, etc. that may appear in file content
+ subprocessOptions.stdinData = promptText;
+
+ let sessionId: string | undefined;
+
+ // Dedup state for Cursor-specific text block handling
+ let lastTextBlock = '';
+ let accumulatedText = '';
+
+ logger.debug(`CursorProvider.executeQuery called with model: "${options.model}"`);
+
+ // Get effective permissions for this project
+ const effectivePermissions = await getEffectivePermissions(options.cwd || process.cwd());
+
+ // Debug: log raw events when AUTOMAKER_DEBUG_RAW_OUTPUT is enabled
+ const debugRawEvents =
+ process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === 'true' ||
+ process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === '1';
+
+ try {
+ for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
+ const event = rawEvent as CursorStreamEvent;
+
+ // Log raw event for debugging
+ if (debugRawEvents) {
+ const subtype = 'subtype' in event ? (event.subtype as string) : 'none';
+ logger.info(`[RAW EVENT] type=${event.type} subtype=${subtype}`);
+ if (event.type === 'tool_call') {
+ const toolEvent = event as CursorToolCallEvent;
+ const tc = toolEvent.tool_call;
+ const toolTypes =
+ [
+ tc.readToolCall && 'read',
+ tc.writeToolCall && 'write',
+ tc.editToolCall && 'edit',
+ tc.shellToolCall && 'shell',
+ tc.deleteToolCall && 'delete',
+ tc.grepToolCall && 'grep',
+ tc.lsToolCall && 'ls',
+ tc.globToolCall && 'glob',
+ tc.function && `function:${tc.function.name}`,
+ ]
+ .filter(Boolean)
+ .join(',') || 'unknown';
+ logger.info(
+ `[RAW TOOL_CALL] call_id=${toolEvent.call_id} types=[${toolTypes}]` +
+ (tc.shellToolCall ? ` cmd="${tc.shellToolCall.args?.command}"` : '') +
+ (tc.writeToolCall ? ` path="${tc.writeToolCall.args?.path}"` : '')
+ );
+ }
+ }
+
+ // Capture session ID from system init
+ if (event.type === 'system' && (event as CursorSystemEvent).subtype === 'init') {
+ sessionId = event.session_id;
+ logger.debug(`Session started: ${sessionId}`);
+ }
+
+ // Normalize and yield the event
+ const normalized = this.normalizeEvent(event);
+ if (!normalized && debugRawEvents) {
+ logger.info(`[DROPPED EVENT] type=${event.type} - normalizeEvent returned null`);
+ }
+ if (normalized) {
+ // Ensure session_id is always set
+ if (!normalized.session_id && sessionId) {
+ normalized.session_id = sessionId;
+ }
+
+ // Apply Cursor-specific dedup for assistant text messages
+ if (normalized.type === 'assistant' && normalized.message?.content) {
+ const dedupedContent = this.deduplicateTextBlocks(
+ normalized.message.content,
+ lastTextBlock,
+ accumulatedText
+ );
+
+ if (dedupedContent.content.length === 0) {
+ // All blocks were duplicates, skip this message
+ continue;
+ }
+
+ // Update state
+ lastTextBlock = dedupedContent.lastBlock;
+ accumulatedText = dedupedContent.accumulated;
+
+ // Update the message with deduped content
+ normalized.message.content = dedupedContent.content;
+ }
+
+ yield normalized;
+ }
+ }
+ } catch (error) {
+ if (isAbortError(error)) {
+ logger.debug('Query aborted');
+ return;
+ }
+
+ // Map CLI errors to CursorError
+ if (error instanceof Error && 'stderr' in error) {
+ const errorInfo = this.mapError(
+ (error as { stderr?: string }).stderr || error.message,
+ (error as { exitCode?: number | null }).exitCode ?? null
+ );
+ throw this.createError(
+ errorInfo.code as CursorErrorCode,
+ errorInfo.message,
+ errorInfo.recoverable,
+ errorInfo.suggestion
+ );
+ }
+ throw error;
+ }
+ }
+
+ // ==========================================================================
+ // Cursor-Specific Methods
+ // ==========================================================================
+
+ /**
+ * Create a CursorError with details
+ */
+ private createError(
+ code: CursorErrorCode,
+ message: string,
+ recoverable: boolean = false,
+ suggestion?: string
+ ): CursorError {
+ const error = new Error(message) as CursorError;
+ error.code = code;
+ error.recoverable = recoverable;
+ error.suggestion = suggestion;
+ error.name = 'CursorError';
+ return error;
+ }
+
+ /**
+ * Deduplicate text blocks in Cursor assistant messages
+ *
+ * Cursor often sends:
+ * 1. Duplicate consecutive text blocks (same text twice in a row)
+ * 2. A final accumulated block containing ALL previous text
+ *
+ * This method filters out these duplicates to prevent UI stuttering.
+ */
+ private deduplicateTextBlocks(
+ content: ContentBlock[],
+ lastTextBlock: string,
+ accumulatedText: string
+ ): { content: ContentBlock[]; lastBlock: string; accumulated: string } {
+ const filtered: ContentBlock[] = [];
+ let newLastBlock = lastTextBlock;
+ let newAccumulated = accumulatedText;
+
+ for (const block of content) {
+ if (block.type !== 'text' || !block.text) {
+ filtered.push(block);
+ continue;
+ }
+
+ const text = block.text;
+
+ // Skip empty text
+ if (!text.trim()) continue;
+
+ // Skip duplicate consecutive text blocks
+ if (text === newLastBlock) {
+ continue;
+ }
+
+ // Skip final accumulated text block
+ // Cursor sends one large block containing ALL previous text at the end
+ if (newAccumulated.length > 100 && text.length > newAccumulated.length * 0.8) {
+ const normalizedAccum = newAccumulated.replace(/\s+/g, ' ').trim();
+ const normalizedNew = text.replace(/\s+/g, ' ').trim();
+ if (normalizedNew.includes(normalizedAccum.slice(0, 100))) {
+ // This is the final accumulated block, skip it
+ continue;
+ }
+ }
+
+ // This is a valid new text block
+ newLastBlock = text;
+ newAccumulated += text;
+ filtered.push(block);
+ }
+
+ return {
+ content: filtered,
+ lastBlock: newLastBlock,
+ accumulated: newAccumulated,
+ };
+ }
+
+ /**
+ * Get Cursor CLI version
+ */
+ async getVersion(): Promise {
+ this.ensureCliDetected();
+ if (!this.cliPath) return null;
+
+ try {
+ if (this.useWsl && this.wslCliPath) {
+ const result = execInWsl(`${this.wslCliPath} --version`, {
+ timeout: 5000,
+ distribution: this.wslDistribution,
+ });
+ return result;
+ }
+
+ // If using Cursor IDE, use 'cursor agent --version'
+ const versionCmd = this.cliPath.includes('cursor-agent')
+ ? `"${this.cliPath}" --version`
+ : `"${this.cliPath}" agent --version`;
+
+ const result = execSync(versionCmd, {
+ encoding: 'utf8',
+ timeout: 5000,
+ stdio: 'pipe',
+ }).trim();
+ return result;
+ } catch {
+ return null;
+ }
+ }
+
+ /**
+ * Check authentication status
+ */
+ async checkAuth(): Promise {
+ this.ensureCliDetected();
+ if (!this.cliPath) {
+ return { authenticated: false, method: 'none' };
+ }
+
+ // Check for API key in environment with validation
+ if (process.env.CURSOR_API_KEY) {
+ const validation = validateApiKey(process.env.CURSOR_API_KEY, 'cursor');
+ if (!validation.isValid) {
+ logger.warn('Cursor API key validation failed:', validation.error);
+ return { authenticated: false, method: 'api_key', error: validation.error };
+ }
+ return { authenticated: true, method: 'api_key' };
+ }
+
+ // For WSL mode, check credentials inside WSL
+ if (this.useWsl && this.wslCliPath) {
+ const wslOpts = { timeout: 5000, distribution: this.wslDistribution };
+
+ // Check for credentials file inside WSL
+ const wslCredPaths = [
+ '$HOME/.cursor/credentials.json',
+ '$HOME/.config/cursor/credentials.json',
+ ];
+
+ for (const credPath of wslCredPaths) {
+ const content = execInWsl(`sh -c "cat ${credPath} 2>/dev/null || echo ''"`, wslOpts);
+ if (content && content.trim()) {
+ try {
+ const creds = JSON.parse(content);
+ if (creds.accessToken || creds.token) {
+ return { authenticated: true, method: 'login', hasCredentialsFile: true };
+ }
+ } catch {
+ // Invalid credentials file
+ }
+ }
+ }
+
+ // Try running --version to check if CLI works
+ const versionResult = execInWsl(`${this.wslCliPath} --version`, {
+ timeout: 10000,
+ distribution: this.wslDistribution,
+ });
+ if (versionResult) {
+ return { authenticated: true, method: 'login' };
+ }
+
+ return { authenticated: false, method: 'none' };
+ }
+
+ // Native mode (Linux/macOS) - check local credentials
+ const credentialPaths = [
+ path.join(os.homedir(), '.cursor', 'credentials.json'),
+ path.join(os.homedir(), '.config', 'cursor', 'credentials.json'),
+ ];
+
+ for (const credPath of credentialPaths) {
+ if (fs.existsSync(credPath)) {
+ try {
+ const content = fs.readFileSync(credPath, 'utf8');
+ const creds = JSON.parse(content);
+ if (creds.accessToken || creds.token) {
+ return { authenticated: true, method: 'login', hasCredentialsFile: true };
+ }
+ } catch {
+ // Invalid credentials file
+ }
+ }
+ }
+
+ // Try running a simple command to check auth
+ try {
+ execSync(`"${this.cliPath}" --version`, {
+ encoding: 'utf8',
+ timeout: 10000,
+ env: { ...process.env },
+ });
+ return { authenticated: true, method: 'login' };
+ } catch (error: unknown) {
+ const execError = error as { stderr?: string };
+ if (execError.stderr?.includes('not authenticated') || execError.stderr?.includes('log in')) {
+ return { authenticated: false, method: 'none' };
+ }
+ }
+
+ return { authenticated: false, method: 'none' };
+ }
+
+ /**
+ * Detect installation status (required by BaseProvider)
+ */
+ async detectInstallation(): Promise {
+ const installed = await this.isInstalled();
+ const version = installed ? await this.getVersion() : undefined;
+ const auth = await this.checkAuth();
+
+ // Determine the display path - for WSL, show the WSL path with distribution
+ const displayPath =
+ this.useWsl && this.wslCliPath
+ ? `(WSL${this.wslDistribution ? `:${this.wslDistribution}` : ''}) ${this.wslCliPath}`
+ : this.cliPath || undefined;
+
+ return {
+ installed,
+ version: version || undefined,
+ path: displayPath,
+ method: this.useWsl ? 'wsl' : 'cli',
+ hasApiKey: !!process.env.CURSOR_API_KEY,
+ authenticated: auth.authenticated,
+ };
+ }
+
+ /**
+ * Get the detected CLI path (public accessor for status endpoints)
+ */
+ getCliPath(): string | null {
+ this.ensureCliDetected();
+ return this.cliPath;
+ }
+
+ /**
+ * Get available Cursor models
+ */
+ getAvailableModels(): ModelDefinition[] {
+ return Object.entries(CURSOR_MODEL_MAP).map(([id, config]) => ({
+ id: `cursor-${id}`,
+ name: config.label,
+ modelString: id,
+ provider: 'cursor',
+ description: config.description,
+ supportsTools: true,
+ supportsVision: config.supportsVision,
+ }));
+ }
+
+ /**
+ * Check if a feature is supported
+ */
+ supportsFeature(feature: string): boolean {
+ const supported = ['tools', 'text', 'streaming'];
+ return supported.includes(feature);
+ }
+}
diff --git a/apps/server/src/providers/index.ts b/apps/server/src/providers/index.ts
new file mode 100644
index 00000000..b53695f6
--- /dev/null
+++ b/apps/server/src/providers/index.ts
@@ -0,0 +1,40 @@
+/**
+ * Provider exports
+ */
+
+// Base providers
+export { BaseProvider } from './base-provider.js';
+export {
+ CliProvider,
+ type SpawnStrategy,
+ type CliSpawnConfig,
+ type CliErrorInfo,
+} from './cli-provider.js';
+export type {
+ ProviderConfig,
+ ExecuteOptions,
+ ProviderMessage,
+ InstallationStatus,
+ ModelDefinition,
+} from './types.js';
+
+// Claude provider
+export { ClaudeProvider } from './claude-provider.js';
+
+// Cursor provider
+export { CursorProvider, CursorErrorCode, CursorError } from './cursor-provider.js';
+export { CursorConfigManager } from './cursor-config-manager.js';
+
+// OpenCode provider
+export { OpencodeProvider } from './opencode-provider.js';
+
+// Provider factory
+export { ProviderFactory } from './provider-factory.js';
+
+// Simple query service - unified interface for basic AI queries
+export { simpleQuery, streamingQuery } from './simple-query-service.js';
+export type {
+ SimpleQueryOptions,
+ SimpleQueryResult,
+ StreamingQueryOptions,
+} from './simple-query-service.js';
diff --git a/apps/server/src/providers/opencode-provider.ts b/apps/server/src/providers/opencode-provider.ts
new file mode 100644
index 00000000..a5b3bae2
--- /dev/null
+++ b/apps/server/src/providers/opencode-provider.ts
@@ -0,0 +1,1194 @@
+/**
+ * OpenCode Provider - Executes queries using opencode CLI
+ *
+ * Extends CliProvider with OpenCode-specific configuration:
+ * - Event normalization for OpenCode's stream-json format
+ * - Dynamic model discovery via `opencode models` CLI command
+ * - NPX-based Windows execution strategy
+ * - Platform-specific npm global installation paths
+ *
+ * Spawns the opencode CLI with --format json for JSONL streaming responses.
+ */
+
+import * as path from 'path';
+import * as os from 'os';
+import { execFile } from 'child_process';
+import { promisify } from 'util';
+import { CliProvider, type CliSpawnConfig } from './cli-provider.js';
+
+const execFileAsync = promisify(execFile);
+import type {
+ ProviderConfig,
+ ExecuteOptions,
+ ProviderMessage,
+ ModelDefinition,
+ InstallationStatus,
+ ContentBlock,
+} from '@automaker/types';
+import { stripProviderPrefix } from '@automaker/types';
+import { type SubprocessOptions, getOpenCodeAuthIndicators } from '@automaker/platform';
+import { createLogger } from '@automaker/utils';
+
+// Create logger for OpenCode operations
+const opencodeLogger = createLogger('OpencodeProvider');
+
+// =============================================================================
+// OpenCode Auth Types
+// =============================================================================
+
+export interface OpenCodeAuthStatus {
+  /** True when any credential (OAuth token or API key) was found */
+  authenticated: boolean;
+  /** Credential type that authenticated the CLI ('none' when unauthenticated) */
+  method: 'api_key' | 'oauth' | 'none';
+  /** Whether an OAuth token is present in the auth file */
+  hasOAuthToken?: boolean;
+  /** Whether an API key is present in the auth file */
+  hasApiKey?: boolean;
+}
+
+// =============================================================================
+// OpenCode Dynamic Model Types
+// =============================================================================
+
+/**
+ * Model information from `opencode models` CLI output
+ */
+export interface OpenCodeModelInfo {
+ /** Full model ID (e.g., "copilot/claude-sonnet-4-5") */
+ id: string;
+ /** Provider name (e.g., "copilot", "anthropic", "openai") */
+ provider: string;
+ /** Model name without provider prefix */
+ name: string;
+ /** Display name for UI */
+ displayName?: string;
+}
+
+/**
+ * Provider information from `opencode auth list` CLI output
+ */
+export interface OpenCodeProviderInfo {
+ /** Provider ID (e.g., "copilot", "anthropic") */
+ id: string;
+ /** Human-readable name */
+ name: string;
+ /** Whether the provider is authenticated */
+ authenticated: boolean;
+ /** Authentication method if authenticated */
+ authMethod?: 'oauth' | 'api_key';
+}
+
+/** Cache duration for dynamic model fetching (5 minutes) */
+const MODEL_CACHE_DURATION_MS = 5 * 60 * 1000;
+/** Separator between provider and model segments in CLI model IDs ("provider/model") */
+const OPENCODE_MODEL_ID_SEPARATOR = '/';
+/** Shape check for a full model ID line, e.g. "anthropic/claude-3-5-haiku" */
+const OPENCODE_MODEL_ID_PATTERN = /^[a-z0-9.-]+\/\S+$/;
+/** Valid provider segment: lowercase letters, digits, dots, hyphens */
+const OPENCODE_PROVIDER_PATTERN = /^[a-z0-9.-]+$/;
+/** Valid model-name segment; allows nested paths like "openrouter/anthropic/claude" */
+const OPENCODE_MODEL_NAME_PATTERN = /^[a-zA-Z0-9._:/-]+$/;
+
+// =============================================================================
+// OpenCode Stream Event Types
+// =============================================================================
+
+/**
+ * Part object within OpenCode events
+ */
+interface OpenCodePart {
+ id?: string;
+ sessionID?: string;
+ messageID?: string;
+ type: string;
+ text?: string;
+ reason?: string;
+ error?: string;
+ name?: string;
+ args?: unknown;
+ call_id?: string;
+ output?: string;
+ tokens?: {
+ input?: number;
+ output?: number;
+ reasoning?: number;
+ };
+}
+
+/**
+ * Base interface for all OpenCode stream events
+ * Format: {"type":"event_type","timestamp":...,"sessionID":"...","part":{...}}
+ */
+interface OpenCodeBaseEvent {
+ /** Event type identifier (step_start, text, step_finish, tool_call, etc.) */
+ type: string;
+ /** Unix timestamp */
+ timestamp?: number;
+ /** Session identifier */
+ sessionID?: string;
+ /** Event details */
+ part?: OpenCodePart;
+}
+
+/**
+ * Text event - Text output from the model
+ */
+export interface OpenCodeTextEvent extends OpenCodeBaseEvent {
+ type: 'text';
+ part: OpenCodePart & { type: 'text'; text: string };
+}
+
+/**
+ * Step start event - Begins an agentic loop iteration
+ */
+export interface OpenCodeStepStartEvent extends OpenCodeBaseEvent {
+ type: 'step_start';
+ part: OpenCodePart & { type: 'step-start' };
+}
+
+/**
+ * Step finish event - Completes an agentic loop iteration
+ */
+export interface OpenCodeStepFinishEvent extends OpenCodeBaseEvent {
+ type: 'step_finish';
+ part: OpenCodePart & { type: 'step-finish'; reason?: string };
+}
+
+/**
+ * Tool call event - Request to execute a tool
+ */
+export interface OpenCodeToolCallEvent extends OpenCodeBaseEvent {
+ type: 'tool_call';
+ part: OpenCodePart & { type: 'tool-call'; name: string; args?: unknown };
+}
+
+/**
+ * Tool result event - Output from a tool execution
+ */
+export interface OpenCodeToolResultEvent extends OpenCodeBaseEvent {
+ type: 'tool_result';
+ part: OpenCodePart & { type: 'tool-result'; output: string };
+}
+
+/**
+ * Error details object in error events
+ */
+interface OpenCodeErrorDetails {
+ name?: string;
+ message?: string;
+ data?: {
+ message?: string;
+ statusCode?: number;
+ isRetryable?: boolean;
+ };
+}
+
+/**
+ * Error event - An error occurred
+ */
+export interface OpenCodeErrorEvent extends OpenCodeBaseEvent {
+ type: 'error';
+ part?: OpenCodePart & { error: string };
+ error?: string | OpenCodeErrorDetails;
+}
+
+/**
+ * Tool error event - A tool execution failed
+ */
+export interface OpenCodeToolErrorEvent extends OpenCodeBaseEvent {
+ type: 'tool_error';
+ part?: OpenCodePart & { error: string };
+}
+
+/**
+ * Union type of all OpenCode stream events
+ */
+export type OpenCodeStreamEvent =
+ | OpenCodeTextEvent
+ | OpenCodeStepStartEvent
+ | OpenCodeStepFinishEvent
+ | OpenCodeToolCallEvent
+ | OpenCodeToolResultEvent
+ | OpenCodeErrorEvent
+ | OpenCodeToolErrorEvent;
+
+// =============================================================================
+// Tool Use ID Generation
+// =============================================================================
+
+/** Counter for generating unique tool use IDs when call_id is not provided */
+let toolUseIdCounter = 0;
+
+/**
+ * Generate a unique tool use ID for tool calls without explicit IDs
+ *
+ * IDs are monotonically increasing ("opencode-tool-1", "opencode-tool-2", ...).
+ * Backed by module-level state, so uniqueness holds only within this
+ * process lifetime.
+ */
+function generateToolUseId(): string {
+  toolUseIdCounter += 1;
+  return `opencode-tool-${toolUseIdCounter}`;
+}
+
+/**
+ * Reset the tool use ID counter (useful for testing)
+ */
+export function resetToolUseIdCounter(): void {
+  toolUseIdCounter = 0;
+}
+
+// =============================================================================
+// Provider Implementation
+// =============================================================================
+
+/**
+ * OpencodeProvider - Integrates opencode CLI as an AI provider
+ *
+ * OpenCode is an npm-distributed CLI tool that provides access to
+ * multiple AI model providers through a unified interface.
+ *
+ * Supports dynamic model discovery via `opencode models` CLI command,
+ * enabling access to 75+ providers including GitHub Copilot, Google,
+ * Anthropic, OpenAI, and more based on user authentication.
+ */
+export class OpencodeProvider extends CliProvider {
+ // ==========================================================================
+ // Dynamic Model Cache
+ // ==========================================================================
+
+  /** Cached model definitions */
+  private cachedModels: ModelDefinition[] | null = null;
+
+  /** Timestamp (ms since epoch) when the model cache expires */
+  private modelsCacheExpiry: number = 0;
+
+  /** Cached authenticated providers */
+  private cachedProviders: OpenCodeProviderInfo[] | null = null;
+
+  /** Whether model refresh is in progress */
+  private isRefreshing: boolean = false;
+
+  /** Promise that resolves when current refresh completes */
+  private refreshPromise: Promise<ModelDefinition[]> | null = null;
+
+  constructor(config: ProviderConfig = {}) {
+    super(config);
+  }
+
+ // ==========================================================================
+ // CliProvider Abstract Method Implementations
+ // ==========================================================================
+
+  /** Provider name used for routing and logging */
+  getName(): string {
+    return 'opencode';
+  }
+
+  /** Executable name of the underlying CLI binary */
+  getCliName(): string {
+    return 'opencode';
+  }
+
+ getSpawnConfig(): CliSpawnConfig {
+ return {
+ windowsStrategy: 'npx',
+ npxPackage: 'opencode-ai@latest',
+ commonPaths: {
+ linux: [
+ path.join(os.homedir(), '.opencode/bin/opencode'),
+ path.join(os.homedir(), '.npm-global/bin/opencode'),
+ '/usr/local/bin/opencode',
+ '/usr/bin/opencode',
+ path.join(os.homedir(), '.local/bin/opencode'),
+ ],
+ darwin: [
+ path.join(os.homedir(), '.opencode/bin/opencode'),
+ path.join(os.homedir(), '.npm-global/bin/opencode'),
+ '/usr/local/bin/opencode',
+ '/opt/homebrew/bin/opencode',
+ path.join(os.homedir(), '.local/bin/opencode'),
+ ],
+ win32: [
+ path.join(os.homedir(), '.opencode', 'bin', 'opencode.exe'),
+ path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'opencode.cmd'),
+ path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'opencode'),
+ path.join(process.env.APPDATA || '', 'npm', 'opencode.cmd'),
+ ],
+ },
+ };
+ }
+
+ /**
+ * Build CLI arguments for the `opencode run` command
+ *
+ * Arguments built:
+ * - 'run' subcommand for executing queries
+ * - '--format', 'json' for JSONL streaming output
+ * - '-c', '' for working directory (using opencode's -c flag)
+ * - '--model', '' for model selection (if specified)
+ *
+ * The prompt is passed via stdin (piped) to avoid shell escaping issues.
+ * OpenCode CLI automatically reads from stdin when input is piped.
+ *
+ * @param options - Execution options containing model, cwd, etc.
+ * @returns Array of CLI arguments for opencode run
+ */
+ buildCliArgs(options: ExecuteOptions): string[] {
+ const args: string[] = ['run'];
+
+ // Add JSON output format for JSONL parsing (not 'stream-json')
+ args.push('--format', 'json');
+
+ // Handle model selection
+ // Strip 'opencode-' prefix if present, OpenCode uses format like 'anthropic/claude-sonnet-4-5'
+ if (options.model) {
+ const model = stripProviderPrefix(options.model);
+ args.push('--model', model);
+ }
+
+ // Note: OpenCode reads from stdin automatically when input is piped
+ // No '-' argument needed
+
+ return args;
+ }
+
+ // ==========================================================================
+ // Prompt Handling
+ // ==========================================================================
+
+ /**
+ * Extract prompt text from ExecuteOptions for passing via stdin
+ *
+ * Handles both string prompts and array-based prompts with content blocks.
+ * For array prompts with images, extracts only text content (images would
+ * need separate handling via file paths if OpenCode supports them).
+ *
+ * @param options - Execution options containing the prompt
+ * @returns Plain text prompt string
+ */
+ private extractPromptText(options: ExecuteOptions): string {
+ if (typeof options.prompt === 'string') {
+ return options.prompt;
+ }
+
+ // Array-based prompt - extract text content
+ if (Array.isArray(options.prompt)) {
+ return options.prompt
+ .filter((block) => block.type === 'text' && block.text)
+ .map((block) => block.text)
+ .join('\n');
+ }
+
+ throw new Error('Invalid prompt format: expected string or content block array');
+ }
+
+ /**
+ * Build subprocess options with stdin data for prompt
+ *
+ * Extends the base class method to add stdinData containing the prompt.
+ * This allows passing prompts via stdin instead of CLI arguments,
+ * avoiding shell escaping issues with special characters.
+ *
+ * @param options - Execution options
+ * @param cliArgs - CLI arguments from buildCliArgs
+ * @returns SubprocessOptions with stdinData set
+ */
+ protected buildSubprocessOptions(options: ExecuteOptions, cliArgs: string[]): SubprocessOptions {
+ const subprocessOptions = super.buildSubprocessOptions(options, cliArgs);
+
+ // Pass prompt via stdin to avoid shell interpretation of special characters
+ // like $(), backticks, quotes, etc. that may appear in prompts or file content
+ subprocessOptions.stdinData = this.extractPromptText(options);
+
+ return subprocessOptions;
+ }
+
+ /**
+ * Normalize a raw CLI event to ProviderMessage format
+ *
+ * Maps OpenCode event types to the standard ProviderMessage structure:
+ * - text -> type: 'assistant', content with type: 'text'
+ * - step_start -> null (informational, no message needed)
+ * - step_finish with reason 'stop' -> type: 'result', subtype: 'success'
+ * - step_finish with error -> type: 'error'
+ * - tool_call -> type: 'assistant', content with type: 'tool_use'
+ * - tool_result -> type: 'assistant', content with type: 'tool_result'
+ * - error -> type: 'error'
+ *
+ * @param event - Raw event from OpenCode CLI JSONL output
+ * @returns Normalized ProviderMessage or null to skip the event
+ */
+ normalizeEvent(event: unknown): ProviderMessage | null {
+ if (!event || typeof event !== 'object') {
+ return null;
+ }
+
+ const openCodeEvent = event as OpenCodeStreamEvent;
+
+ switch (openCodeEvent.type) {
+ case 'text': {
+ const textEvent = openCodeEvent as OpenCodeTextEvent;
+
+ // Skip empty text
+ if (!textEvent.part?.text) {
+ return null;
+ }
+
+ const content: ContentBlock[] = [
+ {
+ type: 'text',
+ text: textEvent.part.text,
+ },
+ ];
+
+ return {
+ type: 'assistant',
+ session_id: textEvent.sessionID,
+ message: {
+ role: 'assistant',
+ content,
+ },
+ };
+ }
+
+ case 'step_start': {
+ // Step start is informational - no message needed
+ return null;
+ }
+
+ case 'step_finish': {
+ const finishEvent = openCodeEvent as OpenCodeStepFinishEvent;
+
+ // Check if the step failed - either by error property or reason='error'
+ if (finishEvent.part?.error) {
+ return {
+ type: 'error',
+ session_id: finishEvent.sessionID,
+ error: finishEvent.part.error,
+ };
+ }
+
+ // Check if reason indicates error (even without explicit error text)
+ if (finishEvent.part?.reason === 'error') {
+ return {
+ type: 'error',
+ session_id: finishEvent.sessionID,
+ error: 'Step execution failed',
+ };
+ }
+
+ // Successful completion (reason: 'stop' or 'end_turn')
+ return {
+ type: 'result',
+ subtype: 'success',
+ session_id: finishEvent.sessionID,
+ result: (finishEvent.part as OpenCodePart & { result?: string })?.result,
+ };
+ }
+
+ case 'tool_error': {
+ const toolErrorEvent = openCodeEvent as OpenCodeBaseEvent;
+
+ // Extract error message from part.error
+ const errorMessage = toolErrorEvent.part?.error || 'Tool execution failed';
+
+ return {
+ type: 'error',
+ session_id: toolErrorEvent.sessionID,
+ error: errorMessage,
+ };
+ }
+
+ case 'tool_call': {
+ const toolEvent = openCodeEvent as OpenCodeToolCallEvent;
+
+ // Generate a tool use ID if not provided
+ const toolUseId = toolEvent.part?.call_id || generateToolUseId();
+
+ const content: ContentBlock[] = [
+ {
+ type: 'tool_use',
+ name: toolEvent.part?.name || 'unknown',
+ tool_use_id: toolUseId,
+ input: toolEvent.part?.args,
+ },
+ ];
+
+ return {
+ type: 'assistant',
+ session_id: toolEvent.sessionID,
+ message: {
+ role: 'assistant',
+ content,
+ },
+ };
+ }
+
+ case 'tool_result': {
+ const resultEvent = openCodeEvent as OpenCodeToolResultEvent;
+
+ const content: ContentBlock[] = [
+ {
+ type: 'tool_result',
+ tool_use_id: resultEvent.part?.call_id,
+ content: resultEvent.part?.output || '',
+ },
+ ];
+
+ return {
+ type: 'assistant',
+ session_id: resultEvent.sessionID,
+ message: {
+ role: 'assistant',
+ content,
+ },
+ };
+ }
+
+ case 'error': {
+ const errorEvent = openCodeEvent as OpenCodeErrorEvent;
+
+ // Extract error message from various formats
+ let errorMessage = 'Unknown error';
+ if (errorEvent.error) {
+ if (typeof errorEvent.error === 'string') {
+ errorMessage = errorEvent.error;
+ } else {
+ // Error is an object with name/data structure
+ errorMessage =
+ errorEvent.error.data?.message ||
+ errorEvent.error.message ||
+ errorEvent.error.name ||
+ 'Unknown error';
+ }
+ } else if (errorEvent.part?.error) {
+ errorMessage = errorEvent.part.error;
+ }
+
+ return {
+ type: 'error',
+ session_id: errorEvent.sessionID,
+ error: errorMessage,
+ };
+ }
+
+ default: {
+ // Unknown event type - skip it
+ return null;
+ }
+ }
+ }
+
+ // ==========================================================================
+ // Model Configuration
+ // ==========================================================================
+
+  /**
+   * Get available models for OpenCode
+   *
+   * Returns cached models if available and not expired.
+   * If the cache exists but is expired, the stale list is returned
+   * immediately and a fire-and-forget background refresh is started.
+   * Falls back to default models if cache is empty or CLI is unavailable.
+   * NOTE(review): the empty-cache path does NOT start a background
+   * refresh, so dynamic models only appear after an explicit
+   * refreshModels() call -- confirm this is intended.
+   *
+   * Use `refreshModels()` to force a fresh fetch from the CLI.
+   */
+  getAvailableModels(): ModelDefinition[] {
+    // Return cached models if available and not expired
+    if (this.cachedModels && Date.now() < this.modelsCacheExpiry) {
+      return this.cachedModels;
+    }
+
+    // Return cached models even if expired (better than nothing)
+    if (this.cachedModels) {
+      // Trigger background refresh
+      this.refreshModels().catch((err) => {
+        opencodeLogger.debug(`Background model refresh failed: ${err}`);
+      });
+      return this.cachedModels;
+    }
+
+    // Return default models while cache is empty
+    return this.getDefaultModels();
+  }
+
+ /**
+ * Get default hardcoded models (fallback when CLI is unavailable)
+ */
+ private getDefaultModels(): ModelDefinition[] {
+ return [
+ // OpenCode Free Tier Models
+ {
+ id: 'opencode/big-pickle',
+ name: 'Big Pickle (Free)',
+ modelString: 'opencode/big-pickle',
+ provider: 'opencode',
+ description: 'OpenCode free tier model - great for general coding',
+ supportsTools: true,
+ supportsVision: false,
+ tier: 'basic',
+ default: true,
+ },
+ {
+ id: 'opencode/glm-4.7-free',
+ name: 'GLM 4.7 Free',
+ modelString: 'opencode/glm-4.7-free',
+ provider: 'opencode',
+ description: 'OpenCode free tier GLM model',
+ supportsTools: true,
+ supportsVision: false,
+ tier: 'basic',
+ },
+ {
+ id: 'opencode/gpt-5-nano',
+ name: 'GPT-5 Nano (Free)',
+ modelString: 'opencode/gpt-5-nano',
+ provider: 'opencode',
+ description: 'Fast and lightweight free tier model',
+ supportsTools: true,
+ supportsVision: false,
+ tier: 'basic',
+ },
+ {
+ id: 'opencode/grok-code',
+ name: 'Grok Code (Free)',
+ modelString: 'opencode/grok-code',
+ provider: 'opencode',
+ description: 'OpenCode free tier Grok model for coding',
+ supportsTools: true,
+ supportsVision: false,
+ tier: 'basic',
+ },
+ {
+ id: 'opencode/minimax-m2.1-free',
+ name: 'MiniMax M2.1 Free',
+ modelString: 'opencode/minimax-m2.1-free',
+ provider: 'opencode',
+ description: 'OpenCode free tier MiniMax model',
+ supportsTools: true,
+ supportsVision: false,
+ tier: 'basic',
+ },
+ ];
+ }
+
+ // ==========================================================================
+ // Dynamic Model Discovery
+ // ==========================================================================
+
+  /**
+   * Refresh models from OpenCode CLI
+   *
+   * Fetches available models using `opencode models` command and updates cache.
+   * Concurrent calls are coalesced: while a refresh is in flight, callers
+   * await the same promise instead of spawning another CLI process.
+   *
+   * @returns The updated model definitions (existing cache or defaults on failure)
+   */
+  async refreshModels(): Promise<ModelDefinition[]> {
+    // If refresh is in progress, wait for existing promise instead of busy-waiting
+    if (this.isRefreshing && this.refreshPromise) {
+      opencodeLogger.debug('Model refresh already in progress, waiting for completion...');
+      return this.refreshPromise;
+    }
+
+    this.isRefreshing = true;
+    opencodeLogger.debug('Starting model refresh from OpenCode CLI');
+
+    this.refreshPromise = this.doRefreshModels();
+    try {
+      return await this.refreshPromise;
+    } finally {
+      this.refreshPromise = null;
+      this.isRefreshing = false;
+    }
+  }
+
+  /**
+   * Internal method that performs the actual model refresh
+   *
+   * Never throws: on failure the existing cache (or the hardcoded
+   * defaults) is returned so callers always get a usable list.
+   */
+  private async doRefreshModels(): Promise<ModelDefinition[]> {
+    try {
+      const models = await this.fetchModelsFromCli();
+
+      if (models.length > 0) {
+        this.cachedModels = models;
+        this.modelsCacheExpiry = Date.now() + MODEL_CACHE_DURATION_MS;
+        opencodeLogger.debug(`Cached ${models.length} models from OpenCode CLI`);
+      } else {
+        // Keep existing cache if fetch returned nothing
+        opencodeLogger.debug('No models returned from CLI, keeping existing cache');
+      }
+
+      return this.cachedModels || this.getDefaultModels();
+    } catch (error) {
+      opencodeLogger.debug(`Model refresh failed: ${error}`);
+      // Return existing cache or defaults on error
+      return this.cachedModels || this.getDefaultModels();
+    }
+  }
+
+  /**
+   * Fetch models from OpenCode CLI using `opencode models` command
+   *
+   * Uses async execFile to avoid blocking the event loop. Picks the spawn
+   * command based on the detected strategy (npx, WSL, or direct binary).
+   * Returns [] when the CLI is unavailable or the command fails.
+   */
+  private async fetchModelsFromCli(): Promise<ModelDefinition[]> {
+    this.ensureCliDetected();
+
+    if (!this.cliPath) {
+      opencodeLogger.debug('OpenCode CLI not available for model fetch');
+      return [];
+    }
+
+    try {
+      let command: string;
+      let args: string[];
+
+      if (this.detectedStrategy === 'npx') {
+        // NPX strategy: execute npx with opencode-ai package
+        command = 'npx';
+        args = ['opencode-ai@latest', 'models'];
+      } else if (this.useWsl && this.wslCliPath) {
+        // WSL strategy: execute via wsl.exe
+        command = 'wsl.exe';
+        args = this.wslDistribution
+          ? ['-d', this.wslDistribution, this.wslCliPath, 'models']
+          : [this.wslCliPath, 'models'];
+      } else {
+        // Direct CLI execution
+        command = this.cliPath;
+        args = ['models'];
+      }
+      // Single log site instead of one per branch
+      opencodeLogger.debug(`Executing: ${command} ${args.join(' ')}`);
+
+      const { stdout } = await execFileAsync(command, args, {
+        encoding: 'utf-8',
+        timeout: 30000,
+        windowsHide: true,
+      });
+
+      opencodeLogger.debug(
+        `Models output (${stdout.length} chars): ${stdout.substring(0, 200)}...`
+      );
+      return this.parseModelsOutput(stdout);
+    } catch (error) {
+      opencodeLogger.error(`Failed to fetch models from CLI: ${error}`);
+      return [];
+    }
+  }
+
+ /**
+ * Parse the output of `opencode models` command
+ *
+ * OpenCode CLI output format (one model per line):
+ * opencode/big-pickle
+ * opencode/glm-4.7-free
+ * anthropic/claude-3-5-haiku-20241022
+ * github-copilot/claude-3.5-sonnet
+ * ...
+ */
+ private parseModelsOutput(output: string): ModelDefinition[] {
+ // Parse line-based format (one model ID per line)
+ const lines = output.split('\n');
+ const models: ModelDefinition[] = [];
+
+ // Regex to validate "provider/model-name" format
+ // Provider: lowercase letters, numbers, dots, hyphens
+ // Model name: non-whitespace (supports nested paths like openrouter/anthropic/claude)
+ const modelIdRegex = OPENCODE_MODEL_ID_PATTERN;
+
+ for (const line of lines) {
+ // Remove ANSI escape codes if any
+ const cleanLine = line.replace(/\x1b\[[0-9;]*m/g, '').trim();
+
+ // Skip empty lines
+ if (!cleanLine) continue;
+
+ // Validate format using regex for robustness
+ if (modelIdRegex.test(cleanLine)) {
+ const separatorIndex = cleanLine.indexOf(OPENCODE_MODEL_ID_SEPARATOR);
+ if (separatorIndex <= 0 || separatorIndex === cleanLine.length - 1) {
+ continue;
+ }
+
+ const provider = cleanLine.slice(0, separatorIndex);
+ const name = cleanLine.slice(separatorIndex + 1);
+
+ if (!OPENCODE_PROVIDER_PATTERN.test(provider) || !OPENCODE_MODEL_NAME_PATTERN.test(name)) {
+ continue;
+ }
+
+ models.push(
+ this.modelInfoToDefinition({
+ id: cleanLine,
+ provider,
+ name,
+ })
+ );
+ }
+ }
+
+ opencodeLogger.debug(`Parsed ${models.length} models from CLI output`);
+ return models;
+ }
+
+ /**
+ * Convert OpenCodeModelInfo to ModelDefinition
+ */
+ private modelInfoToDefinition(model: OpenCodeModelInfo): ModelDefinition {
+ const displayName = model.displayName || this.formatModelDisplayName(model);
+ const tier = this.inferModelTier(model.id);
+
+ return {
+ id: model.id,
+ name: displayName,
+ modelString: model.id,
+ provider: model.provider, // Use the actual provider (github-copilot, google, etc.)
+ description: `${model.name} via ${this.formatProviderName(model.provider)}`,
+ supportsTools: true,
+ supportsVision: this.modelSupportsVision(model.id),
+ tier,
+ // Mark Claude Sonnet as default if available
+ default: model.id.includes('claude-sonnet-4'),
+ };
+ }
+
+  /**
+   * Format provider name for display
+   *
+   * Known providers get a curated display name; unknown providers are
+   * title-cased with hyphens converted to spaces.
+   */
+  private formatProviderName(provider: string): string {
+    const providerNames: Record<string, string> = {
+      'github-copilot': 'GitHub Copilot',
+      google: 'Google AI',
+      openai: 'OpenAI',
+      anthropic: 'Anthropic',
+      openrouter: 'OpenRouter',
+      opencode: 'OpenCode',
+      ollama: 'Ollama',
+      lmstudio: 'LM Studio',
+      azure: 'Azure OpenAI',
+      xai: 'xAI',
+      deepseek: 'DeepSeek',
+    };
+    return (
+      providerNames[provider] ||
+      provider.charAt(0).toUpperCase() + provider.slice(1).replace(/-/g, ' ')
+    );
+  }
+
+  /**
+   * Format a display name for a model
+   *
+   * Title-cases hyphen-separated name parts, rejoins adjacent numeric
+   * parts with a dot (e.g. "sonnet-4-5" -> "Sonnet 4.5"), and appends a
+   * human-readable provider name in parentheses.
+   */
+  private formatModelDisplayName(model: OpenCodeModelInfo): string {
+    // Capitalize and format the model name
+    const formattedName = model.name
+      .split('-')
+      .map((part) => {
+        // Handle version numbers like "4-5" -> "4.5"
+        if (/^\d+$/.test(part)) {
+          return part;
+        }
+        return part.charAt(0).toUpperCase() + part.slice(1);
+      })
+      .join(' ')
+      .replace(/(\d)\s+(\d)/g, '$1.$2'); // "4 5" -> "4.5"
+
+    // Format provider name
+    const providerNames: Record<string, string> = {
+      copilot: 'GitHub Copilot',
+      anthropic: 'Anthropic',
+      openai: 'OpenAI',
+      google: 'Google',
+      'amazon-bedrock': 'AWS Bedrock',
+      bedrock: 'AWS Bedrock',
+      openrouter: 'OpenRouter',
+      opencode: 'OpenCode',
+      azure: 'Azure',
+      ollama: 'Ollama',
+      lmstudio: 'LM Studio',
+    };
+
+    const providerDisplay = providerNames[model.provider] || model.provider;
+    return `${formattedName} (${providerDisplay})`;
+  }
+
+ /**
+ * Infer model tier based on model ID
+ */
+ private inferModelTier(modelId: string): 'basic' | 'standard' | 'premium' {
+ const lowerModelId = modelId.toLowerCase();
+
+ // Premium tier: flagship models
+ if (
+ lowerModelId.includes('opus') ||
+ lowerModelId.includes('gpt-5') ||
+ lowerModelId.includes('o3') ||
+ lowerModelId.includes('o4') ||
+ lowerModelId.includes('gemini-2') ||
+ lowerModelId.includes('deepseek-r1')
+ ) {
+ return 'premium';
+ }
+
+ // Basic tier: free or lightweight models
+ if (
+ lowerModelId.includes('free') ||
+ lowerModelId.includes('nano') ||
+ lowerModelId.includes('mini') ||
+ lowerModelId.includes('haiku') ||
+ lowerModelId.includes('flash')
+ ) {
+ return 'basic';
+ }
+
+ // Standard tier: everything else
+ return 'standard';
+ }
+
+ /**
+ * Check if a model supports vision based on model ID
+ */
+ private modelSupportsVision(modelId: string): boolean {
+ const lowerModelId = modelId.toLowerCase();
+
+ // Models known to support vision
+ const visionModels = ['claude', 'gpt-4', 'gpt-5', 'gemini', 'nova', 'llama-3', 'llama-4'];
+
+ return visionModels.some((vm) => lowerModelId.includes(vm));
+ }
+
+  /**
+   * Fetch authenticated providers from OpenCode CLI
+   *
+   * Runs `opencode auth list` to get the list of authenticated providers.
+   * Uses async execFile to avoid blocking the event loop. On failure the
+   * last successfully cached list (or []) is returned.
+   */
+  async fetchAuthenticatedProviders(): Promise<OpenCodeProviderInfo[]> {
+    this.ensureCliDetected();
+
+    if (!this.cliPath) {
+      opencodeLogger.debug('OpenCode CLI not available for provider fetch');
+      return [];
+    }
+
+    try {
+      let command: string;
+      let args: string[];
+
+      if (this.detectedStrategy === 'npx') {
+        // NPX strategy
+        command = 'npx';
+        args = ['opencode-ai@latest', 'auth', 'list'];
+      } else if (this.useWsl && this.wslCliPath) {
+        // WSL strategy
+        command = 'wsl.exe';
+        args = this.wslDistribution
+          ? ['-d', this.wslDistribution, this.wslCliPath, 'auth', 'list']
+          : [this.wslCliPath, 'auth', 'list'];
+      } else {
+        // Direct CLI execution
+        command = this.cliPath;
+        args = ['auth', 'list'];
+      }
+      // Single log site instead of one per branch
+      opencodeLogger.debug(`Executing: ${command} ${args.join(' ')}`);
+
+      const { stdout } = await execFileAsync(command, args, {
+        encoding: 'utf-8',
+        timeout: 15000,
+        windowsHide: true,
+      });
+
+      opencodeLogger.debug(
+        `Auth list output (${stdout.length} chars): ${stdout.substring(0, 200)}...`
+      );
+      const providers = this.parseProvidersOutput(stdout);
+      this.cachedProviders = providers;
+      return providers;
+    } catch (error) {
+      opencodeLogger.error(`Failed to fetch providers from CLI: ${error}`);
+      return this.cachedProviders || [];
+    }
+  }
+
+  /**
+   * Parse the output of `opencode auth list` command
+   *
+   * OpenCode CLI output format:
+   * ┌ Credentials ~/.local/share/opencode/auth.json
+   * │
+   * ● Anthropic oauth
+   * │
+   * ● GitHub Copilot oauth
+   * │
+   * └ 4 credentials
+   *
+   * Each line with ● contains: provider name and auth method (oauth/api)
+   */
+  private parseProvidersOutput(output: string): OpenCodeProviderInfo[] {
+    const lines = output.split('\n');
+    const providers: OpenCodeProviderInfo[] = [];
+
+    // Provider name to ID mapping
+    const providerIdMap: Record<string, string> = {
+      anthropic: 'anthropic',
+      'github copilot': 'github-copilot',
+      copilot: 'github-copilot',
+      google: 'google',
+      openai: 'openai',
+      openrouter: 'openrouter',
+      azure: 'azure',
+      bedrock: 'amazon-bedrock',
+      'amazon bedrock': 'amazon-bedrock',
+      ollama: 'ollama',
+      'lm studio': 'lmstudio',
+      lmstudio: 'lmstudio',
+      opencode: 'opencode',
+      'z.ai coding plan': 'z-ai',
+      'z.ai': 'z-ai',
+    };
+
+    for (const line of lines) {
+      // Look for lines with ● which indicate authenticated providers
+      // Format: "● Provider Name auth_method"
+      if (line.includes('●')) {
+        // Remove ANSI escape codes and the ● symbol
+        const cleanLine = line
+          .replace(/\x1b\[[0-9;]*m/g, '') // Remove ANSI codes
+          .replace(/●/g, '') // Remove ● symbol
+          .trim();
+
+        if (!cleanLine) continue;
+
+        // Parse "Provider Name auth_method" format
+        // Auth method is the last word (oauth, api, etc.)
+        const parts = cleanLine.split(/\s+/);
+        if (parts.length >= 2) {
+          const authMethod = parts[parts.length - 1].toLowerCase();
+          const providerName = parts.slice(0, -1).join(' ');
+
+          // Determine auth method type
+          let authMethodType: 'oauth' | 'api_key' | undefined;
+          if (authMethod === 'oauth') {
+            authMethodType = 'oauth';
+          } else if (authMethod === 'api' || authMethod === 'api_key') {
+            authMethodType = 'api_key';
+          }
+
+          // Get provider ID from name
+          const providerNameLower = providerName.toLowerCase();
+          const providerId =
+            providerIdMap[providerNameLower] || providerNameLower.replace(/\s+/g, '-');
+
+          providers.push({
+            id: providerId,
+            name: providerName,
+            authenticated: true, // If it's listed with ●, it's authenticated
+            authMethod: authMethodType,
+          });
+        }
+      }
+    }
+
+    opencodeLogger.debug(`Parsed ${providers.length} providers from auth list`);
+    return providers;
+  }
+
+ /**
+ * Get cached authenticated providers
+ */
+ getCachedProviders(): OpenCodeProviderInfo[] | null {
+ return this.cachedProviders;
+ }
+
+ /**
+ * Clear the model cache, forcing a refresh on next access
+ */
+ clearModelCache(): void {
+ this.cachedModels = null;
+ this.modelsCacheExpiry = 0;
+ this.cachedProviders = null;
+ opencodeLogger.debug('Model cache cleared');
+ }
+
+ /**
+ * Check if we have cached models (not just defaults)
+ */
+ hasCachedModels(): boolean {
+ return this.cachedModels !== null && this.cachedModels.length > 0;
+ }
+
+ // ==========================================================================
+ // Feature Support
+ // ==========================================================================
+
+ /**
+ * Check if a feature is supported by OpenCode
+ *
+ * Supported features:
+ * - tools: Function calling / tool use
+ * - text: Text generation
+ * - vision: Image understanding
+ */
+ supportsFeature(feature: string): boolean {
+ const supportedFeatures = ['tools', 'text', 'vision'];
+ return supportedFeatures.includes(feature);
+ }
+
+ // ==========================================================================
+ // Authentication
+ // ==========================================================================
+
+  /**
+   * Check authentication status for OpenCode CLI
+   *
+   * Checks for authentication via:
+   * - OAuth token in auth file (takes precedence)
+   * - API key in auth file
+   */
+  async checkAuth(): Promise<OpenCodeAuthStatus> {
+    const authIndicators = await getOpenCodeAuthIndicators();
+
+    // OAuth token takes precedence over an API key
+    if (authIndicators.hasOAuthToken) {
+      return {
+        authenticated: true,
+        method: 'oauth',
+        hasOAuthToken: true,
+        hasApiKey: authIndicators.hasApiKey,
+      };
+    }
+
+    // Fall back to API key
+    if (authIndicators.hasApiKey) {
+      return {
+        authenticated: true,
+        method: 'api_key',
+        hasOAuthToken: false,
+        hasApiKey: true,
+      };
+    }
+
+    return {
+      authenticated: false,
+      method: 'none',
+      hasOAuthToken: false,
+      hasApiKey: false,
+    };
+  }
+
+ // ==========================================================================
+ // Installation Detection
+ // ==========================================================================
+
+  /**
+   * Detect OpenCode installation status
+   *
+   * Checks if the opencode CLI is available either through:
+   * - Direct installation (npm global)
+   * - NPX (fallback on Windows)
+   * Also checks authentication status.
+   */
+  async detectInstallation(): Promise<InstallationStatus> {
+    this.ensureCliDetected();
+
+    const installed = await this.isInstalled();
+    const auth = await this.checkAuth();
+
+    return {
+      installed,
+      path: this.cliPath || undefined,
+      method: this.detectedStrategy === 'npx' ? 'npm' : 'cli',
+      authenticated: auth.authenticated,
+      hasApiKey: auth.hasApiKey,
+      hasOAuthToken: auth.hasOAuthToken,
+    };
+  }
+}
diff --git a/apps/server/src/providers/provider-factory.ts b/apps/server/src/providers/provider-factory.ts
index 0ef9b36e..c2a18120 100644
--- a/apps/server/src/providers/provider-factory.ts
+++ b/apps/server/src/providers/provider-factory.ts
@@ -1,51 +1,168 @@
/**
* Provider Factory - Routes model IDs to the appropriate provider
*
- * This factory implements model-based routing to automatically select
- * the correct provider based on the model string. This makes adding
- * new providers (Cursor, OpenCode, etc.) trivial - just add one line.
+ * Uses a registry pattern for dynamic provider registration.
+ * Providers register themselves on import, making it easy to add new providers.
*/
import { BaseProvider } from './base-provider.js';
-import { ClaudeProvider } from './claude-provider.js';
-import type { InstallationStatus } from './types.js';
+import type { InstallationStatus, ModelDefinition } from './types.js';
+import { isCursorModel, isCodexModel, isOpencodeModel, type ModelProvider } from '@automaker/types';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const DISCONNECTED_MARKERS: Record<string, string> = {
+ claude: '.claude-disconnected',
+ codex: '.codex-disconnected',
+ cursor: '.cursor-disconnected',
+ opencode: '.opencode-disconnected',
+};
+
+/**
+ * Check if a provider CLI is disconnected from the app
+ */
+export function isProviderDisconnected(providerName: string): boolean {
+ const markerFile = DISCONNECTED_MARKERS[providerName.toLowerCase()];
+ if (!markerFile) return false;
+
+ const markerPath = path.join(process.cwd(), '.automaker', markerFile);
+ return fs.existsSync(markerPath);
+}
+
+/**
+ * Provider registration entry
+ */
+interface ProviderRegistration {
+ /** Factory function to create provider instance */
+ factory: () => BaseProvider;
+ /** Aliases for this provider (e.g., 'anthropic' for 'claude') */
+ aliases?: string[];
+ /** Function to check if this provider can handle a model ID */
+ canHandleModel?: (modelId: string) => boolean;
+ /** Priority for model matching (higher = checked first) */
+ priority?: number;
+}
+
+/**
+ * Provider registry - stores registered providers
+ */
+const providerRegistry = new Map<string, ProviderRegistration>();
+
+/**
+ * Register a provider with the factory
+ *
+ * @param name Provider name (e.g., 'claude', 'cursor')
+ * @param registration Provider registration config
+ */
+export function registerProvider(name: string, registration: ProviderRegistration): void {
+ providerRegistry.set(name.toLowerCase(), registration);
+}
export class ProviderFactory {
/**
- * Get the appropriate provider for a given model ID
+ * Determine which provider to use for a given model
*
- * @param modelId Model identifier (e.g., "claude-opus-4-5-20251101", "gpt-5.2", "cursor-fast")
- * @returns Provider instance for the model
+ * @param model Model identifier
+ * @returns Provider name (ModelProvider type)
*/
- static getProviderForModel(modelId: string): BaseProvider {
- const lowerModel = modelId.toLowerCase();
+ static getProviderNameForModel(model: string): ModelProvider {
+ const lowerModel = model.toLowerCase();
- // Claude models (claude-*, opus, sonnet, haiku)
- if (lowerModel.startsWith('claude-') || ['haiku', 'sonnet', 'opus'].includes(lowerModel)) {
- return new ClaudeProvider();
+ // Get all registered providers sorted by priority (descending)
+ const registrations = Array.from(providerRegistry.entries()).sort(
+ ([, a], [, b]) => (b.priority ?? 0) - (a.priority ?? 0)
+ );
+
+ // Check each provider's canHandleModel function
+ for (const [name, reg] of registrations) {
+ if (reg.canHandleModel?.(lowerModel)) {
+ return name as ModelProvider;
+ }
}
- // Future providers:
- // if (lowerModel.startsWith("cursor-")) {
- // return new CursorProvider();
- // }
- // if (lowerModel.startsWith("opencode-")) {
- // return new OpenCodeProvider();
- // }
+ // Fallback: Check for explicit prefixes
+ for (const [name] of registrations) {
+ if (lowerModel.startsWith(`${name}-`)) {
+ return name as ModelProvider;
+ }
+ }
- // Default to Claude for unknown models
- console.warn(`[ProviderFactory] Unknown model prefix for "${modelId}", defaulting to Claude`);
- return new ClaudeProvider();
+ // Default to claude (first registered provider or claude)
+ return 'claude';
+ }
+
+ /**
+ * Get the appropriate provider for a given model ID
+ *
+ * @param modelId Model identifier (e.g., "claude-opus-4-5-20251101", "cursor-gpt-4o", "cursor-auto")
+ * @param options Optional settings
+ * @param options.throwOnDisconnected Throw error if provider is disconnected (default: true)
+ * @returns Provider instance for the model
+ * @throws Error if provider is disconnected and throwOnDisconnected is true
+ */
+ static getProviderForModel(
+ modelId: string,
+ options: { throwOnDisconnected?: boolean } = {}
+ ): BaseProvider {
+ const { throwOnDisconnected = true } = options;
+ const providerName = this.getProviderForModelName(modelId);
+
+ // Check if provider is disconnected
+ if (throwOnDisconnected && isProviderDisconnected(providerName)) {
+ throw new Error(
+ `${providerName.charAt(0).toUpperCase() + providerName.slice(1)} CLI is disconnected from the app. ` +
+ `Please go to Settings > Providers and click "Sign In" to reconnect.`
+ );
+ }
+
+ const provider = this.getProviderByName(providerName);
+
+ if (!provider) {
+ // Fallback to claude if provider not found
+ const claudeReg = providerRegistry.get('claude');
+ if (claudeReg) {
+ return claudeReg.factory();
+ }
+ throw new Error(`No provider found for model: ${modelId}`);
+ }
+
+ return provider;
+ }
+
+ /**
+ * Get the provider name for a given model ID (without creating provider instance)
+ */
+ static getProviderForModelName(modelId: string): string {
+ const lowerModel = modelId.toLowerCase();
+
+ // Get all registered providers sorted by priority (descending)
+ const registrations = Array.from(providerRegistry.entries()).sort(
+ ([, a], [, b]) => (b.priority ?? 0) - (a.priority ?? 0)
+ );
+
+ // Check each provider's canHandleModel function
+ for (const [name, reg] of registrations) {
+ if (reg.canHandleModel?.(lowerModel)) {
+ return name;
+ }
+ }
+
+ // Fallback: Check for explicit prefixes
+ for (const [name] of registrations) {
+ if (lowerModel.startsWith(`${name}-`)) {
+ return name;
+ }
+ }
+
+ // Default to claude (first registered provider or claude)
+ return 'claude';
}
/**
* Get all available providers
*/
static getAllProviders(): BaseProvider[] {
- return [
- new ClaudeProvider(),
- // Future providers...
- ];
+ return Array.from(providerRegistry.values()).map((reg) => reg.factory());
}
/**
@@ -54,11 +171,10 @@ export class ProviderFactory {
* @returns Map of provider name to installation status
*/
static async checkAllProviders(): Promise<Record<string, InstallationStatus>> {
- const providers = this.getAllProviders();
const statuses: Record<string, InstallationStatus> = {};
- for (const provider of providers) {
- const name = provider.getName();
+ for (const [name, reg] of providerRegistry.entries()) {
+ const provider = reg.factory();
const status = await provider.detectInstallation();
statuses[name] = status;
}
@@ -69,40 +185,119 @@ export class ProviderFactory {
/**
* Get provider by name (for direct access if needed)
*
- * @param name Provider name (e.g., "claude", "cursor")
+ * @param name Provider name (e.g., "claude", "cursor") or alias (e.g., "anthropic")
* @returns Provider instance or null if not found
*/
static getProviderByName(name: string): BaseProvider | null {
const lowerName = name.toLowerCase();
- switch (lowerName) {
- case 'claude':
- case 'anthropic':
- return new ClaudeProvider();
-
- // Future providers:
- // case "cursor":
- // return new CursorProvider();
- // case "opencode":
- // return new OpenCodeProvider();
-
- default:
- return null;
+ // Direct lookup
+ const directReg = providerRegistry.get(lowerName);
+ if (directReg) {
+ return directReg.factory();
}
+
+ // Check aliases
+ for (const [, reg] of providerRegistry.entries()) {
+ if (reg.aliases?.includes(lowerName)) {
+ return reg.factory();
+ }
+ }
+
+ return null;
}
/**
* Get all available models from all providers
*/
- static getAllAvailableModels() {
+ static getAllAvailableModels(): ModelDefinition[] {
const providers = this.getAllProviders();
- const allModels = [];
+ return providers.flatMap((p) => p.getAvailableModels());
+ }
- for (const provider of providers) {
- const models = provider.getAvailableModels();
- allModels.push(...models);
+ /**
+ * Get list of registered provider names
+ */
+ static getRegisteredProviderNames(): string[] {
+ return Array.from(providerRegistry.keys());
+ }
+
+ /**
+ * Check if a specific model supports vision/image input
+ *
+ * @param modelId Model identifier
+ * @returns Whether the model supports vision (defaults to true if model not found)
+ */
+ static modelSupportsVision(modelId: string): boolean {
+ const provider = this.getProviderForModel(modelId);
+ const models = provider.getAvailableModels();
+
+ // Find the model in the available models list
+ for (const model of models) {
+ if (
+ model.id === modelId ||
+ model.modelString === modelId ||
+ model.id.endsWith(`-${modelId}`) ||
+ model.modelString.endsWith(`-${modelId}`) ||
+ model.modelString === modelId.replace(/^(claude|cursor|codex)-/, '') ||
+ model.modelString === modelId.replace(/-(claude|cursor|codex)$/, '')
+ ) {
+ return model.supportsVision ?? true;
+ }
}
- return allModels;
+ // Also try exact match with model string from provider's model map
+ for (const model of models) {
+ if (model.modelString === modelId || model.id === modelId) {
+ return model.supportsVision ?? true;
+ }
+ }
+
+ // Default to true (Claude SDK supports vision by default)
+ return true;
}
}
+
+// =============================================================================
+// Provider Registrations
+// =============================================================================
+
+// Import providers for registration side-effects
+import { ClaudeProvider } from './claude-provider.js';
+import { CursorProvider } from './cursor-provider.js';
+import { CodexProvider } from './codex-provider.js';
+import { OpencodeProvider } from './opencode-provider.js';
+
+// Register Claude provider
+registerProvider('claude', {
+ factory: () => new ClaudeProvider(),
+ aliases: ['anthropic'],
+ canHandleModel: (model: string) => {
+ return (
+ model.startsWith('claude-') || ['opus', 'sonnet', 'haiku'].some((n) => model.includes(n))
+ );
+ },
+ priority: 0, // Default priority
+});
+
+// Register Cursor provider
+registerProvider('cursor', {
+ factory: () => new CursorProvider(),
+ canHandleModel: (model: string) => isCursorModel(model),
+ priority: 10, // Higher priority - check Cursor models first
+});
+
+// Register Codex provider
+registerProvider('codex', {
+ factory: () => new CodexProvider(),
+ aliases: ['openai'],
+ canHandleModel: (model: string) => isCodexModel(model),
+ priority: 5, // Medium priority - check after Cursor but before Claude
+});
+
+// Register OpenCode provider
+registerProvider('opencode', {
+ factory: () => new OpencodeProvider(),
+ canHandleModel: (model: string) => isOpencodeModel(model),
+ priority: 3, // Between codex (5) and claude (0)
+});
diff --git a/apps/server/src/providers/simple-query-service.ts b/apps/server/src/providers/simple-query-service.ts
new file mode 100644
index 00000000..b37ef732
--- /dev/null
+++ b/apps/server/src/providers/simple-query-service.ts
@@ -0,0 +1,240 @@
+/**
+ * Simple Query Service - Simplified interface for basic AI queries
+ *
+ * Use this for routes that need simple text responses without
+ * complex event handling. This service abstracts away the provider
+ * selection and streaming details, providing a clean interface
+ * for common query patterns.
+ *
+ * Benefits:
+ * - No direct SDK imports needed in route files
+ * - Consistent provider routing based on model
+ * - Automatic text extraction from streaming responses
+ * - Structured output support for JSON schema responses
+ * - Eliminates duplicate extractTextFromStream() functions
+ */
+
+import { ProviderFactory } from './provider-factory.js';
+import type { ProviderMessage, ContentBlock, ThinkingLevel } from '@automaker/types';
+
+/**
+ * Options for simple query execution
+ */
+export interface SimpleQueryOptions {
+ /** The prompt to send to the AI (can be text or multi-part content) */
+ prompt: string | Array<{ type: string; text?: string; source?: object }>;
+ /** Model to use (with or without provider prefix) */
+ model?: string;
+ /** Working directory for the query */
+ cwd: string;
+ /** System prompt (combined with user prompt for some providers) */
+ systemPrompt?: string;
+ /** Maximum turns for agentic operations (default: 1) */
+ maxTurns?: number;
+ /** Tools to allow (default: [] for simple queries) */
+ allowedTools?: string[];
+ /** Abort controller for cancellation */
+ abortController?: AbortController;
+ /** Structured output format for JSON responses */
+ outputFormat?: {
+ type: 'json_schema';
+ schema: Record<string, unknown>;
+ };
+ /** Thinking level for Claude models */
+ thinkingLevel?: ThinkingLevel;
+ /** If true, runs in read-only mode (no file writes) */
+ readOnly?: boolean;
+ /** Setting sources for CLAUDE.md loading */
+ settingSources?: Array<'user' | 'project' | 'local'>;
+}
+
+/**
+ * Result from a simple query
+ */
+export interface SimpleQueryResult {
+ /** The accumulated text response */
+ text: string;
+ /** Structured output if outputFormat was specified and provider supports it */
+ structured_output?: Record<string, unknown>;
+}
+
+/**
+ * Options for streaming query execution
+ */
+export interface StreamingQueryOptions extends SimpleQueryOptions {
+ /** Callback for each text chunk received */
+ onText?: (text: string) => void;
+ /** Callback for tool use events */
+ onToolUse?: (tool: string, input: unknown) => void;
+ /** Callback for thinking blocks (if available) */
+ onThinking?: (thinking: string) => void;
+}
+
+/**
+ * Default model to use when none specified
+ */
+const DEFAULT_MODEL = 'claude-sonnet-4-20250514';
+
+/**
+ * Execute a simple query and return the text result
+ *
+ * Use this for simple, non-streaming queries where you just need
+ * the final text response. For more complex use cases with progress
+ * callbacks, use streamingQuery() instead.
+ *
+ * @example
+ * ```typescript
+ * const result = await simpleQuery({
+ * prompt: 'Generate a title for: user authentication',
+ * cwd: process.cwd(),
+ * systemPrompt: 'You are a title generator...',
+ * maxTurns: 1,
+ * allowedTools: [],
+ * });
+ * console.log(result.text); // "Add user authentication"
+ * ```
+ */
+export async function simpleQuery(options: SimpleQueryOptions): Promise<SimpleQueryResult> {
+ const model = options.model || DEFAULT_MODEL;
+ const provider = ProviderFactory.getProviderForModel(model);
+
+ let responseText = '';
+ let structuredOutput: Record<string, unknown> | undefined;
+
+ // Build provider options
+ const providerOptions = {
+ prompt: options.prompt,
+ model: model,
+ cwd: options.cwd,
+ systemPrompt: options.systemPrompt,
+ maxTurns: options.maxTurns ?? 1,
+ allowedTools: options.allowedTools ?? [],
+ abortController: options.abortController,
+ outputFormat: options.outputFormat,
+ thinkingLevel: options.thinkingLevel,
+ readOnly: options.readOnly,
+ settingSources: options.settingSources,
+ };
+
+ for await (const msg of provider.executeQuery(providerOptions)) {
+ // Handle error messages
+ if (msg.type === 'error') {
+ const errorMessage = msg.error || 'Provider returned an error';
+ throw new Error(errorMessage);
+ }
+
+ // Extract text from assistant messages
+ if (msg.type === 'assistant' && msg.message?.content) {
+ for (const block of msg.message.content) {
+ if (block.type === 'text' && block.text) {
+ responseText += block.text;
+ }
+ }
+ }
+
+ // Handle result messages
+ if (msg.type === 'result') {
+ if (msg.subtype === 'success') {
+ // Use result text if longer than accumulated text
+ if (msg.result && msg.result.length > responseText.length) {
+ responseText = msg.result;
+ }
+ // Capture structured output if present
+ if (msg.structured_output) {
+ structuredOutput = msg.structured_output;
+ }
+ } else if (msg.subtype === 'error_max_turns') {
+ // Max turns reached - return what we have
+ break;
+ } else if (msg.subtype === 'error_max_structured_output_retries') {
+ throw new Error('Could not produce valid structured output after retries');
+ }
+ }
+ }
+
+ return { text: responseText, structured_output: structuredOutput };
+}
+
+/**
+ * Execute a streaming query with event callbacks
+ *
+ * Use this for queries where you need real-time progress updates,
+ * such as when displaying streaming output to a user.
+ *
+ * @example
+ * ```typescript
+ * const result = await streamingQuery({
+ * prompt: 'Analyze this project and suggest improvements',
+ * cwd: '/path/to/project',
+ * maxTurns: 250,
+ * allowedTools: ['Read', 'Glob', 'Grep'],
+ * onText: (text) => emitProgress(text),
+ * onToolUse: (tool, input) => emitToolUse(tool, input),
+ * });
+ * ```
+ */
+export async function streamingQuery(options: StreamingQueryOptions): Promise<SimpleQueryResult> {
+ const model = options.model || DEFAULT_MODEL;
+ const provider = ProviderFactory.getProviderForModel(model);
+
+ let responseText = '';
+ let structuredOutput: Record<string, unknown> | undefined;
+
+ // Build provider options
+ const providerOptions = {
+ prompt: options.prompt,
+ model: model,
+ cwd: options.cwd,
+ systemPrompt: options.systemPrompt,
+ maxTurns: options.maxTurns ?? 250,
+ allowedTools: options.allowedTools ?? ['Read', 'Glob', 'Grep'],
+ abortController: options.abortController,
+ outputFormat: options.outputFormat,
+ thinkingLevel: options.thinkingLevel,
+ readOnly: options.readOnly,
+ settingSources: options.settingSources,
+ };
+
+ for await (const msg of provider.executeQuery(providerOptions)) {
+ // Handle error messages
+ if (msg.type === 'error') {
+ const errorMessage = msg.error || 'Provider returned an error';
+ throw new Error(errorMessage);
+ }
+
+ // Extract content from assistant messages
+ if (msg.type === 'assistant' && msg.message?.content) {
+ for (const block of msg.message.content) {
+ if (block.type === 'text' && block.text) {
+ responseText += block.text;
+ options.onText?.(block.text);
+ } else if (block.type === 'tool_use' && block.name) {
+ options.onToolUse?.(block.name, block.input);
+ } else if (block.type === 'thinking' && block.thinking) {
+ options.onThinking?.(block.thinking);
+ }
+ }
+ }
+
+ // Handle result messages
+ if (msg.type === 'result') {
+ if (msg.subtype === 'success') {
+ // Use result text if longer than accumulated text
+ if (msg.result && msg.result.length > responseText.length) {
+ responseText = msg.result;
+ }
+ // Capture structured output if present
+ if (msg.structured_output) {
+ structuredOutput = msg.structured_output;
+ }
+ } else if (msg.subtype === 'error_max_turns') {
+ // Max turns reached - return what we have
+ break;
+ } else if (msg.subtype === 'error_max_structured_output_retries') {
+ throw new Error('Could not produce valid structured output after retries');
+ }
+ }
+ }
+
+ return { text: responseText, structured_output: structuredOutput };
+}
diff --git a/apps/server/src/providers/types.ts b/apps/server/src/providers/types.ts
index a3dcf58c..b995d0fb 100644
--- a/apps/server/src/providers/types.ts
+++ b/apps/server/src/providers/types.ts
@@ -2,6 +2,7 @@
* Shared types for AI model providers
*
* Re-exports types from @automaker/types for consistency across the codebase.
+ * All provider types are defined in @automaker/types to avoid duplication.
*/
// Re-export all provider types from @automaker/types
@@ -13,72 +14,9 @@ export type {
McpStdioServerConfig,
McpSSEServerConfig,
McpHttpServerConfig,
+ ContentBlock,
+ ProviderMessage,
+ InstallationStatus,
+ ValidationResult,
+ ModelDefinition,
} from '@automaker/types';
-
-/**
- * Content block in a provider message (matches Claude SDK format)
- */
-export interface ContentBlock {
- type: 'text' | 'tool_use' | 'thinking' | 'tool_result';
- text?: string;
- thinking?: string;
- name?: string;
- input?: unknown;
- tool_use_id?: string;
- content?: string;
-}
-
-/**
- * Message returned by a provider (matches Claude SDK streaming format)
- */
-export interface ProviderMessage {
- type: 'assistant' | 'user' | 'error' | 'result';
- subtype?: 'success' | 'error';
- session_id?: string;
- message?: {
- role: 'user' | 'assistant';
- content: ContentBlock[];
- };
- result?: string;
- error?: string;
- parent_tool_use_id?: string | null;
-}
-
-/**
- * Installation status for a provider
- */
-export interface InstallationStatus {
- installed: boolean;
- path?: string;
- version?: string;
- method?: 'cli' | 'npm' | 'brew' | 'sdk';
- hasApiKey?: boolean;
- authenticated?: boolean;
- error?: string;
-}
-
-/**
- * Validation result
- */
-export interface ValidationResult {
- valid: boolean;
- errors: string[];
- warnings?: string[];
-}
-
-/**
- * Model definition
- */
-export interface ModelDefinition {
- id: string;
- name: string;
- modelString: string;
- provider: string;
- description: string;
- contextWindow?: number;
- maxOutputTokens?: number;
- supportsVision?: boolean;
- supportsTools?: boolean;
- tier?: 'basic' | 'standard' | 'premium';
- default?: boolean;
-}
diff --git a/apps/server/src/routes/agent/routes/queue-add.ts b/apps/server/src/routes/agent/routes/queue-add.ts
index 697f51c3..e5b8a875 100644
--- a/apps/server/src/routes/agent/routes/queue-add.ts
+++ b/apps/server/src/routes/agent/routes/queue-add.ts
@@ -3,17 +3,19 @@
*/
import type { Request, Response } from 'express';
+import type { ThinkingLevel } from '@automaker/types';
import { AgentService } from '../../../services/agent-service.js';
import { getErrorMessage, logError } from '../common.js';
export function createQueueAddHandler(agentService: AgentService) {
return async (req: Request, res: Response): Promise<void> => {
try {
- const { sessionId, message, imagePaths, model } = req.body as {
+ const { sessionId, message, imagePaths, model, thinkingLevel } = req.body as {
sessionId: string;
message: string;
imagePaths?: string[];
model?: string;
+ thinkingLevel?: ThinkingLevel;
};
if (!sessionId || !message) {
@@ -24,7 +26,12 @@ export function createQueueAddHandler(agentService: AgentService) {
return;
}
- const result = await agentService.addToQueue(sessionId, { message, imagePaths, model });
+ const result = await agentService.addToQueue(sessionId, {
+ message,
+ imagePaths,
+ model,
+ thinkingLevel,
+ });
res.json(result);
} catch (error) {
logError(error, 'Add to queue failed');
diff --git a/apps/server/src/routes/agent/routes/send.ts b/apps/server/src/routes/agent/routes/send.ts
index 35c1e88a..15e97f63 100644
--- a/apps/server/src/routes/agent/routes/send.ts
+++ b/apps/server/src/routes/agent/routes/send.ts
@@ -3,6 +3,7 @@
*/
import type { Request, Response } from 'express';
+import type { ThinkingLevel } from '@automaker/types';
import { AgentService } from '../../../services/agent-service.js';
import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js';
@@ -11,24 +12,27 @@ const logger = createLogger('Agent');
export function createSendHandler(agentService: AgentService) {
return async (req: Request, res: Response): Promise<void> => {
try {
- const { sessionId, message, workingDirectory, imagePaths, model } = req.body as {
- sessionId: string;
- message: string;
- workingDirectory?: string;
- imagePaths?: string[];
- model?: string;
- };
+ const { sessionId, message, workingDirectory, imagePaths, model, thinkingLevel } =
+ req.body as {
+ sessionId: string;
+ message: string;
+ workingDirectory?: string;
+ imagePaths?: string[];
+ model?: string;
+ thinkingLevel?: ThinkingLevel;
+ };
- console.log('[Send Handler] Received request:', {
+ logger.debug('Received request:', {
sessionId,
messageLength: message?.length,
workingDirectory,
imageCount: imagePaths?.length || 0,
model,
+ thinkingLevel,
});
if (!sessionId || !message) {
- console.log('[Send Handler] ERROR: Validation failed - missing sessionId or message');
+ logger.warn('Validation failed - missing sessionId or message');
res.status(400).json({
success: false,
error: 'sessionId and message are required',
@@ -36,7 +40,7 @@ export function createSendHandler(agentService: AgentService) {
return;
}
- console.log('[Send Handler] Validation passed, calling agentService.sendMessage()');
+ logger.debug('Validation passed, calling agentService.sendMessage()');
// Start the message processing (don't await - it streams via WebSocket)
agentService
@@ -46,18 +50,19 @@ export function createSendHandler(agentService: AgentService) {
workingDirectory,
imagePaths,
model,
+ thinkingLevel,
})
.catch((error) => {
- console.error('[Send Handler] ERROR: Background error in sendMessage():', error);
+ logger.error('Background error in sendMessage():', error);
logError(error, 'Send message failed (background)');
});
- console.log('[Send Handler] Returning immediate response to client');
+ logger.debug('Returning immediate response to client');
// Return immediately - responses come via WebSocket
res.json({ success: true, message: 'Message sent' });
} catch (error) {
- console.error('[Send Handler] ERROR: Synchronous error:', error);
+ logger.error('Synchronous error:', error);
logError(error, 'Send message failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
diff --git a/apps/server/src/routes/app-spec/common.ts b/apps/server/src/routes/app-spec/common.ts
index df412dc6..7ef1aabe 100644
--- a/apps/server/src/routes/app-spec/common.ts
+++ b/apps/server/src/routes/app-spec/common.ts
@@ -6,26 +6,57 @@ import { createLogger } from '@automaker/utils';
const logger = createLogger('SpecRegeneration');
-// Shared state for tracking generation status - private
-let isRunning = false;
-let currentAbortController: AbortController | null = null;
+// Shared state for tracking generation status - scoped by project path
+const runningProjects = new Map<string, boolean>();
+const abortControllers = new Map<string, AbortController>();
/**
- * Get the current running state
+ * Get the running state for a specific project
*/
-export function getSpecRegenerationStatus(): {
+export function getSpecRegenerationStatus(projectPath?: string): {
isRunning: boolean;
currentAbortController: AbortController | null;
+ projectPath?: string;
} {
- return { isRunning, currentAbortController };
+ if (projectPath) {
+ return {
+ isRunning: runningProjects.get(projectPath) || false,
+ currentAbortController: abortControllers.get(projectPath) || null,
+ projectPath,
+ };
+ }
+ // Fallback: check if any project is running (for backward compatibility)
+ const isAnyRunning = Array.from(runningProjects.values()).some((running) => running);
+ return { isRunning: isAnyRunning, currentAbortController: null };
}
/**
- * Set the running state and abort controller
+ * Get the project path that is currently running (if any)
*/
-export function setRunningState(running: boolean, controller: AbortController | null = null): void {
- isRunning = running;
- currentAbortController = controller;
+export function getRunningProjectPath(): string | null {
+ for (const [path, running] of runningProjects.entries()) {
+ if (running) return path;
+ }
+ return null;
+}
+
+/**
+ * Set the running state and abort controller for a specific project
+ */
+export function setRunningState(
+ projectPath: string,
+ running: boolean,
+ controller: AbortController | null = null
+): void {
+ if (running) {
+ runningProjects.set(projectPath, true);
+ if (controller) {
+ abortControllers.set(projectPath, controller);
+ }
+ } else {
+ runningProjects.delete(projectPath);
+ abortControllers.delete(projectPath);
+ }
}
/**
diff --git a/apps/server/src/routes/app-spec/generate-features-from-spec.ts b/apps/server/src/routes/app-spec/generate-features-from-spec.ts
index e2b7124d..55bf792b 100644
--- a/apps/server/src/routes/app-spec/generate-features-from-spec.ts
+++ b/apps/server/src/routes/app-spec/generate-features-from-spec.ts
@@ -1,13 +1,16 @@
/**
* Generate features from existing app_spec.txt
+ *
+ * Model is configurable via phaseModels.featureGenerationModel in settings
+ * (defaults to Sonnet for balanced speed and quality).
*/
-import { query } from '@anthropic-ai/claude-agent-sdk';
import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils';
-import { createFeatureGenerationOptions } from '../../lib/sdk-options.js';
-import { logAuthStatus } from './common.js';
+import { DEFAULT_PHASE_MODELS } from '@automaker/types';
+import { resolvePhaseModel } from '@automaker/model-resolver';
+import { streamingQuery } from '../../providers/simple-query-service.js';
import { parseAndCreateFeatures } from './parse-and-create-features.js';
import { getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js';
@@ -101,67 +104,38 @@ IMPORTANT: Do not ask for clarification. The specification is provided above. Ge
'[FeatureGeneration]'
);
- const options = createFeatureGenerationOptions({
+ // Get model from phase settings
+ const settings = await settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.featureGenerationModel || DEFAULT_PHASE_MODELS.featureGenerationModel;
+ const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
+
+ logger.info('Using model:', model);
+
+ // Use streamingQuery with event callbacks
+ const result = await streamingQuery({
+ prompt,
+ model,
cwd: projectPath,
+ maxTurns: 250,
+ allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
- autoLoadClaudeMd,
+ thinkingLevel,
+ readOnly: true, // Feature generation only reads code, doesn't write
+ settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
+ onText: (text) => {
+ logger.debug(`Feature text block received (${text.length} chars)`);
+ events.emit('spec-regeneration:event', {
+ type: 'spec_regeneration_progress',
+ content: text,
+ projectPath: projectPath,
+ });
+ },
});
- logger.debug('SDK Options:', JSON.stringify(options, null, 2));
- logger.info('Calling Claude Agent SDK query() for features...');
+ const responseText = result.text;
- logAuthStatus('Right before SDK query() for features');
-
- let stream;
- try {
- stream = query({ prompt, options });
- logger.debug('query() returned stream successfully');
- } catch (queryError) {
- logger.error('❌ query() threw an exception:');
- logger.error('Error:', queryError);
- throw queryError;
- }
-
- let responseText = '';
- let messageCount = 0;
-
- logger.debug('Starting to iterate over feature stream...');
-
- try {
- for await (const msg of stream) {
- messageCount++;
- logger.debug(
- `Feature stream message #${messageCount}:`,
- JSON.stringify({ type: msg.type, subtype: (msg as any).subtype }, null, 2)
- );
-
- if (msg.type === 'assistant' && msg.message.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text') {
- responseText += block.text;
- logger.debug(`Feature text block received (${block.text.length} chars)`);
- events.emit('spec-regeneration:event', {
- type: 'spec_regeneration_progress',
- content: block.text,
- projectPath: projectPath,
- });
- }
- }
- } else if (msg.type === 'result' && (msg as any).subtype === 'success') {
- logger.debug('Received success result for features');
- responseText = (msg as any).result || responseText;
- } else if ((msg as { type: string }).type === 'error') {
- logger.error('❌ Received error message from feature stream:');
- logger.error('Error message:', JSON.stringify(msg, null, 2));
- }
- }
- } catch (streamError) {
- logger.error('❌ Error while iterating feature stream:');
- logger.error('Stream error:', streamError);
- throw streamError;
- }
-
- logger.info(`Feature stream complete. Total messages: ${messageCount}`);
+ logger.info(`Feature stream complete.`);
logger.info(`Feature response length: ${responseText.length} chars`);
logger.info('========== FULL RESPONSE TEXT ==========');
logger.info(responseText);
diff --git a/apps/server/src/routes/app-spec/generate-spec.ts b/apps/server/src/routes/app-spec/generate-spec.ts
index 0762bb90..4af42b48 100644
--- a/apps/server/src/routes/app-spec/generate-spec.ts
+++ b/apps/server/src/routes/app-spec/generate-spec.ts
@@ -1,9 +1,10 @@
/**
* Generate app_spec.txt from project overview
+ *
+ * Model is configurable via phaseModels.specGenerationModel in settings
+ * (defaults to Opus for high-quality specification generation).
*/
-import { query } from '@anthropic-ai/claude-agent-sdk';
-import path from 'path';
import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js';
import {
@@ -13,8 +14,10 @@ import {
type SpecOutput,
} from '../../lib/app-spec-format.js';
import { createLogger } from '@automaker/utils';
-import { createSpecGenerationOptions } from '../../lib/sdk-options.js';
-import { logAuthStatus } from './common.js';
+import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
+import { resolvePhaseModel } from '@automaker/model-resolver';
+import { extractJson } from '../../lib/json-extractor.js';
+import { streamingQuery } from '../../providers/simple-query-service.js';
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js';
@@ -93,105 +96,84 @@ ${getStructuredSpecPromptInstruction()}`;
'[SpecRegeneration]'
);
- const options = createSpecGenerationOptions({
+ // Get model from phase settings
+ const settings = await settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.specGenerationModel || DEFAULT_PHASE_MODELS.specGenerationModel;
+ const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
+
+ logger.info('Using model:', model);
+
+ let responseText = '';
+ let structuredOutput: SpecOutput | null = null;
+
+ // Determine if we should use structured output (Claude supports it, Cursor doesn't)
+ const useStructuredOutput = !isCursorModel(model);
+
+ // Build the final prompt - for Cursor, include JSON schema instructions
+ let finalPrompt = prompt;
+ if (!useStructuredOutput) {
+ finalPrompt = `${prompt}
+
+CRITICAL INSTRUCTIONS:
+1. DO NOT write any files. DO NOT create any files like "project_specification.json".
+2. After analyzing the project, respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
+3. The JSON must match this exact schema:
+
+${JSON.stringify(specOutputSchema, null, 2)}
+
+Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
+ }
+
+ // Use streamingQuery with event callbacks
+ const result = await streamingQuery({
+ prompt: finalPrompt,
+ model,
cwd: projectPath,
+ maxTurns: 250,
+ allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
- autoLoadClaudeMd,
- outputFormat: {
- type: 'json_schema',
- schema: specOutputSchema,
+ thinkingLevel,
+ readOnly: true, // Spec generation only reads code, we write the spec ourselves
+ settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
+ outputFormat: useStructuredOutput
+ ? {
+ type: 'json_schema',
+ schema: specOutputSchema,
+ }
+ : undefined,
+ onText: (text) => {
+ responseText += text;
+ logger.info(
+ `Text block received (${text.length} chars), total now: ${responseText.length} chars`
+ );
+ events.emit('spec-regeneration:event', {
+ type: 'spec_regeneration_progress',
+ content: text,
+ projectPath: projectPath,
+ });
+ },
+ onToolUse: (tool, input) => {
+ logger.info('Tool use:', tool);
+ events.emit('spec-regeneration:event', {
+ type: 'spec_tool',
+ tool,
+ input,
+ });
},
});
- logger.debug('SDK Options:', JSON.stringify(options, null, 2));
- logger.info('Calling Claude Agent SDK query()...');
-
- // Log auth status right before the SDK call
- logAuthStatus('Right before SDK query()');
-
- let stream;
- try {
- stream = query({ prompt, options });
- logger.debug('query() returned stream successfully');
- } catch (queryError) {
- logger.error('❌ query() threw an exception:');
- logger.error('Error:', queryError);
- throw queryError;
+ // Get structured output if available
+ if (result.structured_output) {
+ structuredOutput = result.structured_output as unknown as SpecOutput;
+ logger.info('✅ Received structured output');
+ logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
+ } else if (!useStructuredOutput && responseText) {
+ // For non-Claude providers, parse JSON from response text
+ structuredOutput = extractJson(responseText, { logger });
}
- let responseText = '';
- let messageCount = 0;
- let structuredOutput: SpecOutput | null = null;
-
- logger.info('Starting to iterate over stream...');
-
- try {
- for await (const msg of stream) {
- messageCount++;
- logger.info(
- `Stream message #${messageCount}: type=${msg.type}, subtype=${(msg as any).subtype}`
- );
-
- if (msg.type === 'assistant') {
- const msgAny = msg as any;
- if (msgAny.message?.content) {
- for (const block of msgAny.message.content) {
- if (block.type === 'text') {
- responseText += block.text;
- logger.info(
- `Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
- );
- events.emit('spec-regeneration:event', {
- type: 'spec_regeneration_progress',
- content: block.text,
- projectPath: projectPath,
- });
- } else if (block.type === 'tool_use') {
- logger.info('Tool use:', block.name);
- events.emit('spec-regeneration:event', {
- type: 'spec_tool',
- tool: block.name,
- input: block.input,
- });
- }
- }
- }
- } else if (msg.type === 'result' && (msg as any).subtype === 'success') {
- logger.info('Received success result');
- // Check for structured output - this is the reliable way to get spec data
- const resultMsg = msg as any;
- if (resultMsg.structured_output) {
- structuredOutput = resultMsg.structured_output as SpecOutput;
- logger.info('✅ Received structured output');
- logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
- } else {
- logger.warn('⚠️ No structured output in result, will fall back to text parsing');
- }
- } else if (msg.type === 'result') {
- // Handle error result types
- const subtype = (msg as any).subtype;
- logger.info(`Result message: subtype=${subtype}`);
- if (subtype === 'error_max_turns') {
- logger.error('❌ Hit max turns limit!');
- } else if (subtype === 'error_max_structured_output_retries') {
- logger.error('❌ Failed to produce valid structured output after retries');
- throw new Error('Could not produce valid spec output');
- }
- } else if ((msg as { type: string }).type === 'error') {
- logger.error('❌ Received error message from stream:');
- logger.error('Error message:', JSON.stringify(msg, null, 2));
- } else if (msg.type === 'user') {
- // Log user messages (tool results)
- logger.info(`User message (tool result): ${JSON.stringify(msg).substring(0, 500)}`);
- }
- }
- } catch (streamError) {
- logger.error('❌ Error while iterating stream:');
- logger.error('Stream error:', streamError);
- throw streamError;
- }
-
- logger.info(`Stream iteration complete. Total messages: ${messageCount}`);
+ logger.info(`Stream iteration complete.`);
logger.info(`Response text length: ${responseText.length} chars`);
// Determine XML content to save
diff --git a/apps/server/src/routes/app-spec/parse-and-create-features.ts b/apps/server/src/routes/app-spec/parse-and-create-features.ts
index 364f64ad..78137a73 100644
--- a/apps/server/src/routes/app-spec/parse-and-create-features.ts
+++ b/apps/server/src/routes/app-spec/parse-and-create-features.ts
@@ -7,6 +7,7 @@ import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils';
import { getFeaturesDir } from '@automaker/platform';
+import { extractJsonWithArray } from '../../lib/json-extractor.js';
const logger = createLogger('SpecRegeneration');
@@ -22,23 +23,30 @@ export async function parseAndCreateFeatures(
logger.info('========== END CONTENT ==========');
try {
- // Extract JSON from response
- logger.info('Extracting JSON from response...');
- logger.info(`Looking for pattern: /{[\\s\\S]*"features"[\\s\\S]*}/`);
- const jsonMatch = content.match(/\{[\s\S]*"features"[\s\S]*\}/);
- if (!jsonMatch) {
- logger.error('❌ No valid JSON found in response');
+ // Extract JSON from response using shared utility
+ logger.info('Extracting JSON from response using extractJsonWithArray...');
+
+ interface FeaturesResponse {
+ features: Array<{
+ id: string;
+ category?: string;
+ title: string;
+ description: string;
+ priority?: number;
+ complexity?: string;
+ dependencies?: string[];
+ }>;
+ }
+
+ const parsed = extractJsonWithArray(content, 'features', { logger });
+
+ if (!parsed || !parsed.features) {
+ logger.error('❌ No valid JSON with "features" array found in response');
logger.error('Full content received:');
logger.error(content);
throw new Error('No valid JSON found in response');
}
- logger.info(`JSON match found (${jsonMatch[0].length} chars)`);
- logger.info('========== MATCHED JSON ==========');
- logger.info(jsonMatch[0]);
- logger.info('========== END MATCHED JSON ==========');
-
- const parsed = JSON.parse(jsonMatch[0]);
logger.info(`Parsed ${parsed.features?.length || 0} features`);
logger.info('Parsed features:', JSON.stringify(parsed.features, null, 2));
diff --git a/apps/server/src/routes/app-spec/routes/create.ts b/apps/server/src/routes/app-spec/routes/create.ts
index ed6f68f1..31836867 100644
--- a/apps/server/src/routes/app-spec/routes/create.ts
+++ b/apps/server/src/routes/app-spec/routes/create.ts
@@ -47,17 +47,17 @@ export function createCreateHandler(events: EventEmitter) {
return;
}
- const { isRunning } = getSpecRegenerationStatus();
+ const { isRunning } = getSpecRegenerationStatus(projectPath);
if (isRunning) {
- logger.warn('Generation already running, rejecting request');
- res.json({ success: false, error: 'Spec generation already running' });
+ logger.warn('Generation already running for project:', projectPath);
+ res.json({ success: false, error: 'Spec generation already running for this project' });
return;
}
logAuthStatus('Before starting generation');
const abortController = new AbortController();
- setRunningState(true, abortController);
+ setRunningState(projectPath, true, abortController);
logger.info('Starting background generation task...');
// Start generation in background
@@ -80,7 +80,7 @@ export function createCreateHandler(events: EventEmitter) {
})
.finally(() => {
logger.info('Generation task finished (success or error)');
- setRunningState(false, null);
+ setRunningState(projectPath, false, null);
});
logger.info('Returning success response (generation running in background)');
diff --git a/apps/server/src/routes/app-spec/routes/generate-features.ts b/apps/server/src/routes/app-spec/routes/generate-features.ts
index 0c80a9b6..dc627964 100644
--- a/apps/server/src/routes/app-spec/routes/generate-features.ts
+++ b/apps/server/src/routes/app-spec/routes/generate-features.ts
@@ -40,17 +40,17 @@ export function createGenerateFeaturesHandler(
return;
}
- const { isRunning } = getSpecRegenerationStatus();
+ const { isRunning } = getSpecRegenerationStatus(projectPath);
if (isRunning) {
- logger.warn('Generation already running, rejecting request');
- res.json({ success: false, error: 'Generation already running' });
+ logger.warn('Generation already running for project:', projectPath);
+ res.json({ success: false, error: 'Generation already running for this project' });
return;
}
logAuthStatus('Before starting feature generation');
const abortController = new AbortController();
- setRunningState(true, abortController);
+ setRunningState(projectPath, true, abortController);
logger.info('Starting background feature generation task...');
generateFeaturesFromSpec(projectPath, events, abortController, maxFeatures, settingsService)
@@ -63,7 +63,7 @@ export function createGenerateFeaturesHandler(
})
.finally(() => {
logger.info('Feature generation task finished (success or error)');
- setRunningState(false, null);
+ setRunningState(projectPath, false, null);
});
logger.info('Returning success response (generation running in background)');
diff --git a/apps/server/src/routes/app-spec/routes/generate.ts b/apps/server/src/routes/app-spec/routes/generate.ts
index a03dacb7..ffc792ae 100644
--- a/apps/server/src/routes/app-spec/routes/generate.ts
+++ b/apps/server/src/routes/app-spec/routes/generate.ts
@@ -48,17 +48,17 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
return;
}
- const { isRunning } = getSpecRegenerationStatus();
+ const { isRunning } = getSpecRegenerationStatus(projectPath);
if (isRunning) {
- logger.warn('Generation already running, rejecting request');
- res.json({ success: false, error: 'Spec generation already running' });
+ logger.warn('Generation already running for project:', projectPath);
+ res.json({ success: false, error: 'Spec generation already running for this project' });
return;
}
logAuthStatus('Before starting generation');
const abortController = new AbortController();
- setRunningState(true, abortController);
+ setRunningState(projectPath, true, abortController);
logger.info('Starting background generation task...');
generateSpec(
@@ -81,7 +81,7 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
})
.finally(() => {
logger.info('Generation task finished (success or error)');
- setRunningState(false, null);
+ setRunningState(projectPath, false, null);
});
logger.info('Returning success response (generation running in background)');
diff --git a/apps/server/src/routes/app-spec/routes/status.ts b/apps/server/src/routes/app-spec/routes/status.ts
index 542dd4f3..34caea32 100644
--- a/apps/server/src/routes/app-spec/routes/status.ts
+++ b/apps/server/src/routes/app-spec/routes/status.ts
@@ -6,10 +6,11 @@ import type { Request, Response } from 'express';
import { getSpecRegenerationStatus, getErrorMessage } from '../common.js';
export function createStatusHandler() {
- return async (_req: Request, res: Response): Promise => {
+ return async (req: Request, res: Response): Promise => {
try {
- const { isRunning } = getSpecRegenerationStatus();
- res.json({ success: true, isRunning });
+ const projectPath = req.query.projectPath as string | undefined;
+ const { isRunning } = getSpecRegenerationStatus(projectPath);
+ res.json({ success: true, isRunning, projectPath });
} catch (error) {
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
diff --git a/apps/server/src/routes/app-spec/routes/stop.ts b/apps/server/src/routes/app-spec/routes/stop.ts
index 0751147b..2a7b0aab 100644
--- a/apps/server/src/routes/app-spec/routes/stop.ts
+++ b/apps/server/src/routes/app-spec/routes/stop.ts
@@ -6,13 +6,16 @@ import type { Request, Response } from 'express';
import { getSpecRegenerationStatus, setRunningState, getErrorMessage } from '../common.js';
export function createStopHandler() {
- return async (_req: Request, res: Response): Promise => {
+ return async (req: Request, res: Response): Promise => {
try {
- const { currentAbortController } = getSpecRegenerationStatus();
+ const { projectPath } = req.body as { projectPath?: string };
+ const { currentAbortController } = getSpecRegenerationStatus(projectPath);
if (currentAbortController) {
currentAbortController.abort();
}
- setRunningState(false, null);
+ if (projectPath) {
+ setRunningState(projectPath, false, null);
+ }
res.json({ success: true });
} catch (error) {
res.status(500).json({ success: false, error: getErrorMessage(error) });
diff --git a/apps/server/src/routes/auth/index.ts b/apps/server/src/routes/auth/index.ts
index 575000a8..e4ff2c45 100644
--- a/apps/server/src/routes/auth/index.ts
+++ b/apps/server/src/routes/auth/index.ts
@@ -229,12 +229,13 @@ export function createAuthRoutes(): Router {
await invalidateSession(sessionToken);
}
- // Clear the cookie
- res.clearCookie(cookieName, {
- httpOnly: true,
- secure: process.env.NODE_ENV === 'production',
- sameSite: 'strict',
- path: '/',
+ // Clear the cookie by setting it to empty with immediate expiration
+ // Using res.cookie() with maxAge: 0 is more reliable than clearCookie()
+ // in cross-origin development environments
+ res.cookie(cookieName, '', {
+ ...getSessionCookieOptions(),
+ maxAge: 0,
+ expires: new Date(0),
});
res.json({
diff --git a/apps/server/src/routes/auto-mode/index.ts b/apps/server/src/routes/auto-mode/index.ts
index 5f36d691..16dbd197 100644
--- a/apps/server/src/routes/auto-mode/index.ts
+++ b/apps/server/src/routes/auto-mode/index.ts
@@ -17,6 +17,7 @@ import { createAnalyzeProjectHandler } from './routes/analyze-project.js';
import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
import { createCommitFeatureHandler } from './routes/commit-feature.js';
import { createApprovePlanHandler } from './routes/approve-plan.js';
+import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
const router = Router();
@@ -63,6 +64,11 @@ export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
validatePathParams('projectPath'),
createApprovePlanHandler(autoModeService)
);
+ router.post(
+ '/resume-interrupted',
+ validatePathParams('projectPath'),
+ createResumeInterruptedHandler(autoModeService)
+ );
return router;
}
diff --git a/apps/server/src/routes/auto-mode/routes/follow-up-feature.ts b/apps/server/src/routes/auto-mode/routes/follow-up-feature.ts
index 1ed14c39..bd9c480d 100644
--- a/apps/server/src/routes/auto-mode/routes/follow-up-feature.ts
+++ b/apps/server/src/routes/auto-mode/routes/follow-up-feature.ts
@@ -31,7 +31,9 @@ export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
// Start follow-up in background
// followUpFeature derives workDir from feature.branchName
autoModeService
- .followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? true)
+ // Default to false to match run-feature/resume-feature behavior.
+ // Worktrees should only be used when explicitly enabled by the user.
+ .followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
.catch((error) => {
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
})
diff --git a/apps/server/src/routes/auto-mode/routes/resume-feature.ts b/apps/server/src/routes/auto-mode/routes/resume-feature.ts
index 198f24ef..0a5eb54d 100644
--- a/apps/server/src/routes/auto-mode/routes/resume-feature.ts
+++ b/apps/server/src/routes/auto-mode/routes/resume-feature.ts
@@ -31,7 +31,7 @@ export function createResumeFeatureHandler(autoModeService: AutoModeService) {
autoModeService
.resumeFeature(projectPath, featureId, useWorktrees ?? false)
.catch((error) => {
- logger.error(`[AutoMode] Resume feature ${featureId} error:`, error);
+ logger.error(`Resume feature ${featureId} error:`, error);
});
res.json({ success: true });
diff --git a/apps/server/src/routes/auto-mode/routes/resume-interrupted.ts b/apps/server/src/routes/auto-mode/routes/resume-interrupted.ts
new file mode 100644
index 00000000..36cda2bd
--- /dev/null
+++ b/apps/server/src/routes/auto-mode/routes/resume-interrupted.ts
@@ -0,0 +1,42 @@
+/**
+ * Resume Interrupted Features Handler
+ *
+ * Checks for features that were interrupted (in pipeline steps or in_progress)
+ * when the server was restarted and resumes them.
+ */
+
+import type { Request, Response } from 'express';
+import { createLogger } from '@automaker/utils';
+import type { AutoModeService } from '../../../services/auto-mode-service.js';
+
+const logger = createLogger('ResumeInterrupted');
+
+interface ResumeInterruptedRequest {
+ projectPath: string;
+}
+
+export function createResumeInterruptedHandler(autoModeService: AutoModeService) {
+ return async (req: Request, res: Response): Promise => {
+ const { projectPath } = req.body as ResumeInterruptedRequest;
+
+ if (!projectPath) {
+ res.status(400).json({ error: 'Project path is required' });
+ return;
+ }
+
+ logger.info(`Checking for interrupted features in ${projectPath}`);
+
+ try {
+ await autoModeService.resumeInterruptedFeatures(projectPath);
+ res.json({
+ success: true,
+ message: 'Resume check completed',
+ });
+ } catch (error) {
+ logger.error('Error resuming interrupted features:', error);
+ res.status(500).json({
+ error: error instanceof Error ? error.message : 'Unknown error',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/auto-mode/routes/run-feature.ts b/apps/server/src/routes/auto-mode/routes/run-feature.ts
index 16ed475a..1bec9368 100644
--- a/apps/server/src/routes/auto-mode/routes/run-feature.ts
+++ b/apps/server/src/routes/auto-mode/routes/run-feature.ts
@@ -31,7 +31,7 @@ export function createRunFeatureHandler(autoModeService: AutoModeService) {
autoModeService
.executeFeature(projectPath, featureId, useWorktrees ?? false, false)
.catch((error) => {
- logger.error(`[AutoMode] Feature ${featureId} error:`, error);
+ logger.error(`Feature ${featureId} error:`, error);
})
.finally(() => {
// Release the starting slot when execution completes (success or error)
diff --git a/apps/server/src/routes/backlog-plan/generate-plan.ts b/apps/server/src/routes/backlog-plan/generate-plan.ts
index f67cac04..d8235e50 100644
--- a/apps/server/src/routes/backlog-plan/generate-plan.ts
+++ b/apps/server/src/routes/backlog-plan/generate-plan.ts
@@ -1,11 +1,22 @@
/**
* Generate backlog plan using Claude AI
+ *
+ * Model is configurable via phaseModels.backlogPlanningModel in settings
+ * (defaults to Sonnet). Can be overridden per-call via model parameter.
*/
import type { EventEmitter } from '../../lib/events.js';
import type { Feature, BacklogPlanResult, BacklogChange, DependencyUpdate } from '@automaker/types';
+import {
+ DEFAULT_PHASE_MODELS,
+ isCursorModel,
+ stripProviderPrefix,
+ type ThinkingLevel,
+} from '@automaker/types';
+import { resolvePhaseModel } from '@automaker/model-resolver';
import { FeatureLoader } from '../../services/feature-loader.js';
import { ProviderFactory } from '../../providers/provider-factory.js';
+import { extractJsonWithArray } from '../../lib/json-extractor.js';
import { logger, setRunningState, getErrorMessage } from './common.js';
import type { SettingsService } from '../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
@@ -39,24 +50,28 @@ function formatFeaturesForPrompt(features: Feature[]): string {
* Parse the AI response into a BacklogPlanResult
*/
function parsePlanResponse(response: string): BacklogPlanResult {
- try {
- // Try to extract JSON from the response
- const jsonMatch = response.match(/```json\n?([\s\S]*?)\n?```/);
- if (jsonMatch) {
- return JSON.parse(jsonMatch[1]);
- }
+ // Use shared JSON extraction utility for robust parsing
+ // extractJsonWithArray validates that 'changes' exists AND is an array
+ const parsed = extractJsonWithArray(response, 'changes', {
+ logger,
+ });
- // Try to parse the whole response as JSON
- return JSON.parse(response);
- } catch {
- // If parsing fails, return an empty result
- logger.warn('[BacklogPlan] Failed to parse AI response as JSON');
- return {
- changes: [],
- summary: 'Failed to parse AI response',
- dependencyUpdates: [],
- };
+ if (parsed) {
+ return parsed;
}
+
+ // If parsing fails, log details and return an empty result
+ logger.warn('[BacklogPlan] Failed to parse AI response as JSON');
+ logger.warn('[BacklogPlan] Response text length:', response.length);
+ logger.warn('[BacklogPlan] Response preview:', response.slice(0, 500));
+ if (response.length === 0) {
+ logger.error('[BacklogPlan] Response text is EMPTY! No content was extracted from stream.');
+ }
+ return {
+ changes: [],
+ summary: 'Failed to parse AI response',
+ dependencyUpdates: [],
+ };
}
/**
@@ -96,9 +111,22 @@ export async function generateBacklogPlan(
content: 'Generating plan with AI...',
});
- // Get the model to use
- const effectiveModel = model || 'sonnet';
+ // Get the model to use from settings or provided override
+ let effectiveModel = model;
+ let thinkingLevel: ThinkingLevel | undefined;
+ if (!effectiveModel) {
+ const settings = await settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.backlogPlanningModel || DEFAULT_PHASE_MODELS.backlogPlanningModel;
+ const resolved = resolvePhaseModel(phaseModelEntry);
+ effectiveModel = resolved.model;
+ thinkingLevel = resolved.thinkingLevel;
+ }
+ logger.info('[BacklogPlan] Using model:', effectiveModel);
+
const provider = ProviderFactory.getProviderForModel(effectiveModel);
+ // Strip provider prefix - providers expect bare model IDs
+ const bareModel = stripProviderPrefix(effectiveModel);
// Get autoLoadClaudeMd setting
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
@@ -107,16 +135,38 @@ export async function generateBacklogPlan(
'[BacklogPlan]'
);
+ // For Cursor models, we need to combine prompts with explicit instructions
+ // because Cursor doesn't support systemPrompt separation like Claude SDK
+ let finalPrompt = userPrompt;
+ let finalSystemPrompt: string | undefined = systemPrompt;
+
+ if (isCursorModel(effectiveModel)) {
+ logger.info('[BacklogPlan] Using Cursor model - adding explicit no-file-write instructions');
+ finalPrompt = `${systemPrompt}
+
+CRITICAL INSTRUCTIONS:
+1. DO NOT write any files. Return the JSON in your response only.
+2. DO NOT use Write, Edit, or any file modification tools.
+3. Respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
+4. Your entire response should be valid JSON starting with { and ending with }.
+5. No text before or after the JSON object.
+
+${userPrompt}`;
+ finalSystemPrompt = undefined; // System prompt is now embedded in the user prompt
+ }
+
// Execute the query
const stream = provider.executeQuery({
- prompt: userPrompt,
- model: effectiveModel,
+ prompt: finalPrompt,
+ model: bareModel,
cwd: projectPath,
- systemPrompt,
+ systemPrompt: finalSystemPrompt,
maxTurns: 1,
allowedTools: [], // No tools needed for this
abortController,
settingSources: autoLoadClaudeMd ? ['user', 'project'] : undefined,
+ readOnly: true, // Plan generation only generates text, doesn't write files
+ thinkingLevel, // Pass thinking level for extended thinking
});
let responseText = '';
@@ -134,6 +184,16 @@ export async function generateBacklogPlan(
}
}
}
+ } else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
+ // Use result if it's a final accumulated message (from Cursor provider)
+ logger.info('[BacklogPlan] Received result from Cursor, length:', msg.result.length);
+ logger.info('[BacklogPlan] Previous responseText length:', responseText.length);
+ if (msg.result.length > responseText.length) {
+ logger.info('[BacklogPlan] Using Cursor result (longer than accumulated text)');
+ responseText = msg.result;
+ } else {
+ logger.info('[BacklogPlan] Keeping accumulated text (longer than Cursor result)');
+ }
}
}
diff --git a/apps/server/src/routes/backlog-plan/routes/apply.ts b/apps/server/src/routes/backlog-plan/routes/apply.ts
index 71dc3bd9..b6c257a0 100644
--- a/apps/server/src/routes/backlog-plan/routes/apply.ts
+++ b/apps/server/src/routes/backlog-plan/routes/apply.ts
@@ -12,11 +12,22 @@ const featureLoader = new FeatureLoader();
export function createApplyHandler() {
return async (req: Request, res: Response): Promise => {
try {
- const { projectPath, plan } = req.body as {
+ const {
+ projectPath,
+ plan,
+ branchName: rawBranchName,
+ } = req.body as {
projectPath: string;
plan: BacklogPlanResult;
+ branchName?: string;
};
+ // Validate branchName: must be undefined or a non-empty trimmed string
+ const branchName =
+ typeof rawBranchName === 'string' && rawBranchName.trim().length > 0
+ ? rawBranchName.trim()
+ : undefined;
+
if (!projectPath) {
res.status(400).json({ success: false, error: 'projectPath required' });
return;
@@ -82,6 +93,7 @@ export function createApplyHandler() {
dependencies: change.feature.dependencies,
priority: change.feature.priority,
status: 'backlog',
+ branchName,
});
appliedChanges.push(`added:${newFeature.id}`);
diff --git a/apps/server/src/routes/claude/index.ts b/apps/server/src/routes/claude/index.ts
index 326b6d90..20816bbc 100644
--- a/apps/server/src/routes/claude/index.ts
+++ b/apps/server/src/routes/claude/index.ts
@@ -1,5 +1,8 @@
import { Router, Request, Response } from 'express';
import { ClaudeUsageService } from '../../services/claude-usage-service.js';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('Claude');
export function createClaudeRoutes(service: ClaudeUsageService): Router {
const router = Router();
@@ -10,7 +13,10 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
// Check if Claude CLI is available first
const isAvailable = await service.isAvailable();
if (!isAvailable) {
- res.status(503).json({
+ // IMPORTANT: This endpoint is behind Automaker session auth already.
+ // Use a 200 + error payload for Claude CLI issues so the UI doesn't
+ // interpret it as an invalid Automaker session (401/403 triggers logout).
+ res.status(200).json({
error: 'Claude CLI not found',
message: "Please install Claude Code CLI and run 'claude login' to authenticate",
});
@@ -23,17 +29,18 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
const message = error instanceof Error ? error.message : 'Unknown error';
if (message.includes('Authentication required') || message.includes('token_expired')) {
- res.status(401).json({
+ // Do NOT use 401/403 here: that status code is reserved for Automaker session auth.
+ res.status(200).json({
error: 'Authentication required',
message: "Please run 'claude login' to authenticate",
});
} else if (message.includes('timed out')) {
- res.status(504).json({
+ res.status(200).json({
error: 'Command timed out',
message: 'The Claude CLI took too long to respond',
});
} else {
- console.error('Error fetching usage:', error);
+ logger.error('Error fetching usage:', error);
res.status(500).json({ error: message });
}
}
diff --git a/apps/server/src/routes/codex/index.ts b/apps/server/src/routes/codex/index.ts
new file mode 100644
index 00000000..005a81bc
--- /dev/null
+++ b/apps/server/src/routes/codex/index.ts
@@ -0,0 +1,90 @@
+import { Router, Request, Response } from 'express';
+import { CodexUsageService } from '../../services/codex-usage-service.js';
+import { CodexModelCacheService } from '../../services/codex-model-cache-service.js';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('Codex');
+
+export function createCodexRoutes(
+ usageService: CodexUsageService,
+ modelCacheService: CodexModelCacheService
+): Router {
+ const router = Router();
+
+ // Get current usage (attempts to fetch from Codex CLI)
+ router.get('/usage', async (_req: Request, res: Response) => {
+ try {
+ // Check if Codex CLI is available first
+ const isAvailable = await usageService.isAvailable();
+ if (!isAvailable) {
+ // IMPORTANT: This endpoint is behind Automaker session auth already.
+ // Use a 200 + error payload for Codex CLI issues so the UI doesn't
+ // interpret it as an invalid Automaker session (401/403 triggers logout).
+ res.status(200).json({
+ error: 'Codex CLI not found',
+ message: "Please install Codex CLI and run 'codex login' to authenticate",
+ });
+ return;
+ }
+
+ const usage = await usageService.fetchUsageData();
+ res.json(usage);
+ } catch (error) {
+ const message = error instanceof Error ? error.message : 'Unknown error';
+
+ if (message.includes('not authenticated') || message.includes('login')) {
+      // Do NOT use 401/403 here: those status codes are reserved for Automaker session auth.
+ res.status(200).json({
+ error: 'Authentication required',
+ message: "Please run 'codex login' to authenticate",
+ });
+ } else if (message.includes('not available') || message.includes('does not provide')) {
+        // Expected case: the Codex CLI does not expose usage statistics.
+ res.status(200).json({
+ error: 'Usage statistics not available',
+ message: message,
+ });
+ } else if (message.includes('timed out')) {
+ res.status(200).json({
+ error: 'Command timed out',
+ message: 'The Codex CLI took too long to respond',
+ });
+ } else {
+ logger.error('Error fetching usage:', error);
+ res.status(500).json({ error: message });
+ }
+ }
+ });
+
+ // Get available Codex models (cached)
+ router.get('/models', async (req: Request, res: Response) => {
+ try {
+ const forceRefresh = req.query.refresh === 'true';
+ const { models, cachedAt } = await modelCacheService.getModelsWithMetadata(forceRefresh);
+
+ if (models.length === 0) {
+ res.status(503).json({
+ success: false,
+ error: 'Codex CLI not available or not authenticated',
+ message: "Please install Codex CLI and run 'codex login' to authenticate",
+ });
+ return;
+ }
+
+ res.json({
+ success: true,
+ models,
+ cachedAt,
+ });
+ } catch (error) {
+ logger.error('Error fetching models:', error);
+ const message = error instanceof Error ? error.message : 'Unknown error';
+ res.status(500).json({
+ success: false,
+ error: message,
+ });
+ }
+ });
+
+ return router;
+}
diff --git a/apps/server/src/routes/context/routes/describe-file.ts b/apps/server/src/routes/context/routes/describe-file.ts
index 472cbb76..ca8c2b4a 100644
--- a/apps/server/src/routes/context/routes/describe-file.ts
+++ b/apps/server/src/routes/context/routes/describe-file.ts
@@ -1,8 +1,9 @@
/**
* POST /context/describe-file endpoint - Generate description for a text file
*
- * Uses Claude Haiku to analyze a text file and generate a concise description
- * suitable for context file metadata.
+ * Uses AI to analyze a text file and generate a concise description
+ * suitable for context file metadata. Model is configurable via
+ * phaseModels.fileDescriptionModel in settings (defaults to Haiku).
*
* SECURITY: This endpoint validates file paths against ALLOWED_ROOT_DIRECTORY
* and reads file content directly (not via Claude's Read tool) to prevent
@@ -10,11 +11,11 @@
*/
import type { Request, Response } from 'express';
-import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
-import { CLAUDE_MODEL_MAP } from '@automaker/types';
+import { DEFAULT_PHASE_MODELS } from '@automaker/types';
import { PathNotAllowedError } from '@automaker/platform';
-import { createCustomOptions } from '../../../lib/sdk-options.js';
+import { resolvePhaseModel } from '@automaker/model-resolver';
+import { simpleQuery } from '../../../providers/simple-query-service.js';
import * as secureFs from '../../../lib/secure-fs.js';
import * as path from 'path';
import type { SettingsService } from '../../../services/settings-service.js';
@@ -46,31 +47,6 @@ interface DescribeFileErrorResponse {
error: string;
}
-/**
- * Extract text content from Claude SDK response messages
- */
-async function extractTextFromStream(
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- stream: AsyncIterable
-): Promise {
- let responseText = '';
-
- for await (const msg of stream) {
- if (msg.type === 'assistant' && msg.message?.content) {
- const blocks = msg.message.content as Array<{ type: string; text?: string }>;
- for (const block of blocks) {
- if (block.type === 'text' && block.text) {
- responseText += block.text;
- }
- }
- } else if (msg.type === 'result' && msg.subtype === 'success') {
- responseText = msg.result || responseText;
- }
- }
-
- return responseText;
-}
-
/**
* Create the describe-file request handler
*
@@ -94,7 +70,7 @@ export function createDescribeFileHandler(
return;
}
- logger.info(`[DescribeFile] Starting description generation for: ${filePath}`);
+ logger.info(`Starting description generation for: ${filePath}`);
// Resolve the path for logging and cwd derivation
const resolvedPath = secureFs.resolvePath(filePath);
@@ -109,7 +85,7 @@ export function createDescribeFileHandler(
} catch (readError) {
// Path not allowed - return 403 Forbidden
if (readError instanceof PathNotAllowedError) {
- logger.warn(`[DescribeFile] Path not allowed: ${filePath}`);
+ logger.warn(`Path not allowed: ${filePath}`);
const response: DescribeFileErrorResponse = {
success: false,
error: 'File path is not within the allowed directory',
@@ -125,7 +101,7 @@ export function createDescribeFileHandler(
'code' in readError &&
readError.code === 'ENOENT'
) {
- logger.warn(`[DescribeFile] File not found: ${resolvedPath}`);
+ logger.warn(`File not found: ${resolvedPath}`);
const response: DescribeFileErrorResponse = {
success: false,
error: `File not found: ${filePath}`,
@@ -135,7 +111,7 @@ export function createDescribeFileHandler(
}
const errorMessage = readError instanceof Error ? readError.message : 'Unknown error';
- logger.error(`[DescribeFile] Failed to read file: ${errorMessage}`);
+ logger.error(`Failed to read file: ${errorMessage}`);
const response: DescribeFileErrorResponse = {
success: false,
error: `Failed to read file: ${errorMessage}`,
@@ -156,16 +132,14 @@ export function createDescribeFileHandler(
// Build prompt with file content passed as structured data
// The file content is included directly, not via tool invocation
- const instructionText = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
+ const prompt = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
Respond with ONLY the description text, no additional formatting, preamble, or explanation.
-File: ${fileName}${truncated ? ' (truncated)' : ''}`;
+File: ${fileName}${truncated ? ' (truncated)' : ''}
- const promptContent = [
- { type: 'text' as const, text: instructionText },
- { type: 'text' as const, text: `\n\n--- FILE CONTENT ---\n${contentToAnalyze}` },
- ];
+--- FILE CONTENT ---
+${contentToAnalyze}`;
// Use the file's directory as the working directory
const cwd = path.dirname(resolvedPath);
@@ -177,30 +151,29 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
'[DescribeFile]'
);
- // Use centralized SDK options with proper cwd validation
- // No tools needed since we're passing file content directly
- const sdkOptions = createCustomOptions({
+ // Get model from phase settings
+ const settings = await settingsService?.getGlobalSettings();
+ logger.info(`Raw phaseModels from settings:`, JSON.stringify(settings?.phaseModels, null, 2));
+ const phaseModelEntry =
+ settings?.phaseModels?.fileDescriptionModel || DEFAULT_PHASE_MODELS.fileDescriptionModel;
+ logger.info(`fileDescriptionModel entry:`, JSON.stringify(phaseModelEntry));
+ const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
+
+ logger.info(`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`);
+
+ // Use simpleQuery - provider abstraction handles routing to correct provider
+ const result = await simpleQuery({
+ prompt,
+ model,
cwd,
- model: CLAUDE_MODEL_MAP.haiku,
maxTurns: 1,
allowedTools: [],
- autoLoadClaudeMd,
- sandbox: { enabled: true, autoAllowBashIfSandboxed: true },
+ thinkingLevel,
+ readOnly: true, // File description only reads, doesn't write
+ settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
});
- const promptGenerator = (async function* () {
- yield {
- type: 'user' as const,
- session_id: '',
- message: { role: 'user' as const, content: promptContent },
- parent_tool_use_id: null,
- };
- })();
-
- const stream = query({ prompt: promptGenerator, options: sdkOptions });
-
- // Extract the description from the response
- const description = await extractTextFromStream(stream);
+ const description = result.text;
if (!description || description.trim().length === 0) {
logger.warn('Received empty response from Claude');
diff --git a/apps/server/src/routes/context/routes/describe-image.ts b/apps/server/src/routes/context/routes/describe-image.ts
index bce87740..e5e50963 100644
--- a/apps/server/src/routes/context/routes/describe-image.ts
+++ b/apps/server/src/routes/context/routes/describe-image.ts
@@ -1,8 +1,9 @@
/**
* POST /context/describe-image endpoint - Generate description for an image
*
- * Uses Claude Haiku to analyze an image and generate a concise description
- * suitable for context file metadata.
+ * Uses AI to analyze an image and generate a concise description
+ * suitable for context file metadata. Model is configurable via
+ * phaseModels.imageDescriptionModel in settings (defaults to Haiku).
*
* IMPORTANT:
* The agent runner (chat/auto-mode) sends images as multi-part content blocks (base64 image blocks),
@@ -11,10 +12,10 @@
*/
import type { Request, Response } from 'express';
-import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger, readImageAsBase64 } from '@automaker/utils';
-import { CLAUDE_MODEL_MAP } from '@automaker/types';
-import { createCustomOptions } from '../../../lib/sdk-options.js';
+import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
+import { resolvePhaseModel } from '@automaker/model-resolver';
+import { simpleQuery } from '../../../providers/simple-query-service.js';
import * as secureFs from '../../../lib/secure-fs.js';
import * as path from 'path';
import type { SettingsService } from '../../../services/settings-service.js';
@@ -175,57 +176,10 @@ function mapDescribeImageError(rawMessage: string | undefined): {
return baseResponse;
}
-/**
- * Extract text content from Claude SDK response messages and log high-signal stream events.
- */
-async function extractTextFromStream(
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- stream: AsyncIterable,
- requestId: string
-): Promise {
- let responseText = '';
- let messageCount = 0;
-
- logger.info(`[${requestId}] [Stream] Begin reading SDK stream...`);
-
- for await (const msg of stream) {
- messageCount++;
- const msgType = msg?.type;
- const msgSubtype = msg?.subtype;
-
- // Keep this concise but informative. Full error object is logged in catch blocks.
- logger.info(
- `[${requestId}] [Stream] #${messageCount} type=${String(msgType)} subtype=${String(msgSubtype ?? '')}`
- );
-
- if (msgType === 'assistant' && msg.message?.content) {
- const blocks = msg.message.content as Array<{ type: string; text?: string }>;
- logger.info(`[${requestId}] [Stream] assistant blocks=${blocks.length}`);
- for (const block of blocks) {
- if (block.type === 'text' && block.text) {
- responseText += block.text;
- }
- }
- }
-
- if (msgType === 'result' && msgSubtype === 'success') {
- if (typeof msg.result === 'string' && msg.result.length > 0) {
- responseText = msg.result;
- }
- }
- }
-
- logger.info(
- `[${requestId}] [Stream] End of stream. messages=${messageCount} textLength=${responseText.length}`
- );
-
- return responseText;
-}
-
/**
* Create the describe-image request handler
*
- * Uses Claude SDK query with multi-part content blocks to include the image (base64),
+ * Uses the provider abstraction with multi-part content blocks to include the image (base64),
* matching the agent runner behavior.
*
* @param settingsService - Optional settings service for loading autoLoadClaudeMd setting
@@ -306,27 +260,6 @@ export function createDescribeImageHandler(
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
);
- // Build multi-part prompt with image block (no Read tool required)
- const instructionText =
- `Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
- `Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
- `"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
- `Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
-
- const promptContent = [
- { type: 'text' as const, text: instructionText },
- {
- type: 'image' as const,
- source: {
- type: 'base64' as const,
- media_type: imageData.mimeType,
- data: imageData.base64,
- },
- },
- ];
-
- logger.info(`[${requestId}] Built multi-part prompt blocks=${promptContent.length}`);
-
const cwd = path.dirname(actualPath);
logger.info(`[${requestId}] Using cwd=${cwd}`);
@@ -337,43 +270,67 @@ export function createDescribeImageHandler(
'[DescribeImage]'
);
- // Use the same centralized option builder used across the server (validates cwd)
- const sdkOptions = createCustomOptions({
+ // Get model from phase settings
+ const settings = await settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.imageDescriptionModel || DEFAULT_PHASE_MODELS.imageDescriptionModel;
+ const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
+
+ logger.info(`[${requestId}] Using model: ${model}`);
+
+ // Build the instruction text
+ const instructionText =
+ `Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
+ `Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
+ `"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
+ `Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
+
+ // Build prompt based on provider capability
+ // Some providers (like Cursor) may not support image content blocks
+ let prompt: string | Array<{ type: string; text?: string; source?: object }>;
+
+ if (isCursorModel(model)) {
+        // Cursor may not support base64 image blocks directly, so fall
+        // back to a plain-text prompt that references the image file path.
+ logger.info(`[${requestId}] Using text prompt for Cursor model`);
+ prompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
+ } else {
+ // Claude and other vision-capable models support multi-part prompts with images
+ logger.info(`[${requestId}] Using multi-part prompt with image block`);
+ prompt = [
+ { type: 'text', text: instructionText },
+ {
+ type: 'image',
+ source: {
+ type: 'base64',
+ media_type: imageData.mimeType,
+ data: imageData.base64,
+ },
+ },
+ ];
+ }
+
+ logger.info(`[${requestId}] Calling simpleQuery...`);
+ const queryStart = Date.now();
+
+ // Use simpleQuery - provider abstraction handles routing
+ const result = await simpleQuery({
+ prompt,
+ model,
cwd,
- model: CLAUDE_MODEL_MAP.haiku,
maxTurns: 1,
- allowedTools: [],
- autoLoadClaudeMd,
- sandbox: { enabled: true, autoAllowBashIfSandboxed: true },
+ allowedTools: isCursorModel(model) ? ['Read'] : [], // Allow Read for Cursor to read image if needed
+ thinkingLevel,
+ readOnly: true, // Image description only reads, doesn't write
+ settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
});
- logger.info(
- `[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
- sdkOptions.allowedTools
- )} sandbox=${JSON.stringify(sdkOptions.sandbox)}`
- );
+ logger.info(`[${requestId}] simpleQuery completed in ${Date.now() - queryStart}ms`);
- const promptGenerator = (async function* () {
- yield {
- type: 'user' as const,
- session_id: '',
- message: { role: 'user' as const, content: promptContent },
- parent_tool_use_id: null,
- };
- })();
-
- logger.info(`[${requestId}] Calling query()...`);
- const queryStart = Date.now();
- const stream = query({ prompt: promptGenerator, options: sdkOptions });
- logger.info(`[${requestId}] query() returned stream in ${Date.now() - queryStart}ms`);
-
- // Extract the description from the response
- const extractStart = Date.now();
- const description = await extractTextFromStream(stream, requestId);
- logger.info(`[${requestId}] extractMs=${Date.now() - extractStart}`);
+ const description = result.text;
if (!description || description.trim().length === 0) {
- logger.warn(`[${requestId}] Received empty response from Claude`);
+ logger.warn(`[${requestId}] Received empty response from AI`);
const response: DescribeImageErrorResponse = {
success: false,
error: 'Failed to generate description - empty response',
diff --git a/apps/server/src/routes/enhance-prompt/routes/enhance.ts b/apps/server/src/routes/enhance-prompt/routes/enhance.ts
index ad6e9602..5861b418 100644
--- a/apps/server/src/routes/enhance-prompt/routes/enhance.ts
+++ b/apps/server/src/routes/enhance-prompt/routes/enhance.ts
@@ -1,15 +1,16 @@
/**
* POST /enhance-prompt endpoint - Enhance user input text
*
- * Uses Claude AI to enhance text based on the specified enhancement mode.
- * Supports modes: improve, technical, simplify, acceptance
+ * Uses the provider abstraction to enhance text based on the specified
+ * enhancement mode. Works with any configured provider (Claude, Cursor, etc.).
+ * Supports modes: improve, technical, simplify, acceptance, ux-reviewer
*/
import type { Request, Response } from 'express';
-import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { resolveModelString } from '@automaker/model-resolver';
-import { CLAUDE_MODEL_MAP } from '@automaker/types';
+import { CLAUDE_MODEL_MAP, type ThinkingLevel } from '@automaker/types';
+import { simpleQuery } from '../../../providers/simple-query-service.js';
import type { SettingsService } from '../../../services/settings-service.js';
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
import {
@@ -30,6 +31,8 @@ interface EnhanceRequestBody {
enhancementMode: string;
/** Optional model override */
model?: string;
+ /** Optional thinking level for Claude models */
+ thinkingLevel?: ThinkingLevel;
}
/**
@@ -48,39 +51,6 @@ interface EnhanceErrorResponse {
error: string;
}
-/**
- * Extract text content from Claude SDK response messages
- *
- * @param stream - The async iterable from the query function
- * @returns The extracted text content
- */
-async function extractTextFromStream(
- stream: AsyncIterable<{
- type: string;
- subtype?: string;
- result?: string;
- message?: {
- content?: Array<{ type: string; text?: string }>;
- };
- }>
-): Promise {
- let responseText = '';
-
- for await (const msg of stream) {
- if (msg.type === 'assistant' && msg.message?.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text' && block.text) {
- responseText += block.text;
- }
- }
- } else if (msg.type === 'result' && msg.subtype === 'success') {
- responseText = msg.result || responseText;
- }
- }
-
- return responseText;
-}
-
/**
* Create the enhance request handler
*
@@ -92,7 +62,8 @@ export function createEnhanceHandler(
): (req: Request, res: Response) => Promise {
return async (req: Request, res: Response): Promise => {
try {
- const { originalText, enhancementMode, model } = req.body as EnhanceRequestBody;
+ const { originalText, enhancementMode, model, thinkingLevel } =
+ req.body as EnhanceRequestBody;
// Validate required fields
if (!originalText || typeof originalText !== 'string') {
@@ -141,13 +112,13 @@ export function createEnhanceHandler(
technical: prompts.enhancement.technicalSystemPrompt,
simplify: prompts.enhancement.simplifySystemPrompt,
acceptance: prompts.enhancement.acceptanceSystemPrompt,
+ 'ux-reviewer': prompts.enhancement.uxReviewerSystemPrompt,
};
const systemPrompt = systemPromptMap[validMode];
logger.debug(`Using ${validMode} system prompt (length: ${systemPrompt.length} chars)`);
// Build the user prompt with few-shot examples
- // This helps the model understand this is text transformation, not a coding task
const userPrompt = buildUserPrompt(validMode, trimmedText, true);
// Resolve the model - use the passed model, default to sonnet for quality
@@ -155,24 +126,23 @@ export function createEnhanceHandler(
logger.debug(`Using model: ${resolvedModel}`);
- // Call Claude SDK with minimal configuration for text transformation
- // Key: no tools, just text completion
- const stream = query({
- prompt: userPrompt,
- options: {
- model: resolvedModel,
- systemPrompt,
- maxTurns: 1,
- allowedTools: [],
- permissionMode: 'acceptEdits',
- },
+ // Use simpleQuery - provider abstraction handles routing to correct provider
+ // The system prompt is combined with user prompt since some providers
+ // don't have a separate system prompt concept
+ const result = await simpleQuery({
+ prompt: `${systemPrompt}\n\n${userPrompt}`,
+ model: resolvedModel,
+ cwd: process.cwd(), // Enhancement doesn't need a specific working directory
+ maxTurns: 1,
+ allowedTools: [],
+ thinkingLevel,
+ readOnly: true, // Prompt enhancement only generates text, doesn't write files
});
- // Extract the enhanced text from the response
- const enhancedText = await extractTextFromStream(stream);
+ const enhancedText = result.text;
if (!enhancedText || enhancedText.trim().length === 0) {
- logger.warn('Received empty response from Claude');
+ logger.warn('Received empty response from AI');
const response: EnhanceErrorResponse = {
success: false,
error: 'Failed to generate enhanced text - empty response',
diff --git a/apps/server/src/routes/features/index.ts b/apps/server/src/routes/features/index.ts
index 5863c4d4..e0435f35 100644
--- a/apps/server/src/routes/features/index.ts
+++ b/apps/server/src/routes/features/index.ts
@@ -9,8 +9,10 @@ import { createListHandler } from './routes/list.js';
import { createGetHandler } from './routes/get.js';
import { createCreateHandler } from './routes/create.js';
import { createUpdateHandler } from './routes/update.js';
+import { createBulkUpdateHandler } from './routes/bulk-update.js';
+import { createBulkDeleteHandler } from './routes/bulk-delete.js';
import { createDeleteHandler } from './routes/delete.js';
-import { createAgentOutputHandler } from './routes/agent-output.js';
+import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
import { createGenerateTitleHandler } from './routes/generate-title.js';
export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
@@ -20,8 +22,19 @@ export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
router.post('/create', validatePathParams('projectPath'), createCreateHandler(featureLoader));
router.post('/update', validatePathParams('projectPath'), createUpdateHandler(featureLoader));
+ router.post(
+ '/bulk-update',
+ validatePathParams('projectPath'),
+ createBulkUpdateHandler(featureLoader)
+ );
+ router.post(
+ '/bulk-delete',
+ validatePathParams('projectPath'),
+ createBulkDeleteHandler(featureLoader)
+ );
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
router.post('/agent-output', createAgentOutputHandler(featureLoader));
+ router.post('/raw-output', createRawOutputHandler(featureLoader));
router.post('/generate-title', createGenerateTitleHandler());
return router;
diff --git a/apps/server/src/routes/features/routes/agent-output.ts b/apps/server/src/routes/features/routes/agent-output.ts
index f928644a..d88e6d6f 100644
--- a/apps/server/src/routes/features/routes/agent-output.ts
+++ b/apps/server/src/routes/features/routes/agent-output.ts
@@ -1,5 +1,6 @@
/**
* POST /agent-output endpoint - Get agent output for a feature
+ * POST /raw-output endpoint - Get raw JSONL output for debugging
*/
import type { Request, Response } from 'express';
@@ -30,3 +31,31 @@ export function createAgentOutputHandler(featureLoader: FeatureLoader) {
}
};
}
+
+/**
+ * Handler for getting raw JSONL output for debugging
+ */
+export function createRawOutputHandler(featureLoader: FeatureLoader) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, featureId } = req.body as {
+ projectPath: string;
+ featureId: string;
+ };
+
+ if (!projectPath || !featureId) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath and featureId are required',
+ });
+ return;
+ }
+
+ const content = await featureLoader.getRawOutput(projectPath, featureId);
+ res.json({ success: true, content });
+ } catch (error) {
+ logError(error, 'Get raw output failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/features/routes/bulk-delete.ts b/apps/server/src/routes/features/routes/bulk-delete.ts
new file mode 100644
index 00000000..555515ae
--- /dev/null
+++ b/apps/server/src/routes/features/routes/bulk-delete.ts
@@ -0,0 +1,61 @@
+/**
+ * POST /bulk-delete endpoint - Delete multiple features at once
+ */
+
+import type { Request, Response } from 'express';
+import { FeatureLoader } from '../../../services/feature-loader.js';
+import { getErrorMessage, logError } from '../common.js';
+
+interface BulkDeleteRequest {
+ projectPath: string;
+ featureIds: string[];
+}
+
+interface BulkDeleteResult {
+ featureId: string;
+ success: boolean;
+ error?: string;
+}
+
+export function createBulkDeleteHandler(featureLoader: FeatureLoader) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, featureIds } = req.body as BulkDeleteRequest;
+
+ if (!projectPath || !featureIds || !Array.isArray(featureIds) || featureIds.length === 0) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath and featureIds (non-empty array) are required',
+ });
+ return;
+ }
+
+ const results = await Promise.all(
+ featureIds.map(async (featureId) => {
+ const success = await featureLoader.delete(projectPath, featureId);
+ if (success) {
+ return { featureId, success: true };
+ }
+ return {
+ featureId,
+ success: false,
+ error: 'Deletion failed. Check server logs for details.',
+ };
+ })
+ );
+
+ const successCount = results.reduce((count, r) => count + (r.success ? 1 : 0), 0);
+ const failureCount = results.length - successCount;
+
+ res.json({
+ success: failureCount === 0,
+ deletedCount: successCount,
+ failedCount: failureCount,
+ results,
+ });
+ } catch (error) {
+ logError(error, 'Bulk delete features failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/features/routes/bulk-update.ts b/apps/server/src/routes/features/routes/bulk-update.ts
new file mode 100644
index 00000000..a1c97e72
--- /dev/null
+++ b/apps/server/src/routes/features/routes/bulk-update.ts
@@ -0,0 +1,75 @@
+/**
+ * POST /bulk-update endpoint - Update multiple features at once
+ */
+
+import type { Request, Response } from 'express';
+import { FeatureLoader } from '../../../services/feature-loader.js';
+import type { Feature } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+interface BulkUpdateRequest {
+ projectPath: string;
+ featureIds: string[];
+ updates: Partial;
+}
+
+interface BulkUpdateResult {
+ featureId: string;
+ success: boolean;
+ error?: string;
+}
+
+export function createBulkUpdateHandler(featureLoader: FeatureLoader) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, featureIds, updates } = req.body as BulkUpdateRequest;
+
+ if (!projectPath || !featureIds || !Array.isArray(featureIds) || featureIds.length === 0) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath and featureIds (non-empty array) are required',
+ });
+ return;
+ }
+
+ if (!updates || Object.keys(updates).length === 0) {
+ res.status(400).json({
+ success: false,
+ error: 'updates object with at least one field is required',
+ });
+ return;
+ }
+
+ const results: BulkUpdateResult[] = [];
+ const updatedFeatures: Feature[] = [];
+
+ for (const featureId of featureIds) {
+ try {
+ const updated = await featureLoader.update(projectPath, featureId, updates);
+ results.push({ featureId, success: true });
+ updatedFeatures.push(updated);
+ } catch (error) {
+ results.push({
+ featureId,
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ }
+
+ const successCount = results.filter((r) => r.success).length;
+ const failureCount = results.filter((r) => !r.success).length;
+
+ res.json({
+ success: failureCount === 0,
+ updatedCount: successCount,
+ failedCount: failureCount,
+ results,
+ features: updatedFeatures,
+ });
+ } catch (error) {
+ logError(error, 'Bulk update features failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/features/routes/generate-title.ts b/apps/server/src/routes/features/routes/generate-title.ts
index 1225a825..a838e5aa 100644
--- a/apps/server/src/routes/features/routes/generate-title.ts
+++ b/apps/server/src/routes/features/routes/generate-title.ts
@@ -1,13 +1,14 @@
/**
* POST /features/generate-title endpoint - Generate a concise title from description
*
- * Uses Claude Haiku to generate a short, descriptive title from feature description.
+ * Uses the provider abstraction to generate a short, descriptive title
+ * from a feature description. Works with any configured provider (Claude, Cursor, etc.).
*/
import type { Request, Response } from 'express';
-import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver';
+import { simpleQuery } from '../../../providers/simple-query-service.js';
const logger = createLogger('GenerateTitle');
@@ -34,33 +35,6 @@ Rules:
- No quotes, periods, or extra formatting
- Capture the essence of the feature in a scannable way`;
-async function extractTextFromStream(
- stream: AsyncIterable<{
- type: string;
- subtype?: string;
- result?: string;
- message?: {
- content?: Array<{ type: string; text?: string }>;
- };
- }>
-): Promise {
- let responseText = '';
-
- for await (const msg of stream) {
- if (msg.type === 'assistant' && msg.message?.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text' && block.text) {
- responseText += block.text;
- }
- }
- } else if (msg.type === 'result' && msg.subtype === 'success') {
- responseText = msg.result || responseText;
- }
- }
-
- return responseText;
-}
-
export function createGenerateTitleHandler(): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => {
try {
@@ -89,21 +63,19 @@ export function createGenerateTitleHandler(): (req: Request, res: Response) => P
const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`;
- const stream = query({
- prompt: userPrompt,
- options: {
- model: CLAUDE_MODEL_MAP.haiku,
- systemPrompt: SYSTEM_PROMPT,
- maxTurns: 1,
- allowedTools: [],
- permissionMode: 'acceptEdits',
- },
+ // Use simpleQuery - provider abstraction handles all the streaming/extraction
+ const result = await simpleQuery({
+ prompt: `${SYSTEM_PROMPT}\n\n${userPrompt}`,
+ model: CLAUDE_MODEL_MAP.haiku,
+ cwd: process.cwd(),
+ maxTurns: 1,
+ allowedTools: [],
});
- const title = await extractTextFromStream(stream);
+ const title = result.text;
if (!title || title.trim().length === 0) {
- logger.warn('Received empty response from Claude');
+ logger.warn('Received empty response from AI');
const response: GenerateTitleErrorResponse = {
success: false,
error: 'Failed to generate title - empty response',
diff --git a/apps/server/src/routes/features/routes/update.ts b/apps/server/src/routes/features/routes/update.ts
index 830fb21a..1a89cda3 100644
--- a/apps/server/src/routes/features/routes/update.ts
+++ b/apps/server/src/routes/features/routes/update.ts
@@ -10,10 +10,20 @@ import { getErrorMessage, logError } from '../common.js';
export function createUpdateHandler(featureLoader: FeatureLoader) {
return async (req: Request, res: Response): Promise<void> => {
try {
- const { projectPath, featureId, updates } = req.body as {
+ const {
+ projectPath,
+ featureId,
+ updates,
+ descriptionHistorySource,
+ enhancementMode,
+ preEnhancementDescription,
+ } = req.body as {
projectPath: string;
featureId: string;
updates: Partial<Feature>;
+ descriptionHistorySource?: 'enhance' | 'edit';
+ enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance' | 'ux-reviewer';
+ preEnhancementDescription?: string;
};
if (!projectPath || !featureId || !updates) {
@@ -24,7 +34,14 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
return;
}
- const updated = await featureLoader.update(projectPath, featureId, updates);
+ const updated = await featureLoader.update(
+ projectPath,
+ featureId,
+ updates,
+ descriptionHistorySource,
+ enhancementMode,
+ preEnhancementDescription
+ );
res.json({ success: true, feature: updated });
} catch (error) {
logError(error, 'Update feature failed');
diff --git a/apps/server/src/routes/github/routes/common.ts b/apps/server/src/routes/github/routes/common.ts
index 790f92c3..211be715 100644
--- a/apps/server/src/routes/github/routes/common.ts
+++ b/apps/server/src/routes/github/routes/common.ts
@@ -4,6 +4,9 @@
import { exec } from 'child_process';
import { promisify } from 'util';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('GitHub');
export const execAsync = promisify(exec);
@@ -31,5 +34,5 @@ export function getErrorMessage(error: unknown): string {
}
export function logError(error: unknown, context: string): void {
- console.error(`[GitHub] ${context}:`, error);
+ logger.error(`${context}:`, error);
}
diff --git a/apps/server/src/routes/github/routes/list-issues.ts b/apps/server/src/routes/github/routes/list-issues.ts
index c4ed58f1..9c0f8933 100644
--- a/apps/server/src/routes/github/routes/list-issues.ts
+++ b/apps/server/src/routes/github/routes/list-issues.ts
@@ -6,6 +6,9 @@ import { spawn } from 'child_process';
import type { Request, Response } from 'express';
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
import { checkGitHubRemote } from './check-github-remote.js';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('ListIssues');
export interface GitHubLabel {
name: string;
@@ -179,7 +182,7 @@ async function fetchLinkedPRs(
}
} catch (error) {
// If GraphQL fails, continue without linked PRs
- console.warn(
+ logger.warn(
'Failed to fetch linked PRs via GraphQL:',
error instanceof Error ? error.message : error
);
diff --git a/apps/server/src/routes/github/routes/validate-issue.ts b/apps/server/src/routes/github/routes/validate-issue.ts
index 99673593..aaa83c9a 100644
--- a/apps/server/src/routes/github/routes/validate-issue.ts
+++ b/apps/server/src/routes/github/routes/validate-issue.ts
@@ -1,22 +1,27 @@
/**
- * POST /validate-issue endpoint - Validate a GitHub issue using Claude SDK (async)
+ * POST /validate-issue endpoint - Validate a GitHub issue using provider abstraction (async)
*
* Scans the codebase to determine if an issue is valid, invalid, or needs clarification.
* Runs asynchronously and emits events for progress and completion.
+ * Supports both Claude models and Cursor models.
*/
import type { Request, Response } from 'express';
-import { query } from '@anthropic-ai/claude-agent-sdk';
import type { EventEmitter } from '../../../lib/events.js';
import type {
IssueValidationResult,
IssueValidationEvent,
- AgentModel,
+ ModelAlias,
+ CursorModelId,
GitHubComment,
LinkedPRInfo,
+ ThinkingLevel,
} from '@automaker/types';
-import { createSuggestionsOptions } from '../../../lib/sdk-options.js';
+import { isCursorModel, DEFAULT_PHASE_MODELS } from '@automaker/types';
+import { resolvePhaseModel } from '@automaker/model-resolver';
+import { extractJson } from '../../../lib/json-extractor.js';
import { writeValidation } from '../../../lib/validation-storage.js';
+import { streamingQuery } from '../../../providers/simple-query-service.js';
import {
issueValidationSchema,
ISSUE_VALIDATION_SYSTEM_PROMPT,
@@ -34,8 +39,8 @@ import {
import type { SettingsService } from '../../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
-/** Valid model values for validation */
-const VALID_MODELS: readonly AgentModel[] = ['opus', 'sonnet', 'haiku'] as const;
+/** Valid Claude model values for validation */
+const VALID_CLAUDE_MODELS: readonly ModelAlias[] = ['opus', 'sonnet', 'haiku'] as const;
/**
* Request body for issue validation
@@ -46,8 +51,10 @@ interface ValidateIssueRequestBody {
issueTitle: string;
issueBody: string;
issueLabels?: string[];
- /** Model to use for validation (opus, sonnet, haiku) */
- model?: AgentModel;
+ /** Model to use for validation (opus, sonnet, haiku, or cursor model IDs) */
+ model?: ModelAlias | CursorModelId;
+ /** Thinking level for Claude models (ignored for Cursor models) */
+ thinkingLevel?: ThinkingLevel;
/** Comments to include in validation analysis */
comments?: GitHubComment[];
/** Linked pull requests for this issue */
@@ -59,6 +66,7 @@ interface ValidateIssueRequestBody {
*
* Emits events for start, progress, complete, and error.
* Stores result on completion.
+ * Supports both Claude models (with structured output) and Cursor models (with JSON parsing).
*/
async function runValidation(
projectPath: string,
@@ -66,12 +74,13 @@ async function runValidation(
issueTitle: string,
issueBody: string,
issueLabels: string[] | undefined,
- model: AgentModel,
+ model: ModelAlias | CursorModelId,
events: EventEmitter,
abortController: AbortController,
settingsService?: SettingsService,
comments?: ValidationComment[],
- linkedPRs?: ValidationLinkedPR[]
+ linkedPRs?: ValidationLinkedPR[],
+ thinkingLevel?: ThinkingLevel
): Promise<void> {
// Emit start event
const startEvent: IssueValidationEvent = {
@@ -91,7 +100,7 @@ async function runValidation(
try {
// Build the prompt (include comments and linked PRs if provided)
- const prompt = buildValidationPrompt(
+ const basePrompt = buildValidationPrompt(
issueNumber,
issueTitle,
issueBody,
@@ -100,6 +109,28 @@ async function runValidation(
linkedPRs
);
+ let responseText = '';
+
+ // Determine if we should use structured output (Claude supports it, Cursor doesn't)
+ const useStructuredOutput = !isCursorModel(model);
+
+ // Build the final prompt - for Cursor, include system prompt and JSON schema instructions
+ let finalPrompt = basePrompt;
+ if (!useStructuredOutput) {
+ finalPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
+
+CRITICAL INSTRUCTIONS:
+1. DO NOT write any files. Return the JSON in your response only.
+2. Respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
+3. The JSON must match this exact schema:
+
+${JSON.stringify(issueValidationSchema, null, 2)}
+
+Your entire response should be valid JSON starting with { and ending with }. No text before or after.
+
+${basePrompt}`;
+ }
+
// Load autoLoadClaudeMd setting
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
projectPath,
@@ -107,64 +138,65 @@ async function runValidation(
'[ValidateIssue]'
);
- // Create SDK options with structured output and abort controller
- const options = createSuggestionsOptions({
+ // Use thinkingLevel from request if provided, otherwise fall back to settings
+ let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
+ if (!effectiveThinkingLevel) {
+ const settings = await settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
+ const resolved = resolvePhaseModel(phaseModelEntry);
+ effectiveThinkingLevel = resolved.thinkingLevel;
+ }
+
+ logger.info(`Using model: ${model}`);
+
+ // Use streamingQuery with event callbacks
+ const result = await streamingQuery({
+ prompt: finalPrompt,
+ model: model as string,
cwd: projectPath,
- model,
- systemPrompt: ISSUE_VALIDATION_SYSTEM_PROMPT,
+ systemPrompt: useStructuredOutput ? ISSUE_VALIDATION_SYSTEM_PROMPT : undefined,
abortController,
- autoLoadClaudeMd,
- outputFormat: {
- type: 'json_schema',
- schema: issueValidationSchema as Record<string, unknown>,
+ thinkingLevel: effectiveThinkingLevel,
+ readOnly: true, // Issue validation only reads code, doesn't write
+ settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
+ outputFormat: useStructuredOutput
+ ? {
+ type: 'json_schema',
+ schema: issueValidationSchema as Record<string, unknown>,
+ }
+ : undefined,
+ onText: (text) => {
+ responseText += text;
+ // Emit progress event
+ const progressEvent: IssueValidationEvent = {
+ type: 'issue_validation_progress',
+ issueNumber,
+ content: text,
+ projectPath,
+ };
+ events.emit('issue-validation:event', progressEvent);
},
});
- // Execute the query
- const stream = query({ prompt, options });
- let validationResult: IssueValidationResult | null = null;
-
- for await (const msg of stream) {
- // Emit progress events for assistant text
- if (msg.type === 'assistant' && msg.message?.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text') {
- const progressEvent: IssueValidationEvent = {
- type: 'issue_validation_progress',
- issueNumber,
- content: block.text,
- projectPath,
- };
- events.emit('issue-validation:event', progressEvent);
- }
- }
- }
-
- // Extract structured output on success
- if (msg.type === 'result' && msg.subtype === 'success') {
- const resultMsg = msg as { structured_output?: IssueValidationResult };
- if (resultMsg.structured_output) {
- validationResult = resultMsg.structured_output;
- }
- }
-
- // Handle errors
- if (msg.type === 'result') {
- const resultMsg = msg as { subtype?: string };
- if (resultMsg.subtype === 'error_max_structured_output_retries') {
- logger.error('Failed to produce valid structured output after retries');
- throw new Error('Could not produce valid validation output');
- }
- }
- }
-
// Clear timeout
clearTimeout(timeoutId);
- // Require structured output
+ // Get validation result from structured output or parse from text
+ let validationResult: IssueValidationResult | null = null;
+
+ if (result.structured_output) {
+ validationResult = result.structured_output as unknown as IssueValidationResult;
+ logger.debug('Received structured output:', validationResult);
+ } else if (responseText) {
+ // Parse JSON from response text
+ validationResult = extractJson(responseText, { logger });
+ }
+
+ // Require validation result
if (!validationResult) {
- logger.error('No structured output received from Claude SDK');
- throw new Error('Validation failed: no structured output received');
+ logger.error('No validation result received from AI provider');
+ throw new Error('Validation failed: no valid result received');
}
logger.info(`Issue #${issueNumber} validation complete: ${validationResult.verdict}`);
@@ -210,7 +242,7 @@ async function runValidation(
/**
* Creates the handler for validating GitHub issues against the codebase.
*
- * Uses Claude SDK with:
+ * Uses the provider abstraction with:
* - Read-only tools (Read, Glob, Grep) for codebase analysis
* - JSON schema structured output for reliable parsing
* - System prompt guiding the validation process
@@ -229,6 +261,7 @@ export function createValidateIssueHandler(
issueBody,
issueLabels,
model = 'opus',
+ thinkingLevel,
comments: rawComments,
linkedPRs: rawLinkedPRs,
} = req.body as ValidateIssueRequestBody;
@@ -276,11 +309,14 @@ export function createValidateIssueHandler(
return;
}
- // Validate model parameter at runtime
- if (!VALID_MODELS.includes(model)) {
+ // Validate model parameter at runtime - accept Claude models or Cursor models
+ const isValidClaudeModel = VALID_CLAUDE_MODELS.includes(model as ModelAlias);
+ const isValidCursorModel = isCursorModel(model);
+
+ if (!isValidClaudeModel && !isValidCursorModel) {
res.status(400).json({
success: false,
- error: `Invalid model. Must be one of: ${VALID_MODELS.join(', ')}`,
+ error: `Invalid model. Must be one of: ${VALID_CLAUDE_MODELS.join(', ')}, or a Cursor model ID`,
});
return;
}
@@ -310,7 +346,8 @@ export function createValidateIssueHandler(
abortController,
settingsService,
validationComments,
- validationLinkedPRs
+ validationLinkedPRs,
+ thinkingLevel
)
.catch(() => {
// Error is already handled inside runValidation (event emitted)
diff --git a/apps/server/src/routes/ideation/common.ts b/apps/server/src/routes/ideation/common.ts
new file mode 100644
index 00000000..2cca3654
--- /dev/null
+++ b/apps/server/src/routes/ideation/common.ts
@@ -0,0 +1,12 @@
+/**
+ * Common utilities for ideation routes
+ */
+
+import { createLogger } from '@automaker/utils';
+import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
+
+const logger = createLogger('Ideation');
+
+// Re-export shared utilities
+export { getErrorMessageShared as getErrorMessage };
+export const logError = createLogError(logger);
diff --git a/apps/server/src/routes/ideation/index.ts b/apps/server/src/routes/ideation/index.ts
new file mode 100644
index 00000000..95fe128b
--- /dev/null
+++ b/apps/server/src/routes/ideation/index.ts
@@ -0,0 +1,109 @@
+/**
+ * Ideation routes - HTTP API for brainstorming and idea management
+ */
+
+import { Router } from 'express';
+import type { EventEmitter } from '../../lib/events.js';
+import { validatePathParams } from '../../middleware/validate-paths.js';
+import type { IdeationService } from '../../services/ideation-service.js';
+import type { FeatureLoader } from '../../services/feature-loader.js';
+
+// Route handlers
+import { createSessionStartHandler } from './routes/session-start.js';
+import { createSessionMessageHandler } from './routes/session-message.js';
+import { createSessionStopHandler } from './routes/session-stop.js';
+import { createSessionGetHandler } from './routes/session-get.js';
+import { createIdeasListHandler } from './routes/ideas-list.js';
+import { createIdeasCreateHandler } from './routes/ideas-create.js';
+import { createIdeasGetHandler } from './routes/ideas-get.js';
+import { createIdeasUpdateHandler } from './routes/ideas-update.js';
+import { createIdeasDeleteHandler } from './routes/ideas-delete.js';
+import { createAnalyzeHandler, createGetAnalysisHandler } from './routes/analyze.js';
+import { createConvertHandler } from './routes/convert.js';
+import { createAddSuggestionHandler } from './routes/add-suggestion.js';
+import { createPromptsHandler, createPromptsByCategoryHandler } from './routes/prompts.js';
+import { createSuggestionsGenerateHandler } from './routes/suggestions-generate.js';
+
+export function createIdeationRoutes(
+ events: EventEmitter,
+ ideationService: IdeationService,
+ featureLoader: FeatureLoader
+): Router {
+ const router = Router();
+
+ // Session management
+ router.post(
+ '/session/start',
+ validatePathParams('projectPath'),
+ createSessionStartHandler(ideationService)
+ );
+ router.post('/session/message', createSessionMessageHandler(ideationService));
+ router.post('/session/stop', createSessionStopHandler(events, ideationService));
+ router.post(
+ '/session/get',
+ validatePathParams('projectPath'),
+ createSessionGetHandler(ideationService)
+ );
+
+ // Ideas CRUD
+ router.post(
+ '/ideas/list',
+ validatePathParams('projectPath'),
+ createIdeasListHandler(ideationService)
+ );
+ router.post(
+ '/ideas/create',
+ validatePathParams('projectPath'),
+ createIdeasCreateHandler(events, ideationService)
+ );
+ router.post(
+ '/ideas/get',
+ validatePathParams('projectPath'),
+ createIdeasGetHandler(ideationService)
+ );
+ router.post(
+ '/ideas/update',
+ validatePathParams('projectPath'),
+ createIdeasUpdateHandler(events, ideationService)
+ );
+ router.post(
+ '/ideas/delete',
+ validatePathParams('projectPath'),
+ createIdeasDeleteHandler(events, ideationService)
+ );
+
+ // Project analysis
+ router.post('/analyze', validatePathParams('projectPath'), createAnalyzeHandler(ideationService));
+ router.post(
+ '/analysis',
+ validatePathParams('projectPath'),
+ createGetAnalysisHandler(ideationService)
+ );
+
+ // Convert to feature
+ router.post(
+ '/convert',
+ validatePathParams('projectPath'),
+ createConvertHandler(events, ideationService, featureLoader)
+ );
+
+ // Add suggestion to board as a feature
+ router.post(
+ '/add-suggestion',
+ validatePathParams('projectPath'),
+ createAddSuggestionHandler(ideationService, featureLoader)
+ );
+
+ // Guided prompts (no validation needed - static data)
+ router.get('/prompts', createPromptsHandler(ideationService));
+ router.get('/prompts/:category', createPromptsByCategoryHandler(ideationService));
+
+ // Generate suggestions (structured output)
+ router.post(
+ '/suggestions/generate',
+ validatePathParams('projectPath'),
+ createSuggestionsGenerateHandler(ideationService)
+ );
+
+ return router;
+}
diff --git a/apps/server/src/routes/ideation/routes/add-suggestion.ts b/apps/server/src/routes/ideation/routes/add-suggestion.ts
new file mode 100644
index 00000000..3326bfc3
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/add-suggestion.ts
@@ -0,0 +1,70 @@
+/**
+ * POST /add-suggestion - Add an analysis suggestion to the board as a feature
+ *
+ * This endpoint converts an AnalysisSuggestion to a Feature using the
+ * IdeationService's mapIdeaCategoryToFeatureCategory for consistent category mapping.
+ * This ensures a single source of truth for the conversion logic.
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import type { FeatureLoader } from '../../../services/feature-loader.js';
+import type { AnalysisSuggestion } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createAddSuggestionHandler(
+ ideationService: IdeationService,
+ featureLoader: FeatureLoader
+) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath, suggestion } = req.body as {
+ projectPath: string;
+ suggestion: AnalysisSuggestion;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!suggestion) {
+ res.status(400).json({ success: false, error: 'suggestion is required' });
+ return;
+ }
+
+ if (!suggestion.title) {
+ res.status(400).json({ success: false, error: 'suggestion.title is required' });
+ return;
+ }
+
+ if (!suggestion.category) {
+ res.status(400).json({ success: false, error: 'suggestion.category is required' });
+ return;
+ }
+
+ // Build description with rationale if provided
+ const description = suggestion.rationale
+ ? `${suggestion.description}\n\n**Rationale:** ${suggestion.rationale}`
+ : suggestion.description;
+
+ // Use the service's category mapping for consistency
+ const featureCategory = ideationService.mapSuggestionCategoryToFeatureCategory(
+ suggestion.category
+ );
+
+ // Create the feature
+ const feature = await featureLoader.create(projectPath, {
+ title: suggestion.title,
+ description,
+ category: featureCategory,
+ status: 'backlog',
+ });
+
+ res.json({ success: true, featureId: feature.id });
+ } catch (error) {
+ logError(error, 'Add suggestion to board failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/analyze.ts b/apps/server/src/routes/ideation/routes/analyze.ts
new file mode 100644
index 00000000..e8e0b213
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/analyze.ts
@@ -0,0 +1,49 @@
+/**
+ * POST /analyze - Analyze project and generate suggestions
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createAnalyzeHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath } = req.body as { projectPath: string };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ // Start analysis - results come via WebSocket events
+ ideationService.analyzeProject(projectPath).catch((error) => {
+ logError(error, 'Analyze project failed (async)');
+ });
+
+ res.json({ success: true, message: 'Analysis started' });
+ } catch (error) {
+ logError(error, 'Analyze project failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
+
+export function createGetAnalysisHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath } = req.body as { projectPath: string };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ const result = await ideationService.getCachedAnalysis(projectPath);
+ res.json({ success: true, result });
+ } catch (error) {
+ logError(error, 'Get analysis failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/convert.ts b/apps/server/src/routes/ideation/routes/convert.ts
new file mode 100644
index 00000000..e1939bb4
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/convert.ts
@@ -0,0 +1,77 @@
+/**
+ * POST /convert - Convert an idea to a feature
+ */
+
+import type { Request, Response } from 'express';
+import type { EventEmitter } from '../../../lib/events.js';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import type { FeatureLoader } from '../../../services/feature-loader.js';
+import type { ConvertToFeatureOptions } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createConvertHandler(
+ events: EventEmitter,
+ ideationService: IdeationService,
+ featureLoader: FeatureLoader
+) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath, ideaId, keepIdea, column, dependencies, tags } = req.body as {
+ projectPath: string;
+ ideaId: string;
+ } & ConvertToFeatureOptions;
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!ideaId) {
+ res.status(400).json({ success: false, error: 'ideaId is required' });
+ return;
+ }
+
+ // Convert idea to feature structure
+ const featureData = await ideationService.convertToFeature(projectPath, ideaId);
+
+ // Apply any options from the request
+ if (column) {
+ featureData.status = column;
+ }
+ if (dependencies && dependencies.length > 0) {
+ featureData.dependencies = dependencies;
+ }
+ if (tags && tags.length > 0) {
+ featureData.tags = tags;
+ }
+
+ // Create the feature using FeatureLoader
+ const feature = await featureLoader.create(projectPath, featureData);
+
+ // Delete the idea unless keepIdea is explicitly true
+ if (!keepIdea) {
+ await ideationService.deleteIdea(projectPath, ideaId);
+
+ // Emit idea deleted event
+ events.emit('ideation:idea-deleted', {
+ projectPath,
+ ideaId,
+ });
+ }
+
+ // Emit idea converted event to notify frontend
+ events.emit('ideation:idea-converted', {
+ projectPath,
+ ideaId,
+ featureId: feature.id,
+ keepIdea: !!keepIdea,
+ });
+
+ // Return featureId as expected by the frontend API interface
+ res.json({ success: true, featureId: feature.id });
+ } catch (error) {
+ logError(error, 'Convert to feature failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/ideas-create.ts b/apps/server/src/routes/ideation/routes/ideas-create.ts
new file mode 100644
index 00000000..bf368fd9
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/ideas-create.ts
@@ -0,0 +1,51 @@
+/**
+ * POST /ideas/create - Create a new idea
+ */
+
+import type { Request, Response } from 'express';
+import type { EventEmitter } from '../../../lib/events.js';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import type { CreateIdeaInput } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createIdeasCreateHandler(events: EventEmitter, ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath, idea } = req.body as {
+ projectPath: string;
+ idea: CreateIdeaInput;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!idea) {
+ res.status(400).json({ success: false, error: 'idea is required' });
+ return;
+ }
+
+ if (!idea.title || !idea.description || !idea.category) {
+ res.status(400).json({
+ success: false,
+ error: 'idea must have title, description, and category',
+ });
+ return;
+ }
+
+ const created = await ideationService.createIdea(projectPath, idea);
+
+ // Emit idea created event for frontend notification
+ events.emit('ideation:idea-created', {
+ projectPath,
+ idea: created,
+ });
+
+ res.json({ success: true, idea: created });
+ } catch (error) {
+ logError(error, 'Create idea failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/ideas-delete.ts b/apps/server/src/routes/ideation/routes/ideas-delete.ts
new file mode 100644
index 00000000..b1bcf006
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/ideas-delete.ts
@@ -0,0 +1,42 @@
+/**
+ * POST /ideas/delete - Delete an idea
+ */
+
+import type { Request, Response } from 'express';
+import type { EventEmitter } from '../../../lib/events.js';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createIdeasDeleteHandler(events: EventEmitter, ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath, ideaId } = req.body as {
+ projectPath: string;
+ ideaId: string;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!ideaId) {
+ res.status(400).json({ success: false, error: 'ideaId is required' });
+ return;
+ }
+
+ await ideationService.deleteIdea(projectPath, ideaId);
+
+ // Emit idea deleted event for frontend notification
+ events.emit('ideation:idea-deleted', {
+ projectPath,
+ ideaId,
+ });
+
+ res.json({ success: true });
+ } catch (error) {
+ logError(error, 'Delete idea failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/ideas-get.ts b/apps/server/src/routes/ideation/routes/ideas-get.ts
new file mode 100644
index 00000000..d4865b46
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/ideas-get.ts
@@ -0,0 +1,39 @@
+/**
+ * POST /ideas/get - Get a single idea
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createIdeasGetHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath, ideaId } = req.body as {
+ projectPath: string;
+ ideaId: string;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!ideaId) {
+ res.status(400).json({ success: false, error: 'ideaId is required' });
+ return;
+ }
+
+ const idea = await ideationService.getIdea(projectPath, ideaId);
+ if (!idea) {
+ res.status(404).json({ success: false, error: 'Idea not found' });
+ return;
+ }
+
+ res.json({ success: true, idea });
+ } catch (error) {
+ logError(error, 'Get idea failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/ideas-list.ts b/apps/server/src/routes/ideation/routes/ideas-list.ts
new file mode 100644
index 00000000..5f6b4504
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/ideas-list.ts
@@ -0,0 +1,26 @@
+/**
+ * POST /ideas/list - List all ideas for a project
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createIdeasListHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath } = req.body as { projectPath: string };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ const ideas = await ideationService.getIdeas(projectPath);
+ res.json({ success: true, ideas });
+ } catch (error) {
+ logError(error, 'List ideas failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/ideas-update.ts b/apps/server/src/routes/ideation/routes/ideas-update.ts
new file mode 100644
index 00000000..fbf0d8b6
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/ideas-update.ts
@@ -0,0 +1,54 @@
+/**
+ * POST /ideas/update - Update an idea
+ */
+
+import type { Request, Response } from 'express';
+import type { EventEmitter } from '../../../lib/events.js';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import type { UpdateIdeaInput } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createIdeasUpdateHandler(events: EventEmitter, ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise<void> => {
+ try {
+ const { projectPath, ideaId, updates } = req.body as {
+ projectPath: string;
+ ideaId: string;
+ updates: UpdateIdeaInput;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!ideaId) {
+ res.status(400).json({ success: false, error: 'ideaId is required' });
+ return;
+ }
+
+ if (!updates) {
+ res.status(400).json({ success: false, error: 'updates is required' });
+ return;
+ }
+
+ const idea = await ideationService.updateIdea(projectPath, ideaId, updates);
+ if (!idea) {
+ res.status(404).json({ success: false, error: 'Idea not found' });
+ return;
+ }
+
+ // Emit idea updated event for frontend notification
+ events.emit('ideation:idea-updated', {
+ projectPath,
+ ideaId,
+ idea,
+ });
+
+ res.json({ success: true, idea });
+ } catch (error) {
+ logError(error, 'Update idea failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/prompts.ts b/apps/server/src/routes/ideation/routes/prompts.ts
new file mode 100644
index 00000000..8d686bbb
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/prompts.ts
@@ -0,0 +1,42 @@
+/**
+ * GET /prompts - Get all guided prompts
+ * GET /prompts/:category - Get prompts for a specific category
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import type { IdeaCategory } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createPromptsHandler(ideationService: IdeationService) {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ const prompts = ideationService.getAllPrompts();
+ const categories = ideationService.getPromptCategories();
+ res.json({ success: true, prompts, categories });
+ } catch (error) {
+ logError(error, 'Get prompts failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
+
+export function createPromptsByCategoryHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { category } = req.params as { category: string };
+
+ const validCategories = ideationService.getPromptCategories().map((c) => c.id);
+ if (!validCategories.includes(category as IdeaCategory)) {
+ res.status(400).json({ success: false, error: 'Invalid category' });
+ return;
+ }
+
+ const prompts = ideationService.getPromptsByCategory(category as IdeaCategory);
+ res.json({ success: true, prompts });
+ } catch (error) {
+ logError(error, 'Get prompts by category failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/session-get.ts b/apps/server/src/routes/ideation/routes/session-get.ts
new file mode 100644
index 00000000..c95bd6cb
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/session-get.ts
@@ -0,0 +1,45 @@
+/**
+ * POST /session/get - Get an ideation session with messages
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createSessionGetHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, sessionId } = req.body as {
+ projectPath: string;
+ sessionId: string;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!sessionId) {
+ res.status(400).json({ success: false, error: 'sessionId is required' });
+ return;
+ }
+
+ const session = await ideationService.getSession(projectPath, sessionId);
+ if (!session) {
+ res.status(404).json({ success: false, error: 'Session not found' });
+ return;
+ }
+
+ const isRunning = ideationService.isSessionRunning(sessionId);
+
+ res.json({
+ success: true,
+ session: { ...session, isRunning },
+ messages: session.messages,
+ });
+ } catch (error) {
+ logError(error, 'Get session failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/session-message.ts b/apps/server/src/routes/ideation/routes/session-message.ts
new file mode 100644
index 00000000..0668583e
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/session-message.ts
@@ -0,0 +1,40 @@
+/**
+ * POST /session/message - Send a message in an ideation session
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import type { SendMessageOptions } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createSessionMessageHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { sessionId, message, options } = req.body as {
+ sessionId: string;
+ message: string;
+ options?: SendMessageOptions;
+ };
+
+ if (!sessionId) {
+ res.status(400).json({ success: false, error: 'sessionId is required' });
+ return;
+ }
+
+ if (!message) {
+ res.status(400).json({ success: false, error: 'message is required' });
+ return;
+ }
+
+ // This is async but we don't await - responses come via WebSocket
+ ideationService.sendMessage(sessionId, message, options).catch((error) => {
+ logError(error, 'Send message failed (async)');
+ });
+
+ res.json({ success: true });
+ } catch (error) {
+ logError(error, 'Send message failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/session-start.ts b/apps/server/src/routes/ideation/routes/session-start.ts
new file mode 100644
index 00000000..5d1ae838
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/session-start.ts
@@ -0,0 +1,30 @@
+/**
+ * POST /session/start - Start a new ideation session
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import type { StartSessionOptions } from '@automaker/types';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createSessionStartHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, options } = req.body as {
+ projectPath: string;
+ options?: StartSessionOptions;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ const session = await ideationService.startSession(projectPath, options);
+ res.json({ success: true, session });
+ } catch (error) {
+ logError(error, 'Start session failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/session-stop.ts b/apps/server/src/routes/ideation/routes/session-stop.ts
new file mode 100644
index 00000000..c0d59e3b
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/session-stop.ts
@@ -0,0 +1,39 @@
+/**
+ * POST /session/stop - Stop an ideation session
+ */
+
+import type { Request, Response } from 'express';
+import type { EventEmitter } from '../../../lib/events.js';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import { getErrorMessage, logError } from '../common.js';
+
+export function createSessionStopHandler(events: EventEmitter, ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { sessionId, projectPath } = req.body as {
+ sessionId: string;
+ projectPath?: string;
+ };
+
+ if (!sessionId) {
+ res.status(400).json({ success: false, error: 'sessionId is required' });
+ return;
+ }
+
+ await ideationService.stopSession(sessionId);
+
+ // Emit session stopped event for frontend notification
+ // Note: The service also emits 'ideation:session-ended' internally,
+ // but we emit here as well for route-level consistency with other routes
+ events.emit('ideation:session-ended', {
+ sessionId,
+ projectPath,
+ });
+
+ res.json({ success: true });
+ } catch (error) {
+ logError(error, 'Stop session failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
diff --git a/apps/server/src/routes/ideation/routes/suggestions-generate.ts b/apps/server/src/routes/ideation/routes/suggestions-generate.ts
new file mode 100644
index 00000000..8add2af5
--- /dev/null
+++ b/apps/server/src/routes/ideation/routes/suggestions-generate.ts
@@ -0,0 +1,56 @@
+/**
+ * Generate suggestions route - Returns structured AI suggestions for a prompt
+ */
+
+import type { Request, Response } from 'express';
+import type { IdeationService } from '../../../services/ideation-service.js';
+import { createLogger } from '@automaker/utils';
+import { getErrorMessage, logError } from '../common.js';
+
+const logger = createLogger('ideation:suggestions-generate');
+
+export function createSuggestionsGenerateHandler(ideationService: IdeationService) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, promptId, category, count } = req.body;
+
+ if (!projectPath) {
+ res.status(400).json({ success: false, error: 'projectPath is required' });
+ return;
+ }
+
+ if (!promptId) {
+ res.status(400).json({ success: false, error: 'promptId is required' });
+ return;
+ }
+
+ if (!category) {
+ res.status(400).json({ success: false, error: 'category is required' });
+ return;
+ }
+
+ // Default to 10 suggestions, allow 1-20
+ const suggestionCount = Math.min(Math.max(count || 10, 1), 20);
+
+ logger.info(`Generating ${suggestionCount} suggestions for prompt: ${promptId}`);
+
+ const suggestions = await ideationService.generateSuggestions(
+ projectPath,
+ promptId,
+ category,
+ suggestionCount
+ );
+
+ res.json({
+ success: true,
+ suggestions,
+ });
+ } catch (error) {
+ logError(error, 'Failed to generate suggestions');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/mcp/common.ts b/apps/server/src/routes/mcp/common.ts
index 5da4789c..7256ef12 100644
--- a/apps/server/src/routes/mcp/common.ts
+++ b/apps/server/src/routes/mcp/common.ts
@@ -2,6 +2,10 @@
* Common utilities for MCP routes
*/
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('MCP');
+
/**
* Extract error message from unknown error
*/
@@ -16,5 +20,5 @@ export function getErrorMessage(error: unknown): string {
* Log error with prefix
*/
/**
 * Log an error through the scoped MCP logger so output is consistently
 * prefixed, instead of writing to console.error directly.
 */
export function logError(error: unknown, message: string): void {
  logger.error(message + ':', error);
}
diff --git a/apps/server/src/routes/models/routes/available.ts b/apps/server/src/routes/models/routes/available.ts
index 4ac4e0b1..2ebb4992 100644
--- a/apps/server/src/routes/models/routes/available.ts
+++ b/apps/server/src/routes/models/routes/available.ts
@@ -1,61 +1,16 @@
/**
- * GET /available endpoint - Get available models
+ * GET /available endpoint - Get available models from all providers
*/
import type { Request, Response } from 'express';
+import { ProviderFactory } from '../../../providers/provider-factory.js';
import { getErrorMessage, logError } from '../common.js';
-interface ModelDefinition {
- id: string;
- name: string;
- provider: string;
- contextWindow: number;
- maxOutputTokens: number;
- supportsVision: boolean;
- supportsTools: boolean;
-}
-
export function createAvailableHandler() {
return async (_req: Request, res: Response): Promise => {
try {
- const models: ModelDefinition[] = [
- {
- id: 'claude-opus-4-5-20251101',
- name: 'Claude Opus 4.5',
- provider: 'anthropic',
- contextWindow: 200000,
- maxOutputTokens: 16384,
- supportsVision: true,
- supportsTools: true,
- },
- {
- id: 'claude-sonnet-4-20250514',
- name: 'Claude Sonnet 4',
- provider: 'anthropic',
- contextWindow: 200000,
- maxOutputTokens: 16384,
- supportsVision: true,
- supportsTools: true,
- },
- {
- id: 'claude-3-5-sonnet-20241022',
- name: 'Claude 3.5 Sonnet',
- provider: 'anthropic',
- contextWindow: 200000,
- maxOutputTokens: 8192,
- supportsVision: true,
- supportsTools: true,
- },
- {
- id: 'claude-3-5-haiku-20241022',
- name: 'Claude 3.5 Haiku',
- provider: 'anthropic',
- contextWindow: 200000,
- maxOutputTokens: 8192,
- supportsVision: true,
- supportsTools: true,
- },
- ];
+ // Get all models from all registered providers (Claude + Cursor)
+ const models = ProviderFactory.getAllAvailableModels();
res.json({ success: true, models });
} catch (error) {
diff --git a/apps/server/src/routes/models/routes/providers.ts b/apps/server/src/routes/models/routes/providers.ts
index b7ef1b85..174a1fac 100644
--- a/apps/server/src/routes/models/routes/providers.ts
+++ b/apps/server/src/routes/models/routes/providers.ts
@@ -17,6 +17,13 @@ export function createProvidersHandler() {
available: statuses.claude?.installed || false,
hasApiKey: !!process.env.ANTHROPIC_API_KEY,
},
+ cursor: {
+ available: statuses.cursor?.installed || false,
+ version: statuses.cursor?.version,
+ path: statuses.cursor?.path,
+ method: statuses.cursor?.method,
+ authenticated: statuses.cursor?.authenticated,
+ },
};
res.json({ success: true, providers });
diff --git a/apps/server/src/routes/settings/index.ts b/apps/server/src/routes/settings/index.ts
index cc164856..6f6f6d40 100644
--- a/apps/server/src/routes/settings/index.ts
+++ b/apps/server/src/routes/settings/index.ts
@@ -23,6 +23,7 @@ import { createGetProjectHandler } from './routes/get-project.js';
import { createUpdateProjectHandler } from './routes/update-project.js';
import { createMigrateHandler } from './routes/migrate.js';
import { createStatusHandler } from './routes/status.js';
+import { createDiscoverAgentsHandler } from './routes/discover-agents.js';
/**
* Create settings router with all endpoints
@@ -39,6 +40,7 @@ import { createStatusHandler } from './routes/status.js';
* - POST /project - Get project settings (requires projectPath in body)
* - PUT /project - Update project settings
* - POST /migrate - Migrate settings from localStorage
+ * - POST /agents/discover - Discover filesystem agents from .claude/agents/ (read-only)
*
* @param settingsService - Instance of SettingsService for file I/O
* @returns Express Router configured with all settings endpoints
@@ -72,5 +74,8 @@ export function createSettingsRoutes(settingsService: SettingsService): Router {
// Migration from localStorage
router.post('/migrate', createMigrateHandler(settingsService));
+ // Filesystem agents discovery (read-only)
+ router.post('/agents/discover', createDiscoverAgentsHandler());
+
return router;
}
diff --git a/apps/server/src/routes/settings/routes/discover-agents.ts b/apps/server/src/routes/settings/routes/discover-agents.ts
new file mode 100644
index 00000000..aee4a2a2
--- /dev/null
+++ b/apps/server/src/routes/settings/routes/discover-agents.ts
@@ -0,0 +1,61 @@
+/**
+ * Discover Agents Route - Returns filesystem-based agents from .claude/agents/
+ *
+ * Scans both user-level (~/.claude/agents/) and project-level (.claude/agents/)
+ * directories for AGENT.md files and returns parsed agent definitions.
+ */
+
+import type { Request, Response } from 'express';
+import { discoverFilesystemAgents } from '../../../lib/agent-discovery.js';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('DiscoverAgentsRoute');
+
+interface DiscoverAgentsRequest {
+ projectPath?: string;
+ sources?: Array<'user' | 'project'>;
+}
+
+/**
+ * Create handler for discovering filesystem agents
+ *
+ * POST /api/settings/agents/discover
+ * Body: { projectPath?: string, sources?: ['user', 'project'] }
+ *
+ * Returns:
+ * {
+ * success: true,
+ * agents: Array<{
+ * name: string,
+ * definition: AgentDefinition,
+ * source: 'user' | 'project',
+ * filePath: string
+ * }>
+ * }
+ */
+export function createDiscoverAgentsHandler() {
+ return async (req: Request, res: Response) => {
+ try {
+ const { projectPath, sources = ['user', 'project'] } = req.body as DiscoverAgentsRequest;
+
+ logger.info(
+ `Discovering agents from sources: ${sources.join(', ')}${projectPath ? ` (project: ${projectPath})` : ''}`
+ );
+
+ const agents = await discoverFilesystemAgents(projectPath, sources);
+
+ logger.info(`Discovered ${agents.length} filesystem agents`);
+
+ res.json({
+ success: true,
+ agents,
+ });
+ } catch (error) {
+ logger.error('Failed to discover agents:', error);
+ res.status(500).json({
+ success: false,
+ error: error instanceof Error ? error.message : 'Failed to discover agents',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/settings/routes/update-global.ts b/apps/server/src/routes/settings/routes/update-global.ts
index 6072f237..aafbc5b1 100644
--- a/apps/server/src/routes/settings/routes/update-global.ts
+++ b/apps/server/src/routes/settings/routes/update-global.ts
@@ -11,7 +11,7 @@
import type { Request, Response } from 'express';
import type { SettingsService } from '../../../services/settings-service.js';
import type { GlobalSettings } from '../../../types/settings.js';
-import { getErrorMessage, logError } from '../common.js';
+import { getErrorMessage, logError, logger } from '../common.js';
/**
* Create handler factory for PUT /api/settings/global
@@ -32,6 +32,18 @@ export function createUpdateGlobalHandler(settingsService: SettingsService) {
return;
}
+ // Minimal debug logging to help diagnose accidental wipes.
+ if ('projects' in updates || 'theme' in updates || 'localStorageMigrated' in updates) {
+ const projectsLen = Array.isArray((updates as any).projects)
+ ? (updates as any).projects.length
+ : undefined;
+ logger.info(
+ `Update global settings request: projects=${projectsLen ?? 'n/a'}, theme=${
+ (updates as any).theme ?? 'n/a'
+ }, localStorageMigrated=${(updates as any).localStorageMigrated ?? 'n/a'}`
+ );
+ }
+
const settings = await settingsService.updateGlobalSettings(updates);
res.json({
diff --git a/apps/server/src/routes/setup/get-claude-status.ts b/apps/server/src/routes/setup/get-claude-status.ts
index 3ddd8ed4..4a3ccaf6 100644
--- a/apps/server/src/routes/setup/get-claude-status.ts
+++ b/apps/server/src/routes/setup/get-claude-status.ts
@@ -6,9 +6,24 @@ import { exec } from 'child_process';
import { promisify } from 'util';
import { getClaudeCliPaths, getClaudeAuthIndicators, systemPathAccess } from '@automaker/platform';
import { getApiKey } from './common.js';
+import * as fs from 'fs';
+import * as path from 'path';
const execAsync = promisify(exec);
+const DISCONNECTED_MARKER_FILE = '.claude-disconnected';
+
+function isDisconnectedFromApp(): boolean {
+ try {
+ // Check if we're in a project directory
+ const projectRoot = process.cwd();
+ const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
+ return fs.existsSync(markerPath);
+ } catch {
+ return false;
+ }
+}
+
export async function getClaudeStatus() {
let installed = false;
let version = '';
@@ -60,6 +75,30 @@ export async function getClaudeStatus() {
}
}
+ // Check if user has manually disconnected from the app
+ if (isDisconnectedFromApp()) {
+ return {
+ status: installed ? 'installed' : 'not_installed',
+ installed,
+ method,
+ version,
+ path: cliPath,
+ auth: {
+ authenticated: false,
+ method: 'none',
+ hasCredentialsFile: false,
+ hasToken: false,
+ hasStoredOAuthToken: false,
+ hasStoredApiKey: false,
+ hasEnvApiKey: false,
+ oauthTokenValid: false,
+ apiKeyValid: false,
+ hasCliAuth: false,
+ hasRecentActivity: false,
+ },
+ };
+ }
+
// Check authentication - detect all possible auth methods
// Note: apiKeys.anthropic_oauth_token stores OAuth tokens from subscription auth
// apiKeys.anthropic stores direct API keys for pay-per-use
diff --git a/apps/server/src/routes/setup/index.ts b/apps/server/src/routes/setup/index.ts
index 3681b2fc..a35c5e6b 100644
--- a/apps/server/src/routes/setup/index.ts
+++ b/apps/server/src/routes/setup/index.ts
@@ -11,7 +11,35 @@ import { createDeleteApiKeyHandler } from './routes/delete-api-key.js';
import { createApiKeysHandler } from './routes/api-keys.js';
import { createPlatformHandler } from './routes/platform.js';
import { createVerifyClaudeAuthHandler } from './routes/verify-claude-auth.js';
+import { createVerifyCodexAuthHandler } from './routes/verify-codex-auth.js';
import { createGhStatusHandler } from './routes/gh-status.js';
+import { createCursorStatusHandler } from './routes/cursor-status.js';
+import { createCodexStatusHandler } from './routes/codex-status.js';
+import { createInstallCodexHandler } from './routes/install-codex.js';
+import { createAuthCodexHandler } from './routes/auth-codex.js';
+import { createAuthCursorHandler } from './routes/auth-cursor.js';
+import { createDeauthClaudeHandler } from './routes/deauth-claude.js';
+import { createDeauthCodexHandler } from './routes/deauth-codex.js';
+import { createDeauthCursorHandler } from './routes/deauth-cursor.js';
+import { createAuthOpencodeHandler } from './routes/auth-opencode.js';
+import { createDeauthOpencodeHandler } from './routes/deauth-opencode.js';
+import { createOpencodeStatusHandler } from './routes/opencode-status.js';
+import {
+ createGetOpencodeModelsHandler,
+ createRefreshOpencodeModelsHandler,
+ createGetOpencodeProvidersHandler,
+ createClearOpencodeCacheHandler,
+} from './routes/opencode-models.js';
+import {
+ createGetCursorConfigHandler,
+ createSetCursorDefaultModelHandler,
+ createSetCursorModelsHandler,
+ createGetCursorPermissionsHandler,
+ createApplyPermissionProfileHandler,
+ createSetCustomPermissionsHandler,
+ createDeleteProjectPermissionsHandler,
+ createGetExampleConfigHandler,
+} from './routes/cursor-config.js';
export function createSetupRoutes(): Router {
const router = Router();
@@ -19,12 +47,46 @@ export function createSetupRoutes(): Router {
router.get('/claude-status', createClaudeStatusHandler());
router.post('/install-claude', createInstallClaudeHandler());
router.post('/auth-claude', createAuthClaudeHandler());
+ router.post('/deauth-claude', createDeauthClaudeHandler());
router.post('/store-api-key', createStoreApiKeyHandler());
router.post('/delete-api-key', createDeleteApiKeyHandler());
router.get('/api-keys', createApiKeysHandler());
router.get('/platform', createPlatformHandler());
router.post('/verify-claude-auth', createVerifyClaudeAuthHandler());
+ router.post('/verify-codex-auth', createVerifyCodexAuthHandler());
router.get('/gh-status', createGhStatusHandler());
+ // Cursor CLI routes
+ router.get('/cursor-status', createCursorStatusHandler());
+ router.post('/auth-cursor', createAuthCursorHandler());
+ router.post('/deauth-cursor', createDeauthCursorHandler());
+
+ // Codex CLI routes
+ router.get('/codex-status', createCodexStatusHandler());
+ router.post('/install-codex', createInstallCodexHandler());
+ router.post('/auth-codex', createAuthCodexHandler());
+ router.post('/deauth-codex', createDeauthCodexHandler());
+
+ // OpenCode CLI routes
+ router.get('/opencode-status', createOpencodeStatusHandler());
+ router.post('/auth-opencode', createAuthOpencodeHandler());
+ router.post('/deauth-opencode', createDeauthOpencodeHandler());
+
+ // OpenCode Dynamic Model Discovery routes
+ router.get('/opencode/models', createGetOpencodeModelsHandler());
+ router.post('/opencode/models/refresh', createRefreshOpencodeModelsHandler());
+ router.get('/opencode/providers', createGetOpencodeProvidersHandler());
+ router.post('/opencode/cache/clear', createClearOpencodeCacheHandler());
+ router.get('/cursor-config', createGetCursorConfigHandler());
+ router.post('/cursor-config/default-model', createSetCursorDefaultModelHandler());
+ router.post('/cursor-config/models', createSetCursorModelsHandler());
+
+ // Cursor CLI Permissions routes
+ router.get('/cursor-permissions', createGetCursorPermissionsHandler());
+ router.post('/cursor-permissions/profile', createApplyPermissionProfileHandler());
+ router.post('/cursor-permissions/custom', createSetCustomPermissionsHandler());
+ router.delete('/cursor-permissions', createDeleteProjectPermissionsHandler());
+ router.get('/cursor-permissions/example', createGetExampleConfigHandler());
+
return router;
}
diff --git a/apps/server/src/routes/setup/routes/api-keys.ts b/apps/server/src/routes/setup/routes/api-keys.ts
index d052c187..047b6455 100644
--- a/apps/server/src/routes/setup/routes/api-keys.ts
+++ b/apps/server/src/routes/setup/routes/api-keys.ts
@@ -11,6 +11,7 @@ export function createApiKeysHandler() {
res.json({
success: true,
hasAnthropicKey: !!getApiKey('anthropic') || !!process.env.ANTHROPIC_API_KEY,
+ hasOpenaiKey: !!getApiKey('openai') || !!process.env.OPENAI_API_KEY,
});
} catch (error) {
logError(error, 'Get API keys failed');
diff --git a/apps/server/src/routes/setup/routes/auth-claude.ts b/apps/server/src/routes/setup/routes/auth-claude.ts
index 4531501d..97a170f4 100644
--- a/apps/server/src/routes/setup/routes/auth-claude.ts
+++ b/apps/server/src/routes/setup/routes/auth-claude.ts
@@ -4,19 +4,54 @@
import type { Request, Response } from 'express';
import { getErrorMessage, logError } from '../common.js';
+import { exec } from 'child_process';
+import { promisify } from 'util';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const execAsync = promisify(exec);
export function createAuthClaudeHandler() {
return async (_req: Request, res: Response): Promise => {
try {
- res.json({
- success: true,
- requiresManualAuth: true,
- command: 'claude login',
- message: "Please run 'claude login' in your terminal to authenticate",
- });
+ // Remove the disconnected marker file to reconnect the app to the CLI
+ const markerPath = path.join(process.cwd(), '.automaker', '.claude-disconnected');
+ if (fs.existsSync(markerPath)) {
+ fs.unlinkSync(markerPath);
+ }
+
+ // Check if CLI is already authenticated by checking auth indicators
+ const { getClaudeAuthIndicators } = await import('@automaker/platform');
+ const indicators = await getClaudeAuthIndicators();
+ const isAlreadyAuthenticated =
+ indicators.hasStatsCacheWithActivity ||
+ (indicators.hasSettingsFile && indicators.hasProjectsSessions) ||
+ indicators.hasCredentialsFile;
+
+ if (isAlreadyAuthenticated) {
+ // CLI is already authenticated, just reconnect
+ res.json({
+ success: true,
+ message: 'Claude CLI is now linked with the app',
+ wasAlreadyAuthenticated: true,
+ });
+ } else {
+ // CLI needs authentication - but we can't run claude login here
+ // because it requires browser OAuth. Just reconnect and let the user authenticate if needed.
+ res.json({
+ success: true,
+ message:
+ 'Claude CLI is now linked with the app. If prompted, please authenticate with "claude login" in your terminal.',
+ requiresManualAuth: true,
+ });
+ }
} catch (error) {
logError(error, 'Auth Claude failed');
- res.status(500).json({ success: false, error: getErrorMessage(error) });
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to link Claude CLI with the app',
+ });
}
};
}
diff --git a/apps/server/src/routes/setup/routes/auth-codex.ts b/apps/server/src/routes/setup/routes/auth-codex.ts
new file mode 100644
index 00000000..79857bd8
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/auth-codex.ts
@@ -0,0 +1,50 @@
+/**
+ * POST /auth-codex endpoint - Authenticate Codex CLI
+ */
+
+import type { Request, Response } from 'express';
+import { logError, getErrorMessage } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+
+export function createAuthCodexHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Remove the disconnected marker file to reconnect the app to the CLI
+ const markerPath = path.join(process.cwd(), '.automaker', '.codex-disconnected');
+ if (fs.existsSync(markerPath)) {
+ fs.unlinkSync(markerPath);
+ }
+
+ // Use the same detection logic as the Codex provider
+ const { getCodexAuthIndicators } = await import('@automaker/platform');
+ const indicators = await getCodexAuthIndicators();
+
+ const isAlreadyAuthenticated =
+ indicators.hasApiKey || indicators.hasAuthFile || indicators.hasOAuthToken;
+
+ if (isAlreadyAuthenticated) {
+ // Already has authentication, just reconnect
+ res.json({
+ success: true,
+ message: 'Codex CLI is now linked with the app',
+ wasAlreadyAuthenticated: true,
+ });
+ } else {
+ res.json({
+ success: true,
+ message:
+ 'Codex CLI is now linked with the app. If prompted, please authenticate with "codex login" in your terminal.',
+ requiresManualAuth: true,
+ });
+ }
+ } catch (error) {
+ logError(error, 'Auth Codex failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to link Codex CLI with the app',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/auth-cursor.ts b/apps/server/src/routes/setup/routes/auth-cursor.ts
new file mode 100644
index 00000000..fbd6339c
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/auth-cursor.ts
@@ -0,0 +1,73 @@
+/**
+ * POST /auth-cursor endpoint - Authenticate Cursor CLI
+ */
+
+import type { Request, Response } from 'express';
+import { logError, getErrorMessage } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+import os from 'os';
+
+export function createAuthCursorHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Remove the disconnected marker file to reconnect the app to the CLI
+ const markerPath = path.join(process.cwd(), '.automaker', '.cursor-disconnected');
+ if (fs.existsSync(markerPath)) {
+ fs.unlinkSync(markerPath);
+ }
+
+ // Check if Cursor is already authenticated using the same logic as CursorProvider
+ const isAlreadyAuthenticated = (): boolean => {
+ // Check for API key in environment
+ if (process.env.CURSOR_API_KEY) {
+ return true;
+ }
+
+ // Check for credentials files
+ const credentialPaths = [
+ path.join(os.homedir(), '.cursor', 'credentials.json'),
+ path.join(os.homedir(), '.config', 'cursor', 'credentials.json'),
+ ];
+
+ for (const credPath of credentialPaths) {
+ if (fs.existsSync(credPath)) {
+ try {
+ const content = fs.readFileSync(credPath, 'utf8');
+ const creds = JSON.parse(content);
+ if (creds.accessToken || creds.token) {
+ return true;
+ }
+ } catch {
+ // Invalid credentials file, continue checking
+ }
+ }
+ }
+
+ return false;
+ };
+
+ if (isAlreadyAuthenticated()) {
+ res.json({
+ success: true,
+ message: 'Cursor CLI is now linked with the app',
+ wasAlreadyAuthenticated: true,
+ });
+ } else {
+ res.json({
+ success: true,
+ message:
+ 'Cursor CLI is now linked with the app. If prompted, please authenticate with "cursor auth" in your terminal.',
+ requiresManualAuth: true,
+ });
+ }
+ } catch (error) {
+ logError(error, 'Auth Cursor failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to link Cursor CLI with the app',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/auth-opencode.ts b/apps/server/src/routes/setup/routes/auth-opencode.ts
new file mode 100644
index 00000000..7d7f35e2
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/auth-opencode.ts
@@ -0,0 +1,51 @@
+/**
+ * POST /auth-opencode endpoint - Authenticate OpenCode CLI
+ */
+
+import type { Request, Response } from 'express';
+import { logError, getErrorMessage } from '../common.js';
+import { exec } from 'child_process';
+import { promisify } from 'util';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const execAsync = promisify(exec);
+
+export function createAuthOpencodeHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Remove the disconnected marker file to reconnect the app to the CLI
+ const markerPath = path.join(process.cwd(), '.automaker', '.opencode-disconnected');
+ if (fs.existsSync(markerPath)) {
+ fs.unlinkSync(markerPath);
+ }
+
+ // Check if OpenCode is already authenticated
+ // For OpenCode, check if there's an auth token or API key
+ const hasApiKey = !!process.env.OPENCODE_API_KEY;
+
+ if (hasApiKey) {
+ // Already has authentication, just reconnect
+ res.json({
+ success: true,
+ message: 'OpenCode CLI is now linked with the app',
+ wasAlreadyAuthenticated: true,
+ });
+ } else {
+ res.json({
+ success: true,
+ message:
+ 'OpenCode CLI is now linked with the app. If prompted, please authenticate with OpenCode.',
+ requiresManualAuth: true,
+ });
+ }
+ } catch (error) {
+ logError(error, 'Auth OpenCode failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to link OpenCode CLI with the app',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/codex-status.ts b/apps/server/src/routes/setup/routes/codex-status.ts
new file mode 100644
index 00000000..6e721e05
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/codex-status.ts
@@ -0,0 +1,81 @@
+/**
+ * GET /codex-status endpoint - Get Codex CLI installation and auth status
+ */
+
+import type { Request, Response } from 'express';
+import { CodexProvider } from '../../../providers/codex-provider.js';
+import { getErrorMessage, logError } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const DISCONNECTED_MARKER_FILE = '.codex-disconnected';
+
+function isCodexDisconnectedFromApp(): boolean {
+ try {
+ const projectRoot = process.cwd();
+ const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
+ return fs.existsSync(markerPath);
+ } catch {
+ return false;
+ }
+}
+
+/**
+ * Creates handler for GET /api/setup/codex-status
+ * Returns Codex CLI installation and authentication status
+ */
+export function createCodexStatusHandler() {
+ const installCommand = 'npm install -g @openai/codex';
+ const loginCommand = 'codex login';
+
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // User disconnected via the app: report unauthenticated without probing the CLI. NOTE(review): installed/version are hardcoded (true/null) here, unlike cursor-status which still checks installation — confirm this is intended.
+ if (isCodexDisconnectedFromApp()) {
+ res.json({
+ success: true,
+ installed: true,
+ version: null,
+ path: null,
+ auth: {
+ authenticated: false,
+ method: 'none',
+ hasApiKey: false,
+ },
+ installCommand,
+ loginCommand,
+ });
+ return;
+ }
+
+ const provider = new CodexProvider();
+ const status = await provider.detectInstallation();
+
+ // Derive auth method from authenticated status and API key presence
+ let authMethod = 'none';
+ if (status.authenticated) {
+ authMethod = status.hasApiKey ? 'api_key_env' : 'cli_authenticated';
+ }
+
+ res.json({
+ success: true,
+ installed: status.installed,
+ version: status.version || null,
+ path: status.path || null,
+ auth: {
+ authenticated: status.authenticated || false,
+ method: authMethod,
+ hasApiKey: status.hasApiKey || false,
+ },
+ installCommand,
+ loginCommand,
+ });
+ } catch (error) {
+ logError(error, 'Get Codex status failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/cursor-config.ts b/apps/server/src/routes/setup/routes/cursor-config.ts
new file mode 100644
index 00000000..8b9c05ce
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/cursor-config.ts
@@ -0,0 +1,411 @@
+/**
+ * Cursor CLI configuration routes
+ *
+ * Provides endpoints for managing Cursor CLI configuration:
+ * - GET /api/setup/cursor-config - Get current configuration
+ * - POST /api/setup/cursor-config/default-model - Set default model
+ * - POST /api/setup/cursor-config/models - Set enabled models
+ *
+ * Cursor CLI Permissions endpoints:
+ * - GET /api/setup/cursor-permissions - Get permissions config
+ * - POST /api/setup/cursor-permissions/profile - Apply a permission profile
+ * - POST /api/setup/cursor-permissions/custom - Set custom permissions
+ * - DELETE /api/setup/cursor-permissions - Delete project permissions (use global)
+ */
+
+import type { Request, Response } from 'express';
+import path from 'path';
+import { CursorConfigManager } from '../../../providers/cursor-config-manager.js';
+import {
+ CURSOR_MODEL_MAP,
+ CURSOR_PERMISSION_PROFILES,
+ type CursorModelId,
+ type CursorPermissionProfile,
+ type CursorCliPermissions,
+} from '@automaker/types';
+import {
+ readGlobalConfig,
+ readProjectConfig,
+ getEffectivePermissions,
+ applyProfileToProject,
+ applyProfileGlobally,
+ writeProjectConfig,
+ deleteProjectConfig,
+ detectProfile,
+ hasProjectConfig,
+ getAvailableProfiles,
+ generateExampleConfig,
+} from '../../../services/cursor-config-service.js';
+import { getErrorMessage, logError } from '../common.js';
+
+/**
+ * Validate that a project path is safe (no path traversal)
+ * @throws Error if path contains traversal sequences
+ */
+function validateProjectPath(projectPath: string): void {
+ // Resolve to absolute path and check for traversal
+ const resolved = path.resolve(projectPath);
+ const normalized = path.normalize(projectPath);
+
+ // Check for obvious traversal attempts
+ if (normalized.includes('..') || projectPath.includes('..')) {
+ throw new Error('Invalid project path: path traversal not allowed');
+ }
+
+ // NOTE(review): resolve() collapses '..', so this startsWith guard is a
+ // no-op tautology; the includes('..') check above is the real defense.
+ if (!resolved.startsWith(path.resolve(normalized))) {
+ throw new Error('Invalid project path: path traversal detected');
+ }
+}
+
+/**
+ * Creates handler for GET /api/setup/cursor-config
+ * Returns current Cursor configuration and available models
+ */
+export function createGetCursorConfigHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const projectPath = req.query.projectPath as string;
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath query parameter is required',
+ });
+ return;
+ }
+
+ // Validate path to prevent traversal attacks
+ validateProjectPath(projectPath);
+
+ const configManager = new CursorConfigManager(projectPath);
+
+ res.json({
+ success: true,
+ config: configManager.getConfig(),
+ availableModels: Object.values(CURSOR_MODEL_MAP),
+ });
+ } catch (error) {
+ logError(error, 'Get Cursor config failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * Creates handler for POST /api/setup/cursor-config/default-model
+ * Sets the default Cursor model
+ */
+export function createSetCursorDefaultModelHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { model, projectPath } = req.body;
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath is required',
+ });
+ return;
+ }
+
+ // Validate path to prevent traversal attacks
+ validateProjectPath(projectPath);
+
+ if (!model || !(model in CURSOR_MODEL_MAP)) {
+ res.status(400).json({
+ success: false,
+ error: `Invalid model ID. Valid models: ${Object.keys(CURSOR_MODEL_MAP).join(', ')}`,
+ });
+ return;
+ }
+
+ const configManager = new CursorConfigManager(projectPath);
+ configManager.setDefaultModel(model as CursorModelId);
+
+ res.json({ success: true, model });
+ } catch (error) {
+ logError(error, 'Set Cursor default model failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * Creates handler for POST /api/setup/cursor-config/models
+ * Sets the enabled Cursor models list
+ */
+export function createSetCursorModelsHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { models, projectPath } = req.body;
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath is required',
+ });
+ return;
+ }
+
+ // Validate path to prevent traversal attacks
+ validateProjectPath(projectPath);
+
+ if (!Array.isArray(models)) {
+ res.status(400).json({
+ success: false,
+ error: 'Models must be an array',
+ });
+ return;
+ }
+
+ // Filter to valid models only
+ const validModels = models.filter((m): m is CursorModelId => m in CURSOR_MODEL_MAP);
+
+ if (validModels.length === 0) {
+ res.status(400).json({
+ success: false,
+ error: 'No valid models provided',
+ });
+ return;
+ }
+
+ const configManager = new CursorConfigManager(projectPath);
+ configManager.setEnabledModels(validModels);
+
+ res.json({ success: true, models: validModels });
+ } catch (error) {
+ logError(error, 'Set Cursor models failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+// =============================================================================
+// Cursor CLI Permissions Handlers
+// =============================================================================
+
+/**
+ * Creates handler for GET /api/setup/cursor-permissions
+ * Returns current permissions configuration and available profiles
+ */
+export function createGetCursorPermissionsHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const projectPath = req.query.projectPath as string | undefined;
+
+ // Validate path if provided
+ if (projectPath) {
+ validateProjectPath(projectPath);
+ }
+
+ // Get global config
+ const globalConfig = await readGlobalConfig();
+
+ // Get project config if path provided
+ const projectConfig = projectPath ? await readProjectConfig(projectPath) : null;
+
+ // Get effective permissions
+ const effectivePermissions = await getEffectivePermissions(projectPath);
+
+ // Detect which profile is active
+ const activeProfile = detectProfile(effectivePermissions);
+
+ // Check if project has its own config
+ const hasProject = projectPath ? await hasProjectConfig(projectPath) : false;
+
+ res.json({
+ success: true,
+ globalPermissions: globalConfig?.permissions || null,
+ projectPermissions: projectConfig?.permissions || null,
+ effectivePermissions,
+ activeProfile,
+ hasProjectConfig: hasProject,
+ availableProfiles: getAvailableProfiles(),
+ });
+ } catch (error) {
+ logError(error, 'Get Cursor permissions failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * Creates handler for POST /api/setup/cursor-permissions/profile
+ * Applies a predefined permission profile
+ */
+export function createApplyPermissionProfileHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { profileId, projectPath, scope } = req.body as {
+ profileId: CursorPermissionProfile;
+ projectPath?: string;
+ scope: 'global' | 'project';
+ };
+
+ // Validate profile
+ const validProfiles = CURSOR_PERMISSION_PROFILES.map((p) => p.id);
+ if (!validProfiles.includes(profileId)) {
+ res.status(400).json({
+ success: false,
+ error: `Invalid profile. Valid profiles: ${validProfiles.join(', ')}`,
+ });
+ return;
+ }
+
+ if (scope === 'project') {
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath is required for project scope',
+ });
+ return;
+ }
+ // Validate path to prevent traversal attacks
+ validateProjectPath(projectPath);
+ await applyProfileToProject(projectPath, profileId);
+ } else {
+ await applyProfileGlobally(profileId);
+ }
+
+ res.json({
+ success: true,
+ message: `Applied "${profileId}" profile to ${scope}`,
+ scope,
+ profileId,
+ });
+ } catch (error) {
+ logError(error, 'Apply Cursor permission profile failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * Creates handler for POST /api/setup/cursor-permissions/custom
+ * Sets custom permissions for a project
+ */
+export function createSetCustomPermissionsHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, permissions } = req.body as {
+ projectPath: string;
+ permissions: CursorCliPermissions;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath is required',
+ });
+ return;
+ }
+
+ // Validate path to prevent traversal attacks
+ validateProjectPath(projectPath);
+
+ if (!permissions || !Array.isArray(permissions.allow) || !Array.isArray(permissions.deny)) {
+ res.status(400).json({
+ success: false,
+ error: 'permissions must have allow and deny arrays',
+ });
+ return;
+ }
+
+ await writeProjectConfig(projectPath, {
+ version: 1,
+ permissions,
+ });
+
+ res.json({
+ success: true,
+ message: 'Custom permissions saved',
+ permissions,
+ });
+ } catch (error) {
+ logError(error, 'Set custom Cursor permissions failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * Creates handler for DELETE /api/setup/cursor-permissions
+ * Deletes project-level permissions (falls back to global)
+ */
+export function createDeleteProjectPermissionsHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const projectPath = req.query.projectPath as string;
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath query parameter is required',
+ });
+ return;
+ }
+
+ // Validate path to prevent traversal attacks
+ validateProjectPath(projectPath);
+
+ await deleteProjectConfig(projectPath);
+
+ res.json({
+ success: true,
+ message: 'Project permissions deleted, using global config',
+ });
+ } catch (error) {
+ logError(error, 'Delete Cursor project permissions failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * Creates handler for GET /api/setup/cursor-permissions/example
+ * Returns an example config file for a profile
+ */
+export function createGetExampleConfigHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const profileId = CURSOR_PERMISSION_PROFILES.some((p) => p.id === req.query.profileId) ? (req.query.profileId as CursorPermissionProfile) : 'development';
+
+ const exampleConfig = generateExampleConfig(profileId);
+
+ res.json({
+ success: true,
+ profileId,
+ config: exampleConfig,
+ });
+ } catch (error) {
+ logError(error, 'Get example Cursor config failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/cursor-status.ts b/apps/server/src/routes/setup/routes/cursor-status.ts
new file mode 100644
index 00000000..f9349aa7
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/cursor-status.ts
@@ -0,0 +1,88 @@
+/**
+ * GET /cursor-status endpoint - Get Cursor CLI installation and auth status
+ */
+
+import type { Request, Response } from 'express';
+import { CursorProvider } from '../../../providers/cursor-provider.js';
+import { getErrorMessage, logError } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const DISCONNECTED_MARKER_FILE = '.cursor-disconnected';
+
+function isCursorDisconnectedFromApp(): boolean {
+ try {
+ const projectRoot = process.cwd();
+ const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
+ return fs.existsSync(markerPath);
+ } catch {
+ return false;
+ }
+}
+
+/**
+ * Creates handler for GET /api/setup/cursor-status
+ * Returns Cursor CLI installation and authentication status
+ */
+export function createCursorStatusHandler() {
+ const installCommand = 'curl https://cursor.com/install -fsS | bash';
+ const loginCommand = 'cursor-agent login';
+
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Check if user has manually disconnected from the app
+ if (isCursorDisconnectedFromApp()) {
+ const provider = new CursorProvider();
+ const [installed, version] = await Promise.all([
+ provider.isInstalled(),
+ provider.getVersion(),
+ ]);
+ const cliPath = installed ? provider.getCliPath() : null;
+
+ res.json({
+ success: true,
+ installed,
+ version: version || null,
+ path: cliPath,
+ auth: {
+ authenticated: false,
+ method: 'none',
+ },
+ installCommand,
+ loginCommand,
+ });
+ return;
+ }
+
+ const provider = new CursorProvider();
+
+ const [installed, version, auth] = await Promise.all([
+ provider.isInstalled(),
+ provider.getVersion(),
+ provider.checkAuth(),
+ ]);
+
+ // Get CLI path from provider using public accessor
+ const cliPath = installed ? provider.getCliPath() : null;
+
+ res.json({
+ success: true,
+ installed,
+ version: version || null,
+ path: cliPath,
+ auth: {
+ authenticated: auth.authenticated,
+ method: auth.method,
+ },
+ installCommand,
+ loginCommand,
+ });
+ } catch (error) {
+ logError(error, 'Get Cursor status failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/deauth-claude.ts b/apps/server/src/routes/setup/routes/deauth-claude.ts
new file mode 100644
index 00000000..8f3c1930
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/deauth-claude.ts
@@ -0,0 +1,44 @@
+/**
+ * POST /deauth-claude endpoint - Sign out from Claude CLI
+ */
+
+import type { Request, Response } from 'express';
+import { getErrorMessage, logError } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+
+export function createDeauthClaudeHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Create a marker file to indicate the CLI is disconnected from the app
+ const automakerDir = path.join(process.cwd(), '.automaker');
+ const markerPath = path.join(automakerDir, '.claude-disconnected');
+
+ // Ensure .automaker directory exists
+ if (!fs.existsSync(automakerDir)) {
+ fs.mkdirSync(automakerDir, { recursive: true });
+ }
+
+ // Create the marker file with timestamp
+ fs.writeFileSync(
+ markerPath,
+ JSON.stringify({
+ disconnectedAt: new Date().toISOString(),
+ message: 'Claude CLI is disconnected from the app',
+ })
+ );
+
+ res.json({
+ success: true,
+ message: 'Claude CLI is now disconnected from the app',
+ });
+ } catch (error) {
+ logError(error, 'Deauth Claude failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to disconnect Claude CLI from the app',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/deauth-codex.ts b/apps/server/src/routes/setup/routes/deauth-codex.ts
new file mode 100644
index 00000000..f44a6e15
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/deauth-codex.ts
@@ -0,0 +1,44 @@
+/**
+ * POST /deauth-codex endpoint - Sign out from Codex CLI
+ */
+
+import type { Request, Response } from 'express';
+import { logError, getErrorMessage } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+
+export function createDeauthCodexHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Create a marker file to indicate the CLI is disconnected from the app
+ const automakerDir = path.join(process.cwd(), '.automaker');
+ const markerPath = path.join(automakerDir, '.codex-disconnected');
+
+ // Ensure .automaker directory exists
+ if (!fs.existsSync(automakerDir)) {
+ fs.mkdirSync(automakerDir, { recursive: true });
+ }
+
+ // Create the marker file with timestamp
+ fs.writeFileSync(
+ markerPath,
+ JSON.stringify({
+ disconnectedAt: new Date().toISOString(),
+ message: 'Codex CLI is disconnected from the app',
+ })
+ );
+
+ res.json({
+ success: true,
+ message: 'Codex CLI is now disconnected from the app',
+ });
+ } catch (error) {
+ logError(error, 'Deauth Codex failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to disconnect Codex CLI from the app',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/deauth-cursor.ts b/apps/server/src/routes/setup/routes/deauth-cursor.ts
new file mode 100644
index 00000000..303b2006
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/deauth-cursor.ts
@@ -0,0 +1,44 @@
+/**
+ * POST /deauth-cursor endpoint - Sign out from Cursor CLI
+ */
+
+import type { Request, Response } from 'express';
+import { logError, getErrorMessage } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+
+export function createDeauthCursorHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Create a marker file to indicate the CLI is disconnected from the app
+ const automakerDir = path.join(process.cwd(), '.automaker');
+ const markerPath = path.join(automakerDir, '.cursor-disconnected');
+
+ // Ensure .automaker directory exists
+ if (!fs.existsSync(automakerDir)) {
+ fs.mkdirSync(automakerDir, { recursive: true });
+ }
+
+ // Create the marker file with timestamp
+ fs.writeFileSync(
+ markerPath,
+ JSON.stringify({
+ disconnectedAt: new Date().toISOString(),
+ message: 'Cursor CLI is disconnected from the app',
+ })
+ );
+
+ res.json({
+ success: true,
+ message: 'Cursor CLI is now disconnected from the app',
+ });
+ } catch (error) {
+ logError(error, 'Deauth Cursor failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to disconnect Cursor CLI from the app',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/deauth-opencode.ts b/apps/server/src/routes/setup/routes/deauth-opencode.ts
new file mode 100644
index 00000000..b0567bc8
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/deauth-opencode.ts
@@ -0,0 +1,40 @@
+import type { Request, Response } from 'express';
+import { logError, getErrorMessage } from '../common.js';
+import * as fs from 'fs';
+import * as path from 'path';
+
+export function createDeauthOpencodeHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Create a marker file to indicate the CLI is disconnected from the app
+ const automakerDir = path.join(process.cwd(), '.automaker');
+ const markerPath = path.join(automakerDir, '.opencode-disconnected');
+
+ // Ensure .automaker directory exists
+ if (!fs.existsSync(automakerDir)) {
+ fs.mkdirSync(automakerDir, { recursive: true });
+ }
+
+ // Create the marker file with timestamp
+ fs.writeFileSync(
+ markerPath,
+ JSON.stringify({
+ disconnectedAt: new Date().toISOString(),
+ message: 'OpenCode CLI is disconnected from the app',
+ })
+ );
+
+ res.json({
+ success: true,
+ message: 'OpenCode CLI is now disconnected from the app',
+ });
+ } catch (error) {
+ logError(error, 'Deauth OpenCode failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ message: 'Failed to disconnect OpenCode CLI from the app',
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/delete-api-key.ts b/apps/server/src/routes/setup/routes/delete-api-key.ts
index 0fee1b8b..242425fb 100644
--- a/apps/server/src/routes/setup/routes/delete-api-key.ts
+++ b/apps/server/src/routes/setup/routes/delete-api-key.ts
@@ -46,13 +46,14 @@ export function createDeleteApiKeyHandler() {
// Map provider to env key name
const envKeyMap: Record = {
anthropic: 'ANTHROPIC_API_KEY',
+ openai: 'OPENAI_API_KEY',
};
const envKey = envKeyMap[provider];
if (!envKey) {
res.status(400).json({
success: false,
- error: `Unknown provider: ${provider}. Only anthropic is supported.`,
+ error: `Unknown provider: ${provider}. Only anthropic and openai are supported.`,
});
return;
}
diff --git a/apps/server/src/routes/setup/routes/install-codex.ts b/apps/server/src/routes/setup/routes/install-codex.ts
new file mode 100644
index 00000000..ea40e92d
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/install-codex.ts
@@ -0,0 +1,33 @@
+/**
+ * POST /install-codex endpoint - Install Codex CLI
+ */
+
+import type { Request, Response } from 'express';
+import { logError, getErrorMessage } from '../common.js';
+
+/**
+ * Creates handler for POST /api/setup/install-codex
+ * Installs Codex CLI (currently returns instructions for manual install)
+ */
+export function createInstallCodexHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // For now, return manual installation instructions
+ // In the future, this could potentially trigger npm global install
+ const installCommand = 'npm install -g @openai/codex';
+
+ res.json({
+ success: true,
+ message: `Please install Codex CLI manually by running: ${installCommand}`,
+ requiresManualInstall: true,
+ installCommand,
+ });
+ } catch (error) {
+ logError(error, 'Install Codex failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/opencode-models.ts b/apps/server/src/routes/setup/routes/opencode-models.ts
new file mode 100644
index 00000000..a3b2b7be
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/opencode-models.ts
@@ -0,0 +1,189 @@
+/**
+ * OpenCode Dynamic Models API Routes
+ *
+ * Provides endpoints for:
+ * - GET /api/setup/opencode/models - Get available models (cached or refreshed)
+ * - POST /api/setup/opencode/models/refresh - Force refresh models from CLI
+ * - GET /api/setup/opencode/providers - Get authenticated providers
+ */
+
+import type { Request, Response } from 'express';
+import {
+ OpencodeProvider,
+ type OpenCodeProviderInfo,
+} from '../../../providers/opencode-provider.js';
+import { getErrorMessage, logError } from '../common.js';
+import type { ModelDefinition } from '@automaker/types';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('OpenCodeModelsRoute');
+
+// Singleton provider instance for caching
+let providerInstance: OpencodeProvider | null = null;
+
+function getProvider(): OpencodeProvider {
+ if (!providerInstance) {
+ providerInstance = new OpencodeProvider();
+ }
+ return providerInstance;
+}
+
+/**
+ * Response type for models endpoint
+ */
+interface ModelsResponse {
+ success: boolean;
+ models?: ModelDefinition[];
+ count?: number;
+ cached?: boolean;
+ error?: string;
+}
+
+/**
+ * Response type for providers endpoint
+ */
+interface ProvidersResponse {
+ success: boolean;
+ providers?: OpenCodeProviderInfo[];
+ authenticated?: OpenCodeProviderInfo[];
+ error?: string;
+}
+
+/**
+ * Creates handler for GET /api/setup/opencode/models
+ *
+ * Returns currently available models (from cache if available).
+ * Query params:
+ * - refresh=true: Force refresh from CLI before returning
+ *
+ * Note: If cache is empty, this will trigger a refresh to get dynamic models.
+ */
+export function createGetOpencodeModelsHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const provider = getProvider();
+ const forceRefresh = req.query.refresh === 'true';
+
+ let models: ModelDefinition[];
+ let cached = true;
+
+ if (forceRefresh) {
+ models = await provider.refreshModels();
+ cached = false;
+ } else {
+ // Check if we have cached models
+ const cachedModels = provider.getAvailableModels();
+
+ // If cache only has default models (provider.hasCachedModels() would be false),
+ // trigger a refresh to get dynamic models
+ if (!provider.hasCachedModels()) {
+ models = await provider.refreshModels();
+ cached = false;
+ } else {
+ models = cachedModels;
+ }
+ }
+
+ const response: ModelsResponse = {
+ success: true,
+ models,
+ count: models.length,
+ cached,
+ };
+
+ res.json(response);
+ } catch (error) {
+ logError(error, 'Get OpenCode models failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ } as ModelsResponse);
+ }
+ };
+}
+
+/**
+ * Creates handler for POST /api/setup/opencode/models/refresh
+ *
+ * Forces a refresh of models from the OpenCode CLI.
+ */
+export function createRefreshOpencodeModelsHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ const provider = getProvider();
+ const models = await provider.refreshModels();
+
+ const response: ModelsResponse = {
+ success: true,
+ models,
+ count: models.length,
+ cached: false,
+ };
+
+ res.json(response);
+ } catch (error) {
+ logError(error, 'Refresh OpenCode models failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ } as ModelsResponse);
+ }
+ };
+}
+
+/**
+ * Creates handler for GET /api/setup/opencode/providers
+ *
+ * Returns authenticated providers from OpenCode CLI.
+ * This calls `opencode auth list` to get provider status.
+ */
+export function createGetOpencodeProvidersHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ const provider = getProvider();
+ const providers = await provider.fetchAuthenticatedProviders();
+
+ // Filter to only authenticated providers
+ const authenticated = providers.filter((p) => p.authenticated);
+
+ const response: ProvidersResponse = {
+ success: true,
+ providers,
+ authenticated,
+ };
+
+ res.json(response);
+ } catch (error) {
+ logError(error, 'Get OpenCode providers failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ } as ProvidersResponse);
+ }
+ };
+}
+
+/**
+ * Creates handler for POST /api/setup/opencode/cache/clear
+ *
+ * Clears the model cache, forcing a fresh fetch on next access.
+ */
+export function createClearOpencodeCacheHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ const provider = getProvider();
+ provider.clearModelCache();
+
+ res.json({
+ success: true,
+ message: 'OpenCode model cache cleared',
+ });
+ } catch (error) {
+ logError(error, 'Clear OpenCode cache failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/opencode-status.ts b/apps/server/src/routes/setup/routes/opencode-status.ts
new file mode 100644
index 00000000..f474cfb1
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/opencode-status.ts
@@ -0,0 +1,59 @@
+/**
+ * GET /opencode-status endpoint - Get OpenCode CLI installation and auth status
+ */
+
+import type { Request, Response } from 'express';
+import { OpencodeProvider } from '../../../providers/opencode-provider.js';
+import { getErrorMessage, logError } from '../common.js';
+
+/**
+ * Creates handler for GET /api/setup/opencode-status
+ * Returns OpenCode CLI installation and authentication status
+ */
+export function createOpencodeStatusHandler() {
+ const installCommand = 'curl -fsSL https://opencode.ai/install | bash';
+ const loginCommand = 'opencode auth login';
+
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ const provider = new OpencodeProvider();
+ const status = await provider.detectInstallation();
+
+ // Derive auth method from authenticated status and API key presence
+ let authMethod = 'none';
+ if (status.authenticated) {
+ authMethod = status.hasApiKey ? 'api_key_env' : 'cli_authenticated';
+ }
+
+ res.json({
+ success: true,
+ installed: status.installed,
+ version: status.version || null,
+ path: status.path || null,
+ auth: {
+ authenticated: status.authenticated || false,
+ method: authMethod,
+ hasApiKey: status.hasApiKey || false,
+ hasEnvApiKey: !!process.env.ANTHROPIC_API_KEY || !!process.env.OPENAI_API_KEY,
+ hasOAuthToken: status.hasOAuthToken || false,
+ },
+ recommendation: status.installed
+ ? undefined
+ : 'Install OpenCode CLI to use multi-provider AI models.',
+ installCommand,
+ loginCommand,
+ installCommands: {
+ macos: installCommand,
+ linux: installCommand,
+ npm: 'npm install -g opencode-ai',
+ },
+ });
+ } catch (error) {
+ logError(error, 'Get OpenCode status failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/setup/routes/verify-claude-auth.ts b/apps/server/src/routes/setup/routes/verify-claude-auth.ts
index 5debc5c7..df04d462 100644
--- a/apps/server/src/routes/setup/routes/verify-claude-auth.ts
+++ b/apps/server/src/routes/setup/routes/verify-claude-auth.ts
@@ -7,8 +7,16 @@ import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { getApiKey } from '../common.js';
+import {
+ createSecureAuthEnv,
+ AuthSessionManager,
+ AuthRateLimiter,
+ validateApiKey,
+ createTempEnvOverride,
+} from '../../../lib/auth-utils.js';
const logger = createLogger('Setup');
+const rateLimiter = new AuthRateLimiter();
// Known error patterns that indicate auth failure
const AUTH_ERROR_PATTERNS = [
@@ -71,10 +79,28 @@ function containsAuthError(text: string): boolean {
export function createVerifyClaudeAuthHandler() {
return async (req: Request, res: Response): Promise => {
try {
- // Get the auth method from the request body
- const { authMethod } = req.body as { authMethod?: 'cli' | 'api_key' };
+ // Get the auth method and optional API key from the request body
+ const { authMethod, apiKey } = req.body as {
+ authMethod?: 'cli' | 'api_key';
+ apiKey?: string;
+ };
- logger.info(`[Setup] Verifying Claude authentication using method: ${authMethod || 'auto'}`);
+ // Rate limiting to prevent abuse
+ const clientIp = req.ip || req.socket.remoteAddress || 'unknown';
+ if (!rateLimiter.canAttempt(clientIp)) {
+ const resetTime = rateLimiter.getResetTime(clientIp);
+ res.status(429).json({
+ success: false,
+ authenticated: false,
+ error: 'Too many authentication attempts. Please try again later.',
+ resetTime,
+ });
+ return;
+ }
+
+ logger.info(
+ `[Setup] Verifying Claude authentication using method: ${authMethod || 'auto'}${apiKey ? ' (with provided key)' : ''}`
+ );
// Create an AbortController with a 30-second timeout
const abortController = new AbortController();
@@ -84,34 +110,48 @@ export function createVerifyClaudeAuthHandler() {
let errorMessage = '';
let receivedAnyContent = false;
- // Save original env values
- const originalAnthropicKey = process.env.ANTHROPIC_API_KEY;
+ // Create secure auth session
+ const sessionId = `claude-auth-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
try {
- // Configure environment based on auth method
- if (authMethod === 'cli') {
- // For CLI verification, remove any API key so it uses CLI credentials only
- delete process.env.ANTHROPIC_API_KEY;
- logger.info('[Setup] Cleared API key environment for CLI verification');
- } else if (authMethod === 'api_key') {
- // For API key verification, ensure we're using the stored API key
- const storedApiKey = getApiKey('anthropic');
- if (storedApiKey) {
- process.env.ANTHROPIC_API_KEY = storedApiKey;
- logger.info('[Setup] Using stored API key for verification');
- } else {
- // Check env var
- if (!process.env.ANTHROPIC_API_KEY) {
- res.json({
- success: true,
- authenticated: false,
- error: 'No API key configured. Please enter an API key first.',
- });
- return;
- }
+ // For API key verification, validate the key first
+ if (authMethod === 'api_key' && apiKey) {
+ const validation = validateApiKey(apiKey, 'anthropic');
+ if (!validation.isValid) {
+ res.json({
+ success: true,
+ authenticated: false,
+ error: validation.error,
+ });
+ return;
}
}
+ // Create secure environment without modifying process.env
+ const authEnv = createSecureAuthEnv(authMethod || 'api_key', apiKey, 'anthropic');
+
+ // For API key verification without provided key, use stored key or env var
+ if (authMethod === 'api_key' && !apiKey) {
+ const storedApiKey = getApiKey('anthropic');
+ if (storedApiKey) {
+ authEnv.ANTHROPIC_API_KEY = storedApiKey;
+ logger.info('[Setup] Using stored API key for verification');
+ } else if (!authEnv.ANTHROPIC_API_KEY) {
+ res.json({
+ success: true,
+ authenticated: false,
+ error: 'No API key configured. Please enter an API key first.',
+ });
+ return;
+ }
+ }
+
+ // Store the secure environment in session manager
+ AuthSessionManager.createSession(sessionId, authMethod || 'api_key', apiKey, 'anthropic');
+
+ // Create temporary environment override for SDK call
+ const cleanupEnv = createTempEnvOverride(authEnv);
+
// Run a minimal query to verify authentication
const stream = query({
prompt: "Reply with only the word 'ok'",
@@ -270,13 +310,8 @@ export function createVerifyClaudeAuthHandler() {
}
} finally {
clearTimeout(timeoutId);
- // Restore original environment
- if (originalAnthropicKey !== undefined) {
- process.env.ANTHROPIC_API_KEY = originalAnthropicKey;
- } else if (authMethod === 'cli') {
- // If we cleared it and there was no original, keep it cleared
- delete process.env.ANTHROPIC_API_KEY;
- }
+ // Clean up the auth session. NOTE(review): cleanupEnv() returned by createTempEnvOverride is never invoked in this handler, so the process.env override leaks past this request — restructure with an inner try/finally as done in verify-codex-auth.ts.
+ AuthSessionManager.destroySession(sessionId);
}
logger.info('[Setup] Verification result:', {
diff --git a/apps/server/src/routes/setup/routes/verify-codex-auth.ts b/apps/server/src/routes/setup/routes/verify-codex-auth.ts
new file mode 100644
index 00000000..00edd0f3
--- /dev/null
+++ b/apps/server/src/routes/setup/routes/verify-codex-auth.ts
@@ -0,0 +1,282 @@
+/**
+ * POST /verify-codex-auth endpoint - Verify Codex authentication
+ */
+
+import type { Request, Response } from 'express';
+import { createLogger } from '@automaker/utils';
+import { CODEX_MODEL_MAP } from '@automaker/types';
+import { ProviderFactory } from '../../../providers/provider-factory.js';
+import { getApiKey } from '../common.js';
+import { getCodexAuthIndicators } from '@automaker/platform';
+import {
+ createSecureAuthEnv,
+ AuthSessionManager,
+ AuthRateLimiter,
+ validateApiKey,
+ createTempEnvOverride,
+} from '../../../lib/auth-utils.js';
+
+const logger = createLogger('Setup');
+const rateLimiter = new AuthRateLimiter();
+const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
+const AUTH_PROMPT = "Reply with only the word 'ok'";
+const AUTH_TIMEOUT_MS = 30000;
+const ERROR_BILLING_MESSAGE =
+ 'Credit balance is too low. Please add credits to your OpenAI account.';
+const ERROR_RATE_LIMIT_MESSAGE =
+ 'Rate limit reached. Please wait a while before trying again or upgrade your plan.';
+const ERROR_CLI_AUTH_REQUIRED =
+ "CLI authentication failed. Please run 'codex login' to authenticate.";
+const ERROR_API_KEY_REQUIRED = 'No API key configured. Please enter an API key first.';
+const AUTH_ERROR_PATTERNS = [
+ 'authentication',
+ 'unauthorized',
+ 'invalid_api_key',
+ 'invalid api key',
+ 'api key is invalid',
+ 'not authenticated',
+ 'login',
+ 'auth(',
+ 'token refresh',
+ 'tokenrefresh',
+ 'failed to parse server response',
+ 'transport channel closed',
+];
+const BILLING_ERROR_PATTERNS = [
+ 'credit balance is too low',
+ 'credit balance too low',
+ 'insufficient credits',
+ 'insufficient balance',
+ 'no credits',
+ 'out of credits',
+ 'billing',
+ 'payment required',
+ 'add credits',
+];
+const RATE_LIMIT_PATTERNS = [
+ 'limit reached',
+ 'rate limit',
+ 'rate_limit',
+ 'too many requests',
+ 'resets',
+ '429',
+];
+
+function containsAuthError(text: string): boolean {
+ const lowerText = text.toLowerCase();
+ return AUTH_ERROR_PATTERNS.some((pattern) => lowerText.includes(pattern));
+}
+
+function isBillingError(text: string): boolean {
+ const lowerText = text.toLowerCase();
+ return BILLING_ERROR_PATTERNS.some((pattern) => lowerText.includes(pattern));
+}
+
+function isRateLimitError(text: string): boolean {
+ if (isBillingError(text)) {
+ return false;
+ }
+ const lowerText = text.toLowerCase();
+ return RATE_LIMIT_PATTERNS.some((pattern) => lowerText.includes(pattern));
+}
+
+export function createVerifyCodexAuthHandler() {
+ return async (req: Request, res: Response): Promise<void> => {
+ const { authMethod, apiKey } = req.body as {
+ authMethod?: 'cli' | 'api_key';
+ apiKey?: string;
+ };
+
+ // Create session ID for cleanup
+ const sessionId = `codex-auth-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
+
+ // Rate limiting
+ const clientIp = req.ip || req.socket.remoteAddress || 'unknown';
+ if (!rateLimiter.canAttempt(clientIp)) {
+ const resetTime = rateLimiter.getResetTime(clientIp);
+ res.status(429).json({
+ success: false,
+ authenticated: false,
+ error: 'Too many authentication attempts. Please try again later.',
+ resetTime,
+ });
+ return;
+ }
+
+ const abortController = new AbortController();
+ const timeoutId = setTimeout(() => abortController.abort(), AUTH_TIMEOUT_MS);
+
+ try {
+ // Create secure environment without modifying process.env
+ const authEnv = createSecureAuthEnv(authMethod || 'api_key', apiKey, 'openai');
+
+ // For API key auth, validate and use the provided key or stored key
+ if (authMethod === 'api_key') {
+ if (apiKey) {
+ // Use the provided API key
+ const validation = validateApiKey(apiKey, 'openai');
+ if (!validation.isValid) {
+ res.json({ success: true, authenticated: false, error: validation.error });
+ return;
+ }
+ authEnv[OPENAI_API_KEY_ENV] = validation.normalizedKey;
+ } else {
+ // Try stored key
+ const storedApiKey = getApiKey('openai');
+ if (storedApiKey) {
+ const validation = validateApiKey(storedApiKey, 'openai');
+ if (!validation.isValid) {
+ res.json({ success: true, authenticated: false, error: validation.error });
+ return;
+ }
+ authEnv[OPENAI_API_KEY_ENV] = validation.normalizedKey;
+ } else if (!authEnv[OPENAI_API_KEY_ENV]) {
+ res.json({ success: true, authenticated: false, error: ERROR_API_KEY_REQUIRED });
+ return;
+ }
+ }
+ }
+
+ // Create session and temporary environment override
+ AuthSessionManager.createSession(sessionId, authMethod || 'api_key', undefined, 'openai');
+ const cleanupEnv = createTempEnvOverride(authEnv);
+
+ try {
+ if (authMethod === 'cli') {
+ const authIndicators = await getCodexAuthIndicators();
+ if (!authIndicators.hasOAuthToken && !authIndicators.hasApiKey) {
+ res.json({
+ success: true,
+ authenticated: false,
+ error: ERROR_CLI_AUTH_REQUIRED,
+ });
+ return;
+ }
+ }
+
+ // Use Codex provider explicitly (not ProviderFactory.getProviderForModel)
+ // because Cursor also supports GPT models and has higher priority
+ const provider = ProviderFactory.getProviderByName('codex');
+ if (!provider) {
+ throw new Error('Codex provider not available');
+ }
+ const stream = provider.executeQuery({
+ prompt: AUTH_PROMPT,
+ model: CODEX_MODEL_MAP.gpt52Codex,
+ cwd: process.cwd(),
+ maxTurns: 1,
+ allowedTools: [],
+ abortController,
+ });
+
+ let receivedAnyContent = false;
+ let errorMessage = '';
+
+ for await (const msg of stream) {
+ if (msg.type === 'error' && msg.error) {
+ if (isBillingError(msg.error)) {
+ errorMessage = ERROR_BILLING_MESSAGE;
+ } else if (isRateLimitError(msg.error)) {
+ errorMessage = ERROR_RATE_LIMIT_MESSAGE;
+ } else {
+ errorMessage = msg.error;
+ }
+ break;
+ }
+
+ if (msg.type === 'assistant' && msg.message?.content) {
+ for (const block of msg.message.content) {
+ if (block.type === 'text' && block.text) {
+ receivedAnyContent = true;
+ if (isBillingError(block.text)) {
+ errorMessage = ERROR_BILLING_MESSAGE;
+ break;
+ }
+ if (isRateLimitError(block.text)) {
+ errorMessage = ERROR_RATE_LIMIT_MESSAGE;
+ break;
+ }
+ if (containsAuthError(block.text)) {
+ errorMessage = block.text;
+ break;
+ }
+ }
+ }
+ }
+
+ if (msg.type === 'result' && msg.result) {
+ receivedAnyContent = true;
+ if (isBillingError(msg.result)) {
+ errorMessage = ERROR_BILLING_MESSAGE;
+ } else if (isRateLimitError(msg.result)) {
+ errorMessage = ERROR_RATE_LIMIT_MESSAGE;
+ } else if (containsAuthError(msg.result)) {
+ errorMessage = msg.result;
+ break;
+ }
+ }
+ }
+
+ if (errorMessage) {
+ // Rate limit and billing errors mean auth succeeded but usage is limited
+ const isUsageLimitError =
+ errorMessage === ERROR_BILLING_MESSAGE || errorMessage === ERROR_RATE_LIMIT_MESSAGE;
+
+ const response: {
+ success: boolean;
+ authenticated: boolean;
+ error: string;
+ details?: string;
+ } = {
+ success: true,
+ authenticated: isUsageLimitError,
+ error: isUsageLimitError
+ ? errorMessage
+ : authMethod === 'cli'
+ ? ERROR_CLI_AUTH_REQUIRED
+ : 'API key is invalid or has been revoked.',
+ };
+
+ // Include detailed error for auth failures so users can debug
+ if (!isUsageLimitError && errorMessage !== response.error) {
+ response.details = errorMessage;
+ }
+
+ res.json(response);
+ return;
+ }
+
+ if (!receivedAnyContent) {
+ res.json({
+ success: true,
+ authenticated: false,
+ error: 'No response received from Codex. Please check your authentication.',
+ });
+ return;
+ }
+
+ res.json({ success: true, authenticated: true });
+ } finally {
+ // Clean up environment override
+ cleanupEnv();
+ }
+ } catch (error: unknown) {
+ const errMessage = error instanceof Error ? error.message : String(error);
+ logger.error('[Setup] Codex auth verification error:', errMessage);
+ const normalizedError = isBillingError(errMessage)
+ ? ERROR_BILLING_MESSAGE
+ : isRateLimitError(errMessage)
+ ? ERROR_RATE_LIMIT_MESSAGE
+ : errMessage;
+ res.json({
+ success: true,
+ authenticated: false,
+ error: normalizedError,
+ });
+ } finally {
+ clearTimeout(timeoutId);
+ // Clean up session
+ AuthSessionManager.destroySession(sessionId);
+ }
+ };
+}
diff --git a/apps/server/src/routes/suggestions/generate-suggestions.ts b/apps/server/src/routes/suggestions/generate-suggestions.ts
index 2af01a42..0766c41b 100644
--- a/apps/server/src/routes/suggestions/generate-suggestions.ts
+++ b/apps/server/src/routes/suggestions/generate-suggestions.ts
@@ -1,11 +1,16 @@
/**
* Business logic for generating suggestions
+ *
+ * Model is configurable via phaseModels.suggestionsModel in settings
+ * (AI Suggestions in the UI). Supports both Claude and Cursor models.
*/
-import { query } from '@anthropic-ai/claude-agent-sdk';
import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils';
-import { createSuggestionsOptions } from '../../lib/sdk-options.js';
+import { DEFAULT_PHASE_MODELS, isCursorModel, type ThinkingLevel } from '@automaker/types';
+import { resolvePhaseModel } from '@automaker/model-resolver';
+import { extractJsonWithArray } from '../../lib/json-extractor.js';
+import { streamingQuery } from '../../providers/simple-query-service.js';
import { FeatureLoader } from '../../services/feature-loader.js';
import { getAppSpecPath } from '@automaker/platform';
import * as secureFs from '../../lib/secure-fs.js';
@@ -128,7 +133,9 @@ export async function generateSuggestions(
suggestionType: string,
events: EventEmitter,
abortController: AbortController,
- settingsService?: SettingsService
+ settingsService?: SettingsService,
+ modelOverride?: string,
+ thinkingLevelOverride?: ThinkingLevel
): Promise<void> {
const typePrompts: Record<string, string> = {
features: 'Analyze this project and suggest new features that would add value.',
@@ -164,61 +171,103 @@ The response will be automatically formatted as structured JSON.`;
'[Suggestions]'
);
- const options = createSuggestionsOptions({
+ // Get model from phase settings (AI Suggestions = suggestionsModel)
+ // Use override if provided, otherwise fall back to settings
+ const settings = await settingsService?.getGlobalSettings();
+ let model: string;
+ let thinkingLevel: ThinkingLevel | undefined;
+
+ if (modelOverride) {
+ // Use explicit override - resolve the model string
+ const resolved = resolvePhaseModel({
+ model: modelOverride,
+ thinkingLevel: thinkingLevelOverride,
+ });
+ model = resolved.model;
+ thinkingLevel = resolved.thinkingLevel;
+ } else {
+ // Use settings-based model
+ const phaseModelEntry =
+ settings?.phaseModels?.suggestionsModel || DEFAULT_PHASE_MODELS.suggestionsModel;
+ const resolved = resolvePhaseModel(phaseModelEntry);
+ model = resolved.model;
+ thinkingLevel = resolved.thinkingLevel;
+ }
+
+ logger.info('[Suggestions] Using model:', model);
+
+ let responseText = '';
+
+ // Determine if we should use structured output (Claude supports it, Cursor doesn't)
+ const useStructuredOutput = !isCursorModel(model);
+
+ // Build the final prompt - for Cursor, include JSON schema instructions
+ let finalPrompt = prompt;
+ if (!useStructuredOutput) {
+ finalPrompt = `${prompt}
+
+CRITICAL INSTRUCTIONS:
+1. DO NOT write any files. Return the JSON in your response only.
+2. After analyzing the project, respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
+3. The JSON must match this exact schema:
+
+${JSON.stringify(suggestionsSchema, null, 2)}
+
+Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
+ }
+
+ // Use streamingQuery with event callbacks
+ const result = await streamingQuery({
+ prompt: finalPrompt,
+ model,
cwd: projectPath,
+ maxTurns: 250,
+ allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
- autoLoadClaudeMd,
- outputFormat: {
- type: 'json_schema',
- schema: suggestionsSchema,
+ thinkingLevel,
+ readOnly: true, // Suggestions only reads code, doesn't write
+ settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
+ outputFormat: useStructuredOutput
+ ? {
+ type: 'json_schema',
+ schema: suggestionsSchema,
+ }
+ : undefined,
+ onText: (text) => {
+ responseText += text;
+ events.emit('suggestions:event', {
+ type: 'suggestions_progress',
+ content: text,
+ });
+ },
+ onToolUse: (tool, input) => {
+ events.emit('suggestions:event', {
+ type: 'suggestions_tool',
+ tool,
+ input,
+ });
},
});
- const stream = query({ prompt, options });
- let responseText = '';
- let structuredOutput: { suggestions: Array<Record<string, unknown>> } | null = null;
-
- for await (const msg of stream) {
- if (msg.type === 'assistant' && msg.message.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text') {
- responseText += block.text;
- events.emit('suggestions:event', {
- type: 'suggestions_progress',
- content: block.text,
- });
- } else if (block.type === 'tool_use') {
- events.emit('suggestions:event', {
- type: 'suggestions_tool',
- tool: block.name,
- input: block.input,
- });
- }
- }
- } else if (msg.type === 'result' && msg.subtype === 'success') {
- // Check for structured output
- const resultMsg = msg as any;
- if (resultMsg.structured_output) {
- structuredOutput = resultMsg.structured_output as {
- suggestions: Array<Record<string, unknown>>;
- };
- logger.debug('Received structured output:', structuredOutput);
- }
- } else if (msg.type === 'result') {
- const resultMsg = msg as any;
- if (resultMsg.subtype === 'error_max_structured_output_retries') {
- logger.error('Failed to produce valid structured output after retries');
- throw new Error('Could not produce valid suggestions output');
- } else if (resultMsg.subtype === 'error_max_turns') {
- logger.error('Hit max turns limit before completing suggestions generation');
- logger.warn(`Response text length: ${responseText.length} chars`);
- // Still try to parse what we have
- }
- }
- }
-
// Use structured output if available, otherwise fall back to parsing text
try {
+ let structuredOutput: { suggestions: Array<Record<string, unknown>> } | null = null;
+
+ if (result.structured_output) {
+ structuredOutput = result.structured_output as {
+ suggestions: Array<Record<string, unknown>>;
+ };
+ logger.debug('Received structured output:', structuredOutput);
+ } else if (responseText) {
+ // Fallback: try to parse from text using shared extraction utility
+ logger.warn('No structured output received, attempting to parse from text');
+ structuredOutput = extractJsonWithArray<{ suggestions: Array<Record<string, unknown>> }>(
+ responseText,
+ 'suggestions',
+ { logger }
+ );
+ }
+
if (structuredOutput && structuredOutput.suggestions) {
// Use structured output directly
events.emit('suggestions:event', {
@@ -229,21 +278,7 @@ The response will be automatically formatted as structured JSON.`;
})),
});
} else {
- // Fallback: try to parse from text (for backwards compatibility)
- logger.warn('No structured output received, attempting to parse from text');
- const jsonMatch = responseText.match(/\{[\s\S]*"suggestions"[\s\S]*\}/);
- if (jsonMatch) {
- const parsed = JSON.parse(jsonMatch[0]);
- events.emit('suggestions:event', {
- type: 'suggestions_complete',
- suggestions: parsed.suggestions.map((s: Record<string, unknown>, i: number) => ({
- ...s,
- id: s.id || `suggestion-${Date.now()}-${i}`,
- })),
- });
- } else {
- throw new Error('No valid JSON found in response');
- }
+ throw new Error('No valid JSON found in response');
}
} catch (error) {
// Log the parsing error for debugging
diff --git a/apps/server/src/routes/suggestions/routes/generate.ts b/apps/server/src/routes/suggestions/routes/generate.ts
index da57ed76..6ce2427b 100644
--- a/apps/server/src/routes/suggestions/routes/generate.ts
+++ b/apps/server/src/routes/suggestions/routes/generate.ts
@@ -5,6 +5,7 @@
import type { Request, Response } from 'express';
import type { EventEmitter } from '../../../lib/events.js';
import { createLogger } from '@automaker/utils';
+import type { ThinkingLevel } from '@automaker/types';
import { getSuggestionsStatus, setRunningState, getErrorMessage, logError } from '../common.js';
import { generateSuggestions } from '../generate-suggestions.js';
import type { SettingsService } from '../../../services/settings-service.js';
@@ -14,9 +15,16 @@ const logger = createLogger('Suggestions');
export function createGenerateHandler(events: EventEmitter, settingsService?: SettingsService) {
return async (req: Request, res: Response): Promise => {
try {
- const { projectPath, suggestionType = 'features' } = req.body as {
+ const {
+ projectPath,
+ suggestionType = 'features',
+ model,
+ thinkingLevel,
+ } = req.body as {
projectPath: string;
suggestionType?: string;
+ model?: string;
+ thinkingLevel?: ThinkingLevel;
};
if (!projectPath) {
@@ -38,7 +46,15 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
setRunningState(true, abortController);
// Start generation in background
- generateSuggestions(projectPath, suggestionType, events, abortController, settingsService)
+ generateSuggestions(
+ projectPath,
+ suggestionType,
+ events,
+ abortController,
+ settingsService,
+ model,
+ thinkingLevel
+ )
.catch((error) => {
logError(error, 'Generate suggestions failed (background)');
events.emit('suggestions:event', {
diff --git a/apps/server/src/routes/worktree/common.ts b/apps/server/src/routes/worktree/common.ts
index 4f63a382..75c3a437 100644
--- a/apps/server/src/routes/worktree/common.ts
+++ b/apps/server/src/routes/worktree/common.ts
@@ -3,15 +3,51 @@
*/
import { createLogger } from '@automaker/utils';
+import { spawnProcess } from '@automaker/platform';
import { exec } from 'child_process';
import { promisify } from 'util';
-import path from 'path';
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
-import { FeatureLoader } from '../../services/feature-loader.js';
const logger = createLogger('Worktree');
export const execAsync = promisify(exec);
-const featureLoader = new FeatureLoader();
+
+// ============================================================================
+// Secure Command Execution
+// ============================================================================
+
+/**
+ * Execute git command with array arguments to prevent command injection.
+ * Uses spawnProcess from @automaker/platform for secure, cross-platform execution.
+ *
+ * @param args - Array of git command arguments (e.g., ['worktree', 'add', path])
+ * @param cwd - Working directory to execute the command in
+ * @returns Promise resolving to stdout output
+ * @throws Error with stderr message if command fails
+ *
+ * @example
+ * ```typescript
+ * // Safe: no injection possible
+ * await execGitCommand(['branch', '-D', branchName], projectPath);
+ *
+ * // Instead of unsafe:
+ * // await execAsync(`git branch -D ${branchName}`, { cwd });
+ * ```
+ */
+export async function execGitCommand(args: string[], cwd: string): Promise {
+ const result = await spawnProcess({
+ command: 'git',
+ args,
+ cwd,
+ });
+
+ // spawnProcess returns { stdout, stderr, exitCode }
+ if (result.exitCode === 0) {
+ return result.stdout;
+ } else {
+ const errorMessage = result.stderr || `Git command failed with code ${result.exitCode}`;
+ throw new Error(errorMessage);
+ }
+}
// ============================================================================
// Constants
@@ -99,18 +135,6 @@ export function normalizePath(p: string): string {
return p.replace(/\\/g, '/');
}
-/**
- * Check if a path is a git repo
- */
-export async function isGitRepo(repoPath: string): Promise {
- try {
- await execAsync('git rev-parse --is-inside-work-tree', { cwd: repoPath });
- return true;
- } catch {
- return false;
- }
-}
-
/**
* Check if a git repository has at least one commit (i.e., HEAD exists)
* Returns false for freshly initialized repos with no commits
diff --git a/apps/server/src/routes/worktree/index.ts b/apps/server/src/routes/worktree/index.ts
index 7fef5c6e..a00e0bfe 100644
--- a/apps/server/src/routes/worktree/index.ts
+++ b/apps/server/src/routes/worktree/index.ts
@@ -3,6 +3,7 @@
*/
import { Router } from 'express';
+import type { EventEmitter } from '../../lib/events.js';
import { validatePathParams } from '../../middleware/validate-paths.js';
import { requireValidWorktree, requireValidProject, requireGitRepoOnly } from './middleware.js';
import { createInfoHandler } from './routes/info.js';
@@ -24,14 +25,22 @@ import { createSwitchBranchHandler } from './routes/switch-branch.js';
import {
createOpenInEditorHandler,
createGetDefaultEditorHandler,
+ createGetAvailableEditorsHandler,
+ createRefreshEditorsHandler,
} from './routes/open-in-editor.js';
import { createInitGitHandler } from './routes/init-git.js';
import { createMigrateHandler } from './routes/migrate.js';
import { createStartDevHandler } from './routes/start-dev.js';
import { createStopDevHandler } from './routes/stop-dev.js';
import { createListDevServersHandler } from './routes/list-dev-servers.js';
+import {
+ createGetInitScriptHandler,
+ createPutInitScriptHandler,
+ createDeleteInitScriptHandler,
+ createRunInitScriptHandler,
+} from './routes/init-script.js';
-export function createWorktreeRoutes(): Router {
+export function createWorktreeRoutes(events: EventEmitter): Router {
const router = Router();
router.post('/info', validatePathParams('projectPath'), createInfoHandler());
@@ -45,7 +54,7 @@ export function createWorktreeRoutes(): Router {
requireValidProject,
createMergeHandler()
);
- router.post('/create', validatePathParams('projectPath'), createCreateHandler());
+ router.post('/create', validatePathParams('projectPath'), createCreateHandler(events));
router.post('/delete', validatePathParams('projectPath', 'worktreePath'), createDeleteHandler());
router.post('/create-pr', createCreatePRHandler());
router.post('/pr-info', createPRInfoHandler());
@@ -77,6 +86,8 @@ export function createWorktreeRoutes(): Router {
router.post('/switch-branch', requireValidWorktree, createSwitchBranchHandler());
router.post('/open-in-editor', validatePathParams('worktreePath'), createOpenInEditorHandler());
router.get('/default-editor', createGetDefaultEditorHandler());
+ router.get('/available-editors', createGetAvailableEditorsHandler());
+ router.post('/refresh-editors', createRefreshEditorsHandler());
router.post('/init-git', validatePathParams('projectPath'), createInitGitHandler());
router.post('/migrate', createMigrateHandler());
router.post(
@@ -87,5 +98,15 @@ export function createWorktreeRoutes(): Router {
router.post('/stop-dev', createStopDevHandler());
router.post('/list-dev-servers', createListDevServersHandler());
+ // Init script routes
+ router.get('/init-script', createGetInitScriptHandler());
+ router.put('/init-script', validatePathParams('projectPath'), createPutInitScriptHandler());
+ router.delete('/init-script', validatePathParams('projectPath'), createDeleteInitScriptHandler());
+ router.post(
+ '/run-init-script',
+ validatePathParams('projectPath', 'worktreePath'),
+ createRunInitScriptHandler(events)
+ );
+
return router;
}
diff --git a/apps/server/src/routes/worktree/middleware.ts b/apps/server/src/routes/worktree/middleware.ts
index d933fff4..eb83377f 100644
--- a/apps/server/src/routes/worktree/middleware.ts
+++ b/apps/server/src/routes/worktree/middleware.ts
@@ -3,7 +3,8 @@
*/
import type { Request, Response, NextFunction } from 'express';
-import { isGitRepo, hasCommits } from './common.js';
+import { isGitRepo } from '@automaker/git-utils';
+import { hasCommits } from './common.js';
interface ValidationOptions {
/** Check if the path is a git repository (default: true) */
diff --git a/apps/server/src/routes/worktree/routes/branch-tracking.ts b/apps/server/src/routes/worktree/routes/branch-tracking.ts
index 478ebc06..1c9f069a 100644
--- a/apps/server/src/routes/worktree/routes/branch-tracking.ts
+++ b/apps/server/src/routes/worktree/routes/branch-tracking.ts
@@ -8,6 +8,9 @@
import * as secureFs from '../../../lib/secure-fs.js';
import path from 'path';
import { getBranchTrackingPath, ensureAutomakerDir } from '@automaker/platform';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('BranchTracking');
export interface TrackedBranch {
name: string;
@@ -32,7 +35,7 @@ export async function getTrackedBranches(projectPath: string): Promise => {
@@ -56,15 +59,15 @@ export function createCreatePRHandler() {
}
// Check for uncommitted changes
- console.log(`[CreatePR] Checking for uncommitted changes in: ${worktreePath}`);
+ logger.debug(`Checking for uncommitted changes in: ${worktreePath}`);
const { stdout: status } = await execAsync('git status --porcelain', {
cwd: worktreePath,
env: execEnv,
});
const hasChanges = status.trim().length > 0;
- console.log(`[CreatePR] Has uncommitted changes: ${hasChanges}`);
+ logger.debug(`Has uncommitted changes: ${hasChanges}`);
if (hasChanges) {
- console.log(`[CreatePR] Changed files:\n${status}`);
+ logger.debug(`Changed files:\n${status}`);
}
// If there are changes, commit them
@@ -72,15 +75,15 @@ export function createCreatePRHandler() {
let commitError: string | null = null;
if (hasChanges) {
const message = commitMessage || `Changes from ${branchName}`;
- console.log(`[CreatePR] Committing changes with message: ${message}`);
+ logger.debug(`Committing changes with message: ${message}`);
try {
// Stage all changes
- console.log(`[CreatePR] Running: git add -A`);
+ logger.debug(`Running: git add -A`);
await execAsync('git add -A', { cwd: worktreePath, env: execEnv });
// Create commit
- console.log(`[CreatePR] Running: git commit`);
+ logger.debug(`Running: git commit`);
await execAsync(`git commit -m "${message.replace(/"/g, '\\"')}"`, {
cwd: worktreePath,
env: execEnv,
@@ -92,11 +95,11 @@ export function createCreatePRHandler() {
env: execEnv,
});
commitHash = hashOutput.trim().substring(0, 8);
- console.log(`[CreatePR] Commit successful: ${commitHash}`);
+ logger.info(`Commit successful: ${commitHash}`);
} catch (commitErr: unknown) {
const err = commitErr as { stderr?: string; message?: string };
commitError = err.stderr || err.message || 'Commit failed';
- console.error(`[CreatePR] Commit failed: ${commitError}`);
+ logger.error(`Commit failed: ${commitError}`);
// Return error immediately - don't proceed with push/PR if commit fails
res.status(500).json({
@@ -126,7 +129,7 @@ export function createCreatePRHandler() {
// Capture push error for reporting
const err = error2 as { stderr?: string; message?: string };
pushError = err.stderr || err.message || 'Push failed';
- console.error('[CreatePR] Push failed:', pushError);
+ logger.error('Push failed:', pushError);
}
}
@@ -246,26 +249,22 @@ export function createCreatePRHandler() {
const headRef = upstreamRepo && originOwner ? `${originOwner}:${branchName}` : branchName;
const repoArg = upstreamRepo ? ` --repo "${upstreamRepo}"` : '';
- console.log(
- `[CreatePR] Checking for existing PR for branch: ${branchName} (headRef: ${headRef})`
- );
+ logger.debug(`Checking for existing PR for branch: ${branchName} (headRef: ${headRef})`);
try {
const listCmd = `gh pr list${repoArg} --head "${headRef}" --json number,title,url,state --limit 1`;
- console.log(`[CreatePR] Running: ${listCmd}`);
+ logger.debug(`Running: ${listCmd}`);
const { stdout: existingPrOutput } = await execAsync(listCmd, {
cwd: worktreePath,
env: execEnv,
});
- console.log(`[CreatePR] gh pr list output: ${existingPrOutput}`);
+ logger.debug(`gh pr list output: ${existingPrOutput}`);
const existingPrs = JSON.parse(existingPrOutput);
if (Array.isArray(existingPrs) && existingPrs.length > 0) {
const existingPr = existingPrs[0];
// PR already exists - use it and store metadata
- console.log(
- `[CreatePR] PR already exists for branch ${branchName}: PR #${existingPr.number}`
- );
+ logger.info(`PR already exists for branch ${branchName}: PR #${existingPr.number}`);
prUrl = existingPr.url;
prNumber = existingPr.number;
prAlreadyExisted = true;
@@ -278,15 +277,15 @@ export function createCreatePRHandler() {
state: existingPr.state || 'open',
createdAt: new Date().toISOString(),
});
- console.log(
- `[CreatePR] Stored existing PR info for branch ${branchName}: PR #${existingPr.number}`
+ logger.debug(
+ `Stored existing PR info for branch ${branchName}: PR #${existingPr.number}`
);
} else {
- console.log(`[CreatePR] No existing PR found for branch ${branchName}`);
+ logger.debug(`No existing PR found for branch ${branchName}`);
}
} catch (listError) {
// gh pr list failed - log but continue to try creating
- console.log(`[CreatePR] gh pr list failed (this is ok, will try to create):`, listError);
+ logger.debug(`gh pr list failed (this is ok, will try to create):`, listError);
}
// Only create a new PR if one doesn't already exist
@@ -307,13 +306,13 @@ export function createCreatePRHandler() {
prCmd += ` --title "${title.replace(/"/g, '\\"')}" --body "${body.replace(/"/g, '\\"')}" ${draftFlag}`;
prCmd = prCmd.trim();
- console.log(`[CreatePR] Creating PR with command: ${prCmd}`);
+ logger.debug(`Creating PR with command: ${prCmd}`);
const { stdout: prOutput } = await execAsync(prCmd, {
cwd: worktreePath,
env: execEnv,
});
prUrl = prOutput.trim();
- console.log(`[CreatePR] PR created: ${prUrl}`);
+ logger.info(`PR created: ${prUrl}`);
// Extract PR number and store metadata for newly created PR
if (prUrl) {
@@ -329,11 +328,9 @@ export function createCreatePRHandler() {
state: draft ? 'draft' : 'open',
createdAt: new Date().toISOString(),
});
- console.log(
- `[CreatePR] Stored PR info for branch ${branchName}: PR #${prNumber}`
- );
+ logger.debug(`Stored PR info for branch ${branchName}: PR #${prNumber}`);
} catch (metadataError) {
- console.error('[CreatePR] Failed to store PR metadata:', metadataError);
+ logger.error('Failed to store PR metadata:', metadataError);
}
}
}
@@ -341,11 +338,11 @@ export function createCreatePRHandler() {
// gh CLI failed - check if it's "already exists" error and try to fetch the PR
const err = ghError as { stderr?: string; message?: string };
const errorMessage = err.stderr || err.message || 'PR creation failed';
- console.log(`[CreatePR] gh pr create failed: ${errorMessage}`);
+ logger.debug(`gh pr create failed: ${errorMessage}`);
// If error indicates PR already exists, try to fetch it
if (errorMessage.toLowerCase().includes('already exists')) {
- console.log(`[CreatePR] PR already exists error - trying to fetch existing PR`);
+ logger.debug(`PR already exists error - trying to fetch existing PR`);
try {
const { stdout: viewOutput } = await execAsync(
`gh pr view --json number,title,url,state`,
@@ -364,10 +361,10 @@ export function createCreatePRHandler() {
state: existingPr.state || 'open',
createdAt: new Date().toISOString(),
});
- console.log(`[CreatePR] Fetched and stored existing PR: #${existingPr.number}`);
+ logger.debug(`Fetched and stored existing PR: #${existingPr.number}`);
}
} catch (viewError) {
- console.error('[CreatePR] Failed to fetch existing PR:', viewError);
+ logger.error('Failed to fetch existing PR:', viewError);
prError = errorMessage;
}
} else {
diff --git a/apps/server/src/routes/worktree/routes/create.ts b/apps/server/src/routes/worktree/routes/create.ts
index 4eb2b2c9..061fa801 100644
--- a/apps/server/src/routes/worktree/routes/create.ts
+++ b/apps/server/src/routes/worktree/routes/create.ts
@@ -12,14 +12,21 @@ import { exec } from 'child_process';
import { promisify } from 'util';
import path from 'path';
import * as secureFs from '../../../lib/secure-fs.js';
+import type { EventEmitter } from '../../../lib/events.js';
+import { isGitRepo } from '@automaker/git-utils';
import {
- isGitRepo,
getErrorMessage,
logError,
normalizePath,
ensureInitialCommit,
+ isValidBranchName,
+ execGitCommand,
} from '../common.js';
import { trackBranch } from './branch-tracking.js';
+import { createLogger } from '@automaker/utils';
+import { runInitScript } from '../../../services/init-script-service.js';
+
+const logger = createLogger('Worktree');
const execAsync = promisify(exec);
@@ -74,7 +81,7 @@ async function findExistingWorktreeForBranch(
}
}
-export function createCreateHandler() {
+export function createCreateHandler(events: EventEmitter) {
return async (req: Request, res: Response): Promise => {
try {
const { projectPath, branchName, baseBranch } = req.body as {
@@ -91,6 +98,26 @@ export function createCreateHandler() {
return;
}
+ // Validate branch name to prevent command injection
+ if (!isValidBranchName(branchName)) {
+ res.status(400).json({
+ success: false,
+ error:
+ 'Invalid branch name. Branch names must contain only letters, numbers, dots, hyphens, underscores, and forward slashes.',
+ });
+ return;
+ }
+
+ // Validate base branch if provided
+ if (baseBranch && !isValidBranchName(baseBranch) && baseBranch !== 'HEAD') {
+ res.status(400).json({
+ success: false,
+ error:
+ 'Invalid base branch name. Branch names must contain only letters, numbers, dots, hyphens, underscores, and forward slashes.',
+ });
+ return;
+ }
+
if (!(await isGitRepo(projectPath))) {
res.status(400).json({
success: false,
@@ -114,8 +141,8 @@ export function createCreateHandler() {
if (existingWorktree) {
// Worktree already exists, return it as success (not an error)
// This handles manually created worktrees or worktrees from previous runs
- console.log(
- `[Worktree] Found existing worktree for branch "${branchName}" at: ${existingWorktree.path}`
+ logger.info(
+ `Found existing worktree for branch "${branchName}" at: ${existingWorktree.path}`
);
// Track the branch so it persists in the UI
@@ -140,30 +167,28 @@ export function createCreateHandler() {
// Create worktrees directory if it doesn't exist
await secureFs.mkdir(worktreesDir, { recursive: true });
- // Check if branch exists
+ // Check if branch exists (using array arguments to prevent injection)
let branchExists = false;
try {
- await execAsync(`git rev-parse --verify ${branchName}`, {
- cwd: projectPath,
- });
+ await execGitCommand(['rev-parse', '--verify', branchName], projectPath);
branchExists = true;
} catch {
// Branch doesn't exist
}
- // Create worktree
- let createCmd: string;
+ // Create worktree (using array arguments to prevent injection)
if (branchExists) {
// Use existing branch
- createCmd = `git worktree add "${worktreePath}" ${branchName}`;
+ await execGitCommand(['worktree', 'add', worktreePath, branchName], projectPath);
} else {
// Create new branch from base or HEAD
const base = baseBranch || 'HEAD';
- createCmd = `git worktree add -b ${branchName} "${worktreePath}" ${base}`;
+ await execGitCommand(
+ ['worktree', 'add', '-b', branchName, worktreePath, base],
+ projectPath
+ );
}
- await execAsync(createCmd, { cwd: projectPath });
-
// Note: We intentionally do NOT symlink .automaker to worktrees
// Features and config are always accessed from the main project path
// This avoids symlink loop issues when activating worktrees
@@ -174,6 +199,8 @@ export function createCreateHandler() {
// Resolve to absolute path for cross-platform compatibility
// normalizePath converts to forward slashes for API consistency
const absoluteWorktreePath = path.resolve(worktreePath);
+
+ // Respond immediately (non-blocking)
res.json({
success: true,
worktree: {
@@ -182,6 +209,17 @@ export function createCreateHandler() {
isNew: !branchExists,
},
});
+
+ // Trigger init script asynchronously after response
+ // runInitScript internally checks if script exists and hasn't already run
+ runInitScript({
+ projectPath,
+ worktreePath: absoluteWorktreePath,
+ branch: branchName,
+ emitter: events,
+ }).catch((err) => {
+ logger.error(`Init script failed for ${branchName}:`, err);
+ });
} catch (error) {
logError(error, 'Create worktree failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
diff --git a/apps/server/src/routes/worktree/routes/delete.ts b/apps/server/src/routes/worktree/routes/delete.ts
index 93857f78..6814add9 100644
--- a/apps/server/src/routes/worktree/routes/delete.ts
+++ b/apps/server/src/routes/worktree/routes/delete.ts
@@ -6,9 +6,11 @@ import type { Request, Response } from 'express';
import { exec } from 'child_process';
import { promisify } from 'util';
import { isGitRepo } from '@automaker/git-utils';
-import { getErrorMessage, logError } from '../common.js';
+import { getErrorMessage, logError, isValidBranchName, execGitCommand } from '../common.js';
+import { createLogger } from '@automaker/utils';
const execAsync = promisify(exec);
+const logger = createLogger('Worktree');
export function createDeleteHandler() {
return async (req: Request, res: Response): Promise => {
@@ -46,22 +48,28 @@ export function createDeleteHandler() {
// Could not get branch name
}
- // Remove the worktree
+ // Remove the worktree (using array arguments to prevent injection)
try {
- await execAsync(`git worktree remove "${worktreePath}" --force`, {
- cwd: projectPath,
- });
+ await execGitCommand(['worktree', 'remove', worktreePath, '--force'], projectPath);
} catch (error) {
// Try with prune if remove fails
- await execAsync('git worktree prune', { cwd: projectPath });
+ await execGitCommand(['worktree', 'prune'], projectPath);
}
// Optionally delete the branch
+ let branchDeleted = false;
if (deleteBranch && branchName && branchName !== 'main' && branchName !== 'master') {
- try {
- await execAsync(`git branch -D ${branchName}`, { cwd: projectPath });
- } catch {
- // Branch deletion failed, not critical
+ // Validate branch name to prevent command injection
+ if (!isValidBranchName(branchName)) {
+ logger.warn(`Invalid branch name detected, skipping deletion: ${branchName}`);
+ } else {
+ try {
+ await execGitCommand(['branch', '-D', branchName], projectPath);
+ branchDeleted = true;
+ } catch {
+ // Branch deletion failed, not critical
+ logger.warn(`Failed to delete branch: ${branchName}`);
+ }
}
}
@@ -69,7 +77,8 @@ export function createDeleteHandler() {
success: true,
deleted: {
worktreePath,
- branch: deleteBranch ? branchName : null,
+ branch: branchDeleted ? branchName : null,
+ branchDeleted,
},
});
} catch (error) {
diff --git a/apps/server/src/routes/worktree/routes/diffs.ts b/apps/server/src/routes/worktree/routes/diffs.ts
index 801dd514..75f43d7f 100644
--- a/apps/server/src/routes/worktree/routes/diffs.ts
+++ b/apps/server/src/routes/worktree/routes/diffs.ts
@@ -11,9 +11,10 @@ import { getGitRepositoryDiffs } from '../../common.js';
export function createDiffsHandler() {
return async (req: Request, res: Response): Promise => {
try {
- const { projectPath, featureId } = req.body as {
+ const { projectPath, featureId, useWorktrees } = req.body as {
projectPath: string;
featureId: string;
+ useWorktrees?: boolean;
};
if (!projectPath || !featureId) {
@@ -24,6 +25,19 @@ export function createDiffsHandler() {
return;
}
+ // If worktrees aren't enabled, don't probe .worktrees at all.
+ // This avoids noisy logs that make it look like features are "running in worktrees".
+ if (useWorktrees === false) {
+ const result = await getGitRepositoryDiffs(projectPath);
+ res.json({
+ success: true,
+ diff: result.diff,
+ files: result.files,
+ hasChanges: result.hasChanges,
+ });
+ return;
+ }
+
// Git worktrees are stored in project directory
const worktreePath = path.join(projectPath, '.worktrees', featureId);
@@ -41,7 +55,11 @@ export function createDiffsHandler() {
});
} catch (innerError) {
// Worktree doesn't exist - fallback to main project path
- logError(innerError, 'Worktree access failed, falling back to main project');
+ const code = (innerError as NodeJS.ErrnoException | undefined)?.code;
+ // ENOENT is expected when a feature has no worktree; don't log as an error.
+ if (code && code !== 'ENOENT') {
+ logError(innerError, 'Worktree access failed, falling back to main project');
+ }
try {
const result = await getGitRepositoryDiffs(projectPath);
diff --git a/apps/server/src/routes/worktree/routes/file-diff.ts b/apps/server/src/routes/worktree/routes/file-diff.ts
index 82ed79bd..4d29eb26 100644
--- a/apps/server/src/routes/worktree/routes/file-diff.ts
+++ b/apps/server/src/routes/worktree/routes/file-diff.ts
@@ -15,10 +15,11 @@ const execAsync = promisify(exec);
export function createFileDiffHandler() {
return async (req: Request, res: Response): Promise => {
try {
- const { projectPath, featureId, filePath } = req.body as {
+ const { projectPath, featureId, filePath, useWorktrees } = req.body as {
projectPath: string;
featureId: string;
filePath: string;
+ useWorktrees?: boolean;
};
if (!projectPath || !featureId || !filePath) {
@@ -29,6 +30,12 @@ export function createFileDiffHandler() {
return;
}
+ // If worktrees aren't enabled, don't probe .worktrees at all.
+ if (useWorktrees === false) {
+ res.json({ success: true, diff: '', filePath });
+ return;
+ }
+
// Git worktrees are stored in project directory
const worktreePath = path.join(projectPath, '.worktrees', featureId);
@@ -57,7 +64,11 @@ export function createFileDiffHandler() {
res.json({ success: true, diff, filePath });
} catch (innerError) {
- logError(innerError, 'Worktree file diff failed');
+ const code = (innerError as NodeJS.ErrnoException | undefined)?.code;
+ // ENOENT is expected when a feature has no worktree; don't log as an error.
+ if (code && code !== 'ENOENT') {
+ logError(innerError, 'Worktree file diff failed');
+ }
res.json({ success: true, diff: '', filePath });
}
} catch (error) {
diff --git a/apps/server/src/routes/worktree/routes/init-script.ts b/apps/server/src/routes/worktree/routes/init-script.ts
new file mode 100644
index 00000000..e11dfd53
--- /dev/null
+++ b/apps/server/src/routes/worktree/routes/init-script.ts
@@ -0,0 +1,280 @@
+/**
+ * Init Script routes - Read/write/run the worktree-init.sh file
+ *
+ * POST /init-script - Read the init script content
+ * PUT /init-script - Write content to the init script file
+ * DELETE /init-script - Delete the init script file
+ * POST /run-init-script - Run the init script for a worktree
+ */
+
+import type { Request, Response } from 'express';
+import path from 'path';
+import * as secureFs from '../../../lib/secure-fs.js';
+import { getErrorMessage, logError, isValidBranchName } from '../common.js';
+import { createLogger } from '@automaker/utils';
+import type { EventEmitter } from '../../../lib/events.js';
+import { forceRunInitScript } from '../../../services/init-script-service.js';
+
+const logger = createLogger('InitScript');
+
+/** Fixed path for init script within .automaker directory */
+const INIT_SCRIPT_FILENAME = 'worktree-init.sh';
+
+/** Maximum allowed size for init scripts (1MB) */
+const MAX_SCRIPT_SIZE_BYTES = 1024 * 1024;
+
+/**
+ * Get the full path to the init script for a project
+ */
+function getInitScriptPath(projectPath: string): string {
+ return path.join(projectPath, '.automaker', INIT_SCRIPT_FILENAME);
+}
+
+/**
+ * GET /init-script - Read the init script content
+ */
+export function createGetInitScriptHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const rawProjectPath = req.query.projectPath;
+
+ // Validate projectPath is a non-empty string (not an array or undefined)
+ if (!rawProjectPath || typeof rawProjectPath !== 'string') {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath query parameter is required',
+ });
+ return;
+ }
+
+ const projectPath = rawProjectPath.trim();
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath cannot be empty',
+ });
+ return;
+ }
+
+ const scriptPath = getInitScriptPath(projectPath);
+
+ try {
+ const content = await secureFs.readFile(scriptPath, 'utf-8');
+ res.json({
+ success: true,
+ exists: true,
+ content: content as string,
+ path: scriptPath,
+ });
+ } catch {
+ // File doesn't exist (or is unreadable) — treat both as "no script"
+ res.json({
+ success: true,
+ exists: false,
+ content: '',
+ path: scriptPath,
+ });
+ }
+ } catch (error) {
+ logError(error, 'Read init script failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * PUT /init-script - Write content to the init script file
+ */
+export function createPutInitScriptHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, content } = req.body as {
+ projectPath: string;
+ content: string;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath is required',
+ });
+ return;
+ }
+
+ if (typeof content !== 'string') {
+ res.status(400).json({
+ success: false,
+ error: 'content must be a string',
+ });
+ return;
+ }
+
+ // Validate script size to prevent disk exhaustion
+ const sizeBytes = Buffer.byteLength(content, 'utf-8');
+ if (sizeBytes > MAX_SCRIPT_SIZE_BYTES) {
+ res.status(400).json({
+ success: false,
+ error: `Script size (${Math.round(sizeBytes / 1024)}KB) exceeds maximum allowed size (${Math.round(MAX_SCRIPT_SIZE_BYTES / 1024)}KB)`,
+ });
+ return;
+ }
+
+ // Log warning if potentially dangerous patterns are detected (non-blocking)
+ const dangerousPatterns = [
+ /rm\s+-rf\s+\/(?!\s*\$)/i, // rm -rf / (not followed by variable)
+ /curl\s+.*\|\s*(?:bash|sh)/i, // curl | bash
+ /wget\s+.*\|\s*(?:bash|sh)/i, // wget | sh
+ ];
+
+ for (const pattern of dangerousPatterns) {
+ if (pattern.test(content)) {
+ logger.warn(
+ `Init script contains potentially dangerous pattern: ${pattern.source}. User responsibility to verify script safety.`
+ );
+ }
+ }
+
+ const scriptPath = getInitScriptPath(projectPath);
+ const automakerDir = path.dirname(scriptPath);
+
+ // Ensure .automaker directory exists
+ await secureFs.mkdir(automakerDir, { recursive: true });
+
+ // Write the script content
+ await secureFs.writeFile(scriptPath, content, 'utf-8');
+
+ logger.info(`Wrote init script to ${scriptPath}`);
+
+ res.json({
+ success: true,
+ path: scriptPath,
+ });
+ } catch (error) {
+ logError(error, 'Write init script failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * DELETE /init-script - Delete the init script file
+ */
+export function createDeleteInitScriptHandler() {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath } = req.body as { projectPath: string };
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath is required',
+ });
+ return;
+ }
+
+ const scriptPath = getInitScriptPath(projectPath);
+
+ await secureFs.rm(scriptPath, { force: true });
+ logger.info(`Deleted init script (if present) at ${scriptPath}`);
+ res.json({
+ success: true,
+ });
+ } catch (error) {
+ logError(error, 'Delete init script failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
+
+/**
+ * POST /run-init-script - Run (or re-run) the init script for a worktree
+ */
+export function createRunInitScriptHandler(events: EventEmitter) {
+ return async (req: Request, res: Response): Promise => {
+ try {
+ const { projectPath, worktreePath, branch } = req.body as {
+ projectPath: string;
+ worktreePath: string;
+ branch: string;
+ };
+
+ if (!projectPath) {
+ res.status(400).json({
+ success: false,
+ error: 'projectPath is required',
+ });
+ return;
+ }
+
+ if (!worktreePath) {
+ res.status(400).json({
+ success: false,
+ error: 'worktreePath is required',
+ });
+ return;
+ }
+
+ if (!branch) {
+ res.status(400).json({
+ success: false,
+ error: 'branch is required',
+ });
+ return;
+ }
+
+ // Validate branch name to prevent injection via environment variables
+ if (!isValidBranchName(branch)) {
+ res.status(400).json({
+ success: false,
+ error:
+ 'Invalid branch name. Branch names must contain only letters, numbers, dots, hyphens, underscores, and forward slashes.',
+ });
+ return;
+ }
+
+ const scriptPath = getInitScriptPath(projectPath);
+
+ // Check if script exists
+ try {
+ await secureFs.access(scriptPath);
+ } catch {
+ res.status(404).json({
+ success: false,
+ error: 'No init script found. Create one in Settings > Worktrees.',
+ });
+ return;
+ }
+
+ logger.info(`Running init script for branch "${branch}" (forced)`);
+
+ // Run the script asynchronously (non-blocking)
+ forceRunInitScript({
+ projectPath,
+ worktreePath,
+ branch,
+ emitter: events,
+ }).catch((err) => logger.error(`Init script failed for branch "${branch}":`, err));
+
+ // Return immediately - progress will be streamed via WebSocket events
+ res.json({
+ success: true,
+ message: 'Init script started',
+ });
+ } catch (error) {
+ logError(error, 'Run init script failed');
+ res.status(500).json({
+ success: false,
+ error: getErrorMessage(error),
+ });
+ }
+ };
+}
diff --git a/apps/server/src/routes/worktree/routes/list.ts b/apps/server/src/routes/worktree/routes/list.ts
index 93d93dad..bc70a341 100644
--- a/apps/server/src/routes/worktree/routes/list.ts
+++ b/apps/server/src/routes/worktree/routes/list.ts
@@ -2,18 +2,23 @@
* POST /list endpoint - List all git worktrees
*
* Returns actual git worktrees from `git worktree list`.
+ * Also scans .worktrees/ directory to discover worktrees that may have been
+ * created externally or whose git state was corrupted.
* Does NOT include tracked branches - only real worktrees with separate directories.
*/
import type { Request, Response } from 'express';
import { exec } from 'child_process';
import { promisify } from 'util';
+import path from 'path';
import * as secureFs from '../../../lib/secure-fs.js';
import { isGitRepo } from '@automaker/git-utils';
import { getErrorMessage, logError, normalizePath } from '../common.js';
import { readAllWorktreeMetadata, type WorktreePRInfo } from '../../../lib/worktree-metadata.js';
+import { createLogger } from '@automaker/utils';
const execAsync = promisify(exec);
+const logger = createLogger('Worktree');
interface WorktreeInfo {
path: string;
@@ -35,6 +40,87 @@ async function getCurrentBranch(cwd: string): Promise {
}
}
+/**
+ * Scan the .worktrees directory to discover worktrees that may exist on disk
+ * but are not registered with git (e.g., created externally or corrupted state).
+ */
+async function scanWorktreesDirectory(
+ projectPath: string,
+ knownWorktreePaths: Set
+): Promise> {
+ const discovered: Array<{ path: string; branch: string }> = [];
+ const worktreesDir = path.join(projectPath, '.worktrees');
+
+ try {
+ // Check if .worktrees directory exists
+ await secureFs.access(worktreesDir);
+ } catch {
+ // .worktrees directory doesn't exist
+ return discovered;
+ }
+
+ try {
+ const entries = await secureFs.readdir(worktreesDir, { withFileTypes: true });
+
+ for (const entry of entries) {
+ if (!entry.isDirectory()) continue;
+
+ const worktreePath = path.join(worktreesDir, entry.name);
+ const normalizedPath = normalizePath(worktreePath);
+
+ // Skip if already known from git worktree list
+ if (knownWorktreePaths.has(normalizedPath)) continue;
+
+ // Check if this is a valid git repository
+ const gitPath = path.join(worktreePath, '.git');
+ try {
+ const gitStat = await secureFs.stat(gitPath);
+
+ // Git worktrees have a .git FILE (not directory) that points to the parent repo
+ // Regular repos have a .git DIRECTORY
+ if (gitStat.isFile() || gitStat.isDirectory()) {
+ // Try to get the branch name
+ const branch = await getCurrentBranch(worktreePath);
+ if (branch) {
+ logger.info(
+ `Discovered worktree in .worktrees/ not in git worktree list: ${entry.name} (branch: ${branch})`
+ );
+ discovered.push({
+ path: normalizedPath,
+ branch,
+ });
+ } else {
+ // Try to get branch from HEAD if branch --show-current fails (detached HEAD)
+ try {
+ const { stdout: headRef } = await execAsync('git rev-parse --abbrev-ref HEAD', {
+ cwd: worktreePath,
+ });
+ const headBranch = headRef.trim();
+ if (headBranch && headBranch !== 'HEAD') {
+ logger.info(
+ `Discovered worktree in .worktrees/ not in git worktree list: ${entry.name} (branch: ${headBranch})`
+ );
+ discovered.push({
+ path: normalizedPath,
+ branch: headBranch,
+ });
+ }
+ } catch {
+ // Can't determine branch, skip this directory
+ }
+ }
+ }
+ } catch {
+ // Not a git repo, skip
+ }
+ }
+ } catch (error) {
+ logger.warn(`Failed to scan .worktrees directory: ${getErrorMessage(error)}`);
+ }
+
+ return discovered;
+}
+
export function createListHandler() {
return async (req: Request, res: Response): Promise => {
try {
@@ -116,6 +202,22 @@ export function createListHandler() {
}
}
+ // Scan .worktrees directory to discover worktrees that exist on disk
+ // but are not registered with git (e.g., created externally)
+ const knownPaths = new Set(worktrees.map((w) => w.path));
+ const discoveredWorktrees = await scanWorktreesDirectory(projectPath, knownPaths);
+
+ // Add discovered worktrees to the list
+ for (const discovered of discoveredWorktrees) {
+ worktrees.push({
+ path: discovered.path,
+ branch: discovered.branch,
+ isMain: false,
+ isCurrent: discovered.branch === currentBranch,
+ hasWorktree: true,
+ });
+ }
+
// Read all worktree metadata to get PR info
const allMetadata = await readAllWorktreeMetadata(projectPath);
diff --git a/apps/server/src/routes/worktree/routes/open-in-editor.ts b/apps/server/src/routes/worktree/routes/open-in-editor.ts
index 40e71b00..c5ea6f9e 100644
--- a/apps/server/src/routes/worktree/routes/open-in-editor.ts
+++ b/apps/server/src/routes/worktree/routes/open-in-editor.ts
@@ -1,78 +1,40 @@
/**
* POST /open-in-editor endpoint - Open a worktree directory in the default code editor
* GET /default-editor endpoint - Get the name of the default code editor
+ * POST /refresh-editors endpoint - Clear editor cache and re-detect available editors
+ *
+ * This module uses @automaker/platform for cross-platform editor detection and launching.
*/
import type { Request, Response } from 'express';
-import { exec } from 'child_process';
-import { promisify } from 'util';
+import { isAbsolute } from 'path';
+import {
+ clearEditorCache,
+ detectAllEditors,
+ detectDefaultEditor,
+ openInEditor,
+ openInFileManager,
+} from '@automaker/platform';
+import { createLogger } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js';
-const execAsync = promisify(exec);
+const logger = createLogger('open-in-editor');
-// Editor detection with caching
-interface EditorInfo {
- name: string;
- command: string;
-}
-
-let cachedEditor: EditorInfo | null = null;
-
-/**
- * Detect which code editor is available on the system
- */
-async function detectDefaultEditor(): Promise {
- // Return cached result if available
- if (cachedEditor) {
- return cachedEditor;
- }
-
- // Try Cursor first (if user has Cursor, they probably prefer it)
- try {
- await execAsync('which cursor || where cursor');
- cachedEditor = { name: 'Cursor', command: 'cursor' };
- return cachedEditor;
- } catch {
- // Cursor not found
- }
-
- // Try VS Code
- try {
- await execAsync('which code || where code');
- cachedEditor = { name: 'VS Code', command: 'code' };
- return cachedEditor;
- } catch {
- // VS Code not found
- }
-
- // Try Zed
- try {
- await execAsync('which zed || where zed');
- cachedEditor = { name: 'Zed', command: 'zed' };
- return cachedEditor;
- } catch {
- // Zed not found
- }
-
- // Try Sublime Text
- try {
- await execAsync('which subl || where subl');
- cachedEditor = { name: 'Sublime Text', command: 'subl' };
- return cachedEditor;
- } catch {
- // Sublime not found
- }
-
- // Fallback to file manager
- const platform = process.platform;
- if (platform === 'darwin') {
- cachedEditor = { name: 'Finder', command: 'open' };
- } else if (platform === 'win32') {
- cachedEditor = { name: 'Explorer', command: 'explorer' };
- } else {
- cachedEditor = { name: 'File Manager', command: 'xdg-open' };
- }
- return cachedEditor;
+export function createGetAvailableEditorsHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ const editors = await detectAllEditors();
+ res.json({
+ success: true,
+ result: {
+ editors,
+ },
+ });
+ } catch (error) {
+ logError(error, 'Get available editors failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
}
export function createGetDefaultEditorHandler() {
@@ -93,11 +55,41 @@ export function createGetDefaultEditorHandler() {
};
}
+/**
+ * Handler to refresh the editor cache and re-detect available editors
+ * Useful when the user has installed/uninstalled editors
+ */
+export function createRefreshEditorsHandler() {
+ return async (_req: Request, res: Response): Promise => {
+ try {
+ // Clear the cache
+ clearEditorCache();
+
+ // Re-detect editors (this will repopulate the cache)
+ const editors = await detectAllEditors();
+
+ logger.info(`Editor cache refreshed, found ${editors.length} editors`);
+
+ res.json({
+ success: true,
+ result: {
+ editors,
+ message: `Found ${editors.length} available editors`,
+ },
+ });
+ } catch (error) {
+ logError(error, 'Refresh editors failed');
+ res.status(500).json({ success: false, error: getErrorMessage(error) });
+ }
+ };
+}
+
export function createOpenInEditorHandler() {
return async (req: Request, res: Response): Promise => {
try {
- const { worktreePath } = req.body as {
+ const { worktreePath, editorCommand } = req.body as {
worktreePath: string;
+ editorCommand?: string;
};
if (!worktreePath) {
@@ -108,42 +100,44 @@ export function createOpenInEditorHandler() {
return;
}
- const editor = await detectDefaultEditor();
+ // Security: Validate that worktreePath is an absolute path
+ if (!isAbsolute(worktreePath)) {
+ res.status(400).json({
+ success: false,
+ error: 'worktreePath must be an absolute path',
+ });
+ return;
+ }
try {
- await execAsync(`${editor.command} "${worktreePath}"`);
+ // Use the platform utility to open in editor
+ const result = await openInEditor(worktreePath, editorCommand);
res.json({
success: true,
result: {
- message: `Opened ${worktreePath} in ${editor.name}`,
- editorName: editor.name,
+ message: `Opened ${worktreePath} in ${result.editorName}`,
+ editorName: result.editorName,
},
});
} catch (editorError) {
- // If the detected editor fails, try opening in default file manager as fallback
- const platform = process.platform;
- let openCommand: string;
- let fallbackName: string;
+ // If the specified editor fails, try opening in default file manager as fallback
+ logger.warn(
+ `Failed to open in editor, falling back to file manager: ${getErrorMessage(editorError)}`
+ );
- if (platform === 'darwin') {
- openCommand = `open "${worktreePath}"`;
- fallbackName = 'Finder';
- } else if (platform === 'win32') {
- openCommand = `explorer "${worktreePath}"`;
- fallbackName = 'Explorer';
- } else {
- openCommand = `xdg-open "${worktreePath}"`;
- fallbackName = 'File Manager';
+ try {
+ const result = await openInFileManager(worktreePath);
+ res.json({
+ success: true,
+ result: {
+ message: `Opened ${worktreePath} in ${result.editorName}`,
+ editorName: result.editorName,
+ },
+ });
+ } catch (fallbackError) {
+ // Both editor and file manager failed
+ throw fallbackError;
}
-
- await execAsync(openCommand);
- res.json({
- success: true,
- result: {
- message: `Opened ${worktreePath} in ${fallbackName}`,
- editorName: fallbackName,
- },
- });
}
} catch (error) {
logError(error, 'Open in editor failed');
diff --git a/apps/server/src/routes/worktree/routes/pr-info.ts b/apps/server/src/routes/worktree/routes/pr-info.ts
index cb64ccd9..3d1f6b16 100644
--- a/apps/server/src/routes/worktree/routes/pr-info.ts
+++ b/apps/server/src/routes/worktree/routes/pr-info.ts
@@ -11,6 +11,9 @@ import {
isValidBranchName,
isGhCliAvailable,
} from '../common.js';
+import { createLogger } from '@automaker/utils';
+
+const logger = createLogger('PRInfo');
export interface PRComment {
id: number;
@@ -174,7 +177,7 @@ export function createPRInfoHandler() {
})
);
} catch (error) {
- console.warn('[PRInfo] Failed to fetch PR comments:', error);
+ logger.warn('Failed to fetch PR comments:', error);
}
// Get review comments (inline code comments)
@@ -209,10 +212,10 @@ export function createPRInfoHandler() {
})
);
} catch (error) {
- console.warn('[PRInfo] Failed to fetch review comments:', error);
+ logger.warn('Failed to fetch review comments:', error);
}
} else {
- console.warn('[PRInfo] Cannot fetch review comments: repository info not available');
+ logger.warn('Cannot fetch review comments: repository info not available');
}
const prInfo: PRInfo = {
diff --git a/apps/server/src/services/agent-service.ts b/apps/server/src/services/agent-service.ts
index c507d81b..359719d3 100644
--- a/apps/server/src/services/agent-service.ts
+++ b/apps/server/src/services/agent-service.ts
@@ -6,13 +6,16 @@
import path from 'path';
import * as secureFs from '../lib/secure-fs.js';
import type { EventEmitter } from '../lib/events.js';
-import type { ExecuteOptions } from '@automaker/types';
+import type { ExecuteOptions, ThinkingLevel, ReasoningEffort } from '@automaker/types';
+import { stripProviderPrefix } from '@automaker/types';
import {
readImageAsBase64,
buildPromptWithImages,
isAbortError,
loadContextFiles,
createLogger,
+ classifyError,
+ getUserFriendlyErrorMessage,
} from '@automaker/utils';
import { ProviderFactory } from '../providers/provider-factory.js';
import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js';
@@ -20,11 +23,12 @@ import { PathNotAllowedError } from '@automaker/platform';
import type { SettingsService } from './settings-service.js';
import {
getAutoLoadClaudeMdSetting,
- getEnableSandboxModeSetting,
filterClaudeMdFromContext,
getMCPServersFromSettings,
- getMCPPermissionSettings,
getPromptCustomization,
+ getSkillsConfiguration,
+ getSubagentsConfiguration,
+ getCustomSubagents,
} from '../lib/settings-helpers.js';
interface Message {
@@ -45,6 +49,7 @@ interface QueuedPrompt {
message: string;
imagePaths?: string[];
model?: string;
+ thinkingLevel?: ThinkingLevel;
addedAt: string;
}
@@ -54,6 +59,8 @@ interface Session {
abortController: AbortController | null;
workingDirectory: string;
model?: string;
+ thinkingLevel?: ThinkingLevel; // Thinking level for Claude models
+ reasoningEffort?: ReasoningEffort; // Reasoning effort for Codex models
sdkSessionId?: string; // Claude SDK session ID for conversation continuity
promptQueue: QueuedPrompt[]; // Queue of prompts to auto-run after current task
}
@@ -142,12 +149,16 @@ export class AgentService {
workingDirectory,
imagePaths,
model,
+ thinkingLevel,
+ reasoningEffort,
}: {
sessionId: string;
message: string;
workingDirectory?: string;
imagePaths?: string[];
model?: string;
+ thinkingLevel?: ThinkingLevel;
+ reasoningEffort?: ReasoningEffort;
}) {
const session = this.sessions.get(sessionId);
if (!session) {
@@ -160,11 +171,29 @@ export class AgentService {
throw new Error('Agent is already processing a message');
}
- // Update session model if provided
+ // Update session model, thinking level, and reasoning effort if provided
if (model) {
session.model = model;
await this.updateSession(sessionId, { model });
}
+ if (thinkingLevel !== undefined) {
+ session.thinkingLevel = thinkingLevel;
+ }
+ if (reasoningEffort !== undefined) {
+ session.reasoningEffort = reasoningEffort;
+ }
+
+ // Validate vision support before processing images
+ const effectiveModel = model || session.model;
+ if (imagePaths && imagePaths.length > 0 && effectiveModel) {
+ const supportsVision = ProviderFactory.modelSupportsVision(effectiveModel);
+ if (!supportsVision) {
+ throw new Error(
+ `This model (${effectiveModel}) does not support image input. ` +
+ `Please switch to a model that supports vision, or remove the images and try again.`
+ );
+ }
+ }
// Read images and convert to base64
const images: Message['images'] = [];
@@ -226,22 +255,34 @@ export class AgentService {
'[AgentService]'
);
- // Load enableSandboxMode setting (global setting only)
- const enableSandboxMode = await getEnableSandboxModeSetting(
- this.settingsService,
- '[AgentService]'
- );
-
// Load MCP servers from settings (global setting only)
const mcpServers = await getMCPServersFromSettings(this.settingsService, '[AgentService]');
- // Load MCP permission settings (global setting only)
- const mcpPermissions = await getMCPPermissionSettings(this.settingsService, '[AgentService]');
+ // Get Skills configuration from settings
+ const skillsConfig = this.settingsService
+ ? await getSkillsConfiguration(this.settingsService)
+ : { enabled: false, sources: [] as Array<'user' | 'project'>, shouldIncludeInTools: false };
- // Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.)
+ // Get Subagents configuration from settings
+ const subagentsConfig = this.settingsService
+ ? await getSubagentsConfiguration(this.settingsService)
+ : { enabled: false, sources: [] as Array<'user' | 'project'>, shouldIncludeInTools: false };
+
+ // Get custom subagents from settings (merge global + project-level) only if enabled
+ const customSubagents =
+ this.settingsService && subagentsConfig.enabled
+ ? await getCustomSubagents(this.settingsService, effectiveWorkDir)
+ : undefined;
+
+ // Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
+ // Use the user's message as task context for smart memory selection
const contextResult = await loadContextFiles({
projectPath: effectiveWorkDir,
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
+ taskContext: {
+ title: message.substring(0, 200), // Use first 200 chars as title
+ description: message,
+ },
});
// When autoLoadClaudeMd is enabled, filter out CLAUDE.md to avoid duplication
@@ -255,6 +296,9 @@ export class AgentService {
: baseSystemPrompt;
// Build SDK options using centralized configuration
+ // Use thinking level and reasoning effort from request, or fall back to session's stored values
+ const effectiveThinkingLevel = thinkingLevel ?? session.thinkingLevel;
+ const effectiveReasoningEffort = reasoningEffort ?? session.reasoningEffort;
const sdkOptions = createChatOptions({
cwd: effectiveWorkDir,
model: model,
@@ -262,36 +306,78 @@ export class AgentService {
systemPrompt: combinedSystemPrompt,
abortController: session.abortController!,
autoLoadClaudeMd,
- enableSandboxMode,
+ thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
- mcpAutoApproveTools: mcpPermissions.mcpAutoApproveTools,
- mcpUnrestrictedTools: mcpPermissions.mcpUnrestrictedTools,
});
// Extract model, maxTurns, and allowedTools from SDK options
const effectiveModel = sdkOptions.model!;
const maxTurns = sdkOptions.maxTurns;
- const allowedTools = sdkOptions.allowedTools as string[] | undefined;
+ let allowedTools = sdkOptions.allowedTools as string[] | undefined;
- // Get provider for this model
+ // Build merged settingSources array using Set for automatic deduplication
+ const sdkSettingSources = (sdkOptions.settingSources ?? []).filter(
+ (source): source is 'user' | 'project' => source === 'user' || source === 'project'
+ );
+ const skillSettingSources = skillsConfig.enabled ? skillsConfig.sources : [];
+ const settingSources = [...new Set([...sdkSettingSources, ...skillSettingSources])];
+
+ // Enhance allowedTools with Skills and Subagents tools
+ // These tools are not in the provider's default set - they're added dynamically based on settings
+ const needsSkillTool = skillsConfig.shouldIncludeInTools;
+ const needsTaskTool =
+ subagentsConfig.shouldIncludeInTools &&
+ customSubagents &&
+ Object.keys(customSubagents).length > 0;
+
+ // Base tools that match the provider's default set
+ const baseTools = ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'];
+
+ if (allowedTools) {
+ allowedTools = [...allowedTools]; // Create a copy to avoid mutating SDK options
+ // Add Skill tool if skills are enabled
+ if (needsSkillTool && !allowedTools.includes('Skill')) {
+ allowedTools.push('Skill');
+ }
+ // Add Task tool if custom subagents are configured
+ if (needsTaskTool && !allowedTools.includes('Task')) {
+ allowedTools.push('Task');
+ }
+ } else if (needsSkillTool || needsTaskTool) {
+ // If no allowedTools specified but we need to add Skill/Task tools,
+ // build the full list including base tools
+ allowedTools = [...baseTools];
+ if (needsSkillTool) {
+ allowedTools.push('Skill');
+ }
+ if (needsTaskTool) {
+ allowedTools.push('Task');
+ }
+ }
+
+ // Get provider for this model (with prefix)
const provider = ProviderFactory.getProviderForModel(effectiveModel);
+ // Strip provider prefix - providers should receive bare model IDs
+ const bareModel = stripProviderPrefix(effectiveModel);
+
// Build options for provider
const options: ExecuteOptions = {
prompt: '', // Will be set below based on images
- model: effectiveModel,
+ model: bareModel, // Bare model ID (e.g., "gpt-5.1-codex-max", "composer-1")
+ originalModel: effectiveModel, // Original with prefix for logging (e.g., "codex-gpt-5.1-codex-max")
cwd: effectiveWorkDir,
systemPrompt: sdkOptions.systemPrompt,
maxTurns: maxTurns,
allowedTools: allowedTools,
abortController: session.abortController!,
conversationHistory: conversationHistory.length > 0 ? conversationHistory : undefined,
- settingSources: sdkOptions.settingSources,
- sandbox: sdkOptions.sandbox, // Pass sandbox configuration
+ settingSources: settingSources.length > 0 ? settingSources : undefined,
sdkSessionId: session.sdkSessionId, // Pass SDK session ID for resuming
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
- mcpAutoApproveTools: mcpPermissions.mcpAutoApproveTools, // Pass MCP auto-approve setting
- mcpUnrestrictedTools: mcpPermissions.mcpUnrestrictedTools, // Pass MCP unrestricted tools setting
+ agents: customSubagents, // Pass custom subagents for task delegation
+ thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
+ reasoningEffort: effectiveReasoningEffort, // Pass reasoning effort for Codex models
};
// Build prompt content with images
@@ -372,6 +458,53 @@ export class AgentService {
content: responseText,
toolUses,
});
+ } else if (msg.type === 'error') {
+ // Some providers (like Codex CLI/SaaS or Cursor CLI) surface failures as
+ // streamed error messages instead of throwing. Handle these here so the
+ // Agent Runner UX matches the Claude/Cursor behavior without changing
+ // their provider implementations.
+ const rawErrorText =
+ (typeof msg.error === 'string' && msg.error.trim()) ||
+ 'Unexpected error from provider during agent execution.';
+
+ const errorInfo = classifyError(new Error(rawErrorText));
+
+ // Keep the provider-supplied text intact (Codex already includes helpful tips),
+ // only add a small rate-limit hint when we can detect it.
+ const enhancedText = errorInfo.isRateLimit
+ ? `${rawErrorText}\n\nTip: It looks like you hit a rate limit. Try waiting a bit or reducing concurrent Agent Runner / Auto Mode tasks.`
+ : rawErrorText;
+
+ this.logger.error('Provider error during agent execution:', {
+ type: errorInfo.type,
+ message: errorInfo.message,
+ });
+
+ // Mark session as no longer running so the UI and queue stay in sync
+ session.isRunning = false;
+ session.abortController = null;
+
+ const errorMessage: Message = {
+ id: this.generateId(),
+ role: 'assistant',
+ content: `Error: ${enhancedText}`,
+ timestamp: new Date().toISOString(),
+ isError: true,
+ };
+
+ session.messages.push(errorMessage);
+ await this.saveSession(sessionId, session.messages);
+
+ this.emitAgentEvent(sessionId, {
+ type: 'error',
+ error: enhancedText,
+ message: errorMessage,
+ });
+
+ // Don't continue streaming after an error message
+ return {
+ success: false,
+ };
}
}
@@ -628,7 +761,12 @@ export class AgentService {
*/
async addToQueue(
sessionId: string,
- prompt: { message: string; imagePaths?: string[]; model?: string }
+ prompt: {
+ message: string;
+ imagePaths?: string[];
+ model?: string;
+ thinkingLevel?: ThinkingLevel;
+ }
): Promise<{ success: boolean; queuedPrompt?: QueuedPrompt; error?: string }> {
const session = this.sessions.get(sessionId);
if (!session) {
@@ -640,6 +778,7 @@ export class AgentService {
message: prompt.message,
imagePaths: prompt.imagePaths,
model: prompt.model,
+ thinkingLevel: prompt.thinkingLevel,
addedAt: new Date().toISOString(),
};
@@ -769,6 +908,7 @@ export class AgentService {
message: nextPrompt.message,
imagePaths: nextPrompt.imagePaths,
model: nextPrompt.model,
+ thinkingLevel: nextPrompt.thinkingLevel,
});
} catch (error) {
this.logger.error('Failed to process queued prompt:', error);
diff --git a/apps/server/src/services/auto-mode-service.ts b/apps/server/src/services/auto-mode-service.ts
index d5f7d051..a587e1eb 100644
--- a/apps/server/src/services/auto-mode-service.ts
+++ b/apps/server/src/services/auto-mode-service.ts
@@ -10,22 +10,37 @@
*/
import { ProviderFactory } from '../providers/provider-factory.js';
+import { simpleQuery } from '../providers/simple-query-service.js';
import type {
ExecuteOptions,
Feature,
+ ModelProvider,
+ PipelineStep,
FeatureStatusWithPipeline,
PipelineConfig,
- PipelineStep,
+ ThinkingLevel,
+ PlanningMode,
} from '@automaker/types';
+import { DEFAULT_PHASE_MODELS, stripProviderPrefix } from '@automaker/types';
import {
buildPromptWithImages,
- isAbortError,
classifyError,
loadContextFiles,
+ appendLearning,
+ recordMemoryUsage,
+ createLogger,
} from '@automaker/utils';
-import { resolveModelString, DEFAULT_MODELS } from '@automaker/model-resolver';
+
+const logger = createLogger('AutoMode');
+import { resolveModelString, resolvePhaseModel, DEFAULT_MODELS } from '@automaker/model-resolver';
import { resolveDependencies, areDependenciesSatisfied } from '@automaker/dependency-resolver';
-import { getFeatureDir, getAutomakerDir, getFeaturesDir } from '@automaker/platform';
+import {
+ getFeatureDir,
+ getAutomakerDir,
+ getFeaturesDir,
+ getExecutionStatePath,
+ ensureAutomakerDir,
+} from '@automaker/platform';
import { exec } from 'child_process';
import { promisify } from 'util';
import path from 'path';
@@ -41,17 +56,14 @@ import type { SettingsService } from './settings-service.js';
import { pipelineService, PipelineService } from './pipeline-service.js';
import {
getAutoLoadClaudeMdSetting,
- getEnableSandboxModeSetting,
filterClaudeMdFromContext,
getMCPServersFromSettings,
- getMCPPermissionSettings,
getPromptCustomization,
} from '../lib/settings-helpers.js';
const execAsync = promisify(exec);
-// Planning mode types for spec-driven development
-type PlanningMode = 'skip' | 'lite' | 'spec' | 'full';
+// PlanningMode type is imported from @automaker/types
interface ParsedTask {
id: string; // e.g., "T001"
@@ -194,6 +206,8 @@ interface RunningFeature {
abortController: AbortController;
isAutoMode: boolean;
startTime: number;
+ model?: string;
+ provider?: ModelProvider;
}
interface AutoLoopState {
@@ -216,6 +230,29 @@ interface AutoModeConfig {
projectPath: string;
}
+/**
+ * Execution state for recovery after server restart
+ * Tracks which features were running and auto-loop configuration
+ */
+interface ExecutionState {
+ version: 1;
+ autoLoopWasRunning: boolean;
+ maxConcurrency: number;
+ projectPath: string;
+ runningFeatureIds: string[];
+ savedAt: string;
+}
+
+// Default empty execution state
+const DEFAULT_EXECUTION_STATE: ExecutionState = {
+ version: 1,
+ autoLoopWasRunning: false,
+ maxConcurrency: 3,
+ projectPath: '',
+ runningFeatureIds: [],
+ savedAt: '',
+};
+
// Constants for consecutive failure tracking
const CONSECUTIVE_FAILURE_THRESHOLD = 3; // Pause after 3 consecutive failures
const FAILURE_WINDOW_MS = 60000; // Failures within 1 minute count as consecutive
@@ -278,8 +315,8 @@ export class AutoModeService {
this.pausedDueToFailures = true;
const failureCount = this.consecutiveFailures.length;
- console.log(
- `[AutoMode] Pausing auto loop after ${failureCount} consecutive failures. Last error: ${errorInfo.type}`
+ logger.info(
+ `Pausing auto loop after ${failureCount} consecutive failures. Last error: ${errorInfo.type}`
);
// Emit event to notify UI
@@ -337,9 +374,14 @@ export class AutoModeService {
projectPath,
});
+ // Save execution state for recovery after restart
+ await this.saveExecutionState(projectPath);
+
+ // Note: Memory folder initialization is now handled by loadContextFiles
+
// Run the loop in the background
this.runAutoLoop().catch((error) => {
- console.error('[AutoMode] Loop error:', error);
+ logger.error('Loop error:', error);
const errorInfo = classifyError(error);
this.emitAutoModeEvent('auto_mode_error', {
error: errorInfo.message,
@@ -384,13 +426,13 @@ export class AutoModeService {
this.config!.useWorktrees,
true
).catch((error) => {
- console.error(`[AutoMode] Feature ${nextFeature.id} error:`, error);
+ logger.error(`Feature ${nextFeature.id} error:`, error);
});
}
await this.sleep(2000);
} catch (error) {
- console.error('[AutoMode] Loop iteration error:', error);
+ logger.error('Loop iteration error:', error);
await this.sleep(5000);
}
}
@@ -403,17 +445,23 @@ export class AutoModeService {
*/
async stopAutoLoop(): Promise<void> {
const wasRunning = this.autoLoopRunning;
+ const projectPath = this.config?.projectPath;
this.autoLoopRunning = false;
if (this.autoLoopAbortController) {
this.autoLoopAbortController.abort();
this.autoLoopAbortController = null;
}
+ // Clear execution state when auto-loop is explicitly stopped
+ if (projectPath) {
+ await this.clearExecutionState(projectPath);
+ }
+
// Emit stop event immediately when user explicitly stops
if (wasRunning) {
this.emitAutoModeEvent('auto_mode_stopped', {
message: 'Auto mode stopped',
- projectPath: this.config?.projectPath,
+ projectPath,
});
}
@@ -454,6 +502,11 @@ export class AutoModeService {
};
this.runningFeatures.set(featureId, tempRunningFeature);
+ // Save execution state when feature starts
+ if (isAutoMode) {
+ await this.saveExecutionState(projectPath);
+ }
+
try {
// Validate that project path is allowed using centralized validation
validateWorkingDirectory(projectPath);
@@ -463,8 +516,8 @@ export class AutoModeService {
if (!options?.continuationPrompt) {
const hasExistingContext = await this.contextExists(projectPath, featureId);
if (hasExistingContext) {
- console.log(
- `[AutoMode] Feature ${featureId} has existing context, resuming instead of starting fresh`
+ logger.info(
+ `Feature ${featureId} has existing context, resuming instead of starting fresh`
);
// Remove from running features temporarily, resumeFeature will add it back
this.runningFeatures.delete(featureId);
@@ -499,12 +552,10 @@ export class AutoModeService {
worktreePath = await this.findExistingWorktreeForBranch(projectPath, branchName);
if (worktreePath) {
- console.log(`[AutoMode] Using worktree for branch "${branchName}": ${worktreePath}`);
+ logger.info(`Using worktree for branch "${branchName}": ${worktreePath}`);
} else {
// Worktree doesn't exist - log warning and continue with project path
- console.warn(
- `[AutoMode] Worktree for branch "${branchName}" not found, using project path`
- );
+ logger.warn(`Worktree for branch "${branchName}" not found, using project path`);
}
}
@@ -530,21 +581,27 @@ export class AutoModeService {
// Build the prompt - use continuation prompt if provided (for recovery after plan approval)
let prompt: string;
- // Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) - passed as system prompt
+ // Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
+ // Context loader uses task context to select relevant memory files
const contextResult = await loadContextFiles({
projectPath,
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
+ taskContext: {
+ title: feature.title ?? '',
+ description: feature.description ?? '',
+ },
});
// When autoLoadClaudeMd is enabled, filter out CLAUDE.md to avoid duplication
// (SDK handles CLAUDE.md via settingSources), but keep other context files like CODE_QUALITY.md
- const contextFilesPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
+ // Note: contextResult.formattedPrompt now includes both context AND memory
+ const combinedSystemPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
if (options?.continuationPrompt) {
// Continuation prompt is used when recovering from a plan approval
// The plan was already approved, so skip the planning phase
prompt = options.continuationPrompt;
- console.log(`[AutoMode] Using continuation prompt for feature ${featureId}`);
+ logger.info(`Using continuation prompt for feature ${featureId}`);
} else {
// Normal flow: build prompt with planning phase
const featurePrompt = this.buildFeaturePrompt(feature);
@@ -566,9 +623,16 @@ export class AutoModeService {
typeof img === 'string' ? img : img.path
);
- // Get model from feature
+ // Get model from feature and determine provider
const model = resolveModelString(feature.model, DEFAULT_MODELS.claude);
- console.log(`[AutoMode] Executing feature ${featureId} with model: ${model} in ${workDir}`);
+ const provider = ProviderFactory.getProviderNameForModel(model);
+ logger.info(
+ `Executing feature ${featureId} with model: ${model}, provider: ${provider} in ${workDir}`
+ );
+
+ // Store model and provider in running feature for tracking
+ tempRunningFeature.model = model;
+ tempRunningFeature.provider = provider;
// Run the agent with the feature's model and images
// Context files are passed as system prompt for higher priority
@@ -584,8 +648,9 @@ export class AutoModeService {
projectPath,
planningMode: feature.planningMode,
requirePlanApproval: feature.requirePlanApproval,
- systemPrompt: contextFilesPrompt || undefined,
+ systemPrompt: combinedSystemPrompt || undefined,
autoLoadClaudeMd,
+ thinkingLevel: feature.thinkingLevel,
}
);
@@ -615,6 +680,36 @@ export class AutoModeService {
// Record success to reset consecutive failure tracking
this.recordSuccess();
+ // Record learnings and memory usage after successful feature completion
+ try {
+ const featureDir = getFeatureDir(projectPath, featureId);
+ const outputPath = path.join(featureDir, 'agent-output.md');
+ let agentOutput = '';
+ try {
+ const outputContent = await secureFs.readFile(outputPath, 'utf-8');
+ agentOutput =
+ typeof outputContent === 'string' ? outputContent : outputContent.toString();
+ } catch {
+ // Agent output might not exist yet
+ }
+
+ // Record memory usage if we loaded any memory files
+ if (contextResult.memoryFiles.length > 0 && agentOutput) {
+ await recordMemoryUsage(
+ projectPath,
+ contextResult.memoryFiles,
+ agentOutput,
+ true, // success
+ secureFs as Parameters<typeof recordMemoryUsage>[4]
+ );
+ }
+
+ // Extract and record learnings from the agent output
+ await this.recordLearningsFromFeature(projectPath, feature, agentOutput);
+ } catch (learningError) {
+ logger.warn('Failed to record learnings:', learningError);
+ }
+
this.emitAutoModeEvent('auto_mode_feature_complete', {
featureId,
passes: true,
@@ -622,6 +717,8 @@ export class AutoModeService {
(Date.now() - tempRunningFeature.startTime) / 1000
)}s${finalStatus === 'verified' ? ' - auto-verified' : ''}`,
projectPath,
+ model: tempRunningFeature.model,
+ provider: tempRunningFeature.provider,
});
} catch (error) {
const errorInfo = classifyError(error);
@@ -634,7 +731,7 @@ export class AutoModeService {
projectPath,
});
} else {
- console.error(`[AutoMode] Feature ${featureId} failed:`, error);
+ logger.error(`Feature ${featureId} failed:`, error);
await this.updateFeatureStatus(projectPath, featureId, 'backlog');
this.emitAutoModeEvent('auto_mode_error', {
featureId,
@@ -659,11 +756,16 @@ export class AutoModeService {
}
}
} finally {
- console.log(`[AutoMode] Feature ${featureId} execution ended, cleaning up runningFeatures`);
- console.log(
- `[AutoMode] Pending approvals at cleanup: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
+ logger.info(`Feature ${featureId} execution ended, cleaning up runningFeatures`);
+ logger.info(
+ `Pending approvals at cleanup: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
);
this.runningFeatures.delete(featureId);
+
+ // Update execution state after feature completes
+ if (this.autoLoopRunning && projectPath) {
+ await this.saveExecutionState(projectPath);
+ }
}
}
@@ -679,12 +781,16 @@ export class AutoModeService {
abortController: AbortController,
autoLoadClaudeMd: boolean
): Promise<void> {
- console.log(`[AutoMode] Executing ${steps.length} pipeline step(s) for feature ${featureId}`);
+ logger.info(`Executing ${steps.length} pipeline step(s) for feature ${featureId}`);
- // Load context files once
+ // Load context files once with feature context for smart memory selection
const contextResult = await loadContextFiles({
projectPath,
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
+ taskContext: {
+ title: feature.title ?? '',
+ description: feature.description ?? '',
+ },
});
const contextFilesPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
@@ -742,6 +848,7 @@ export class AutoModeService {
previousContent: previousContext,
systemPrompt: contextFilesPrompt || undefined,
autoLoadClaudeMd,
+ thinkingLevel: feature.thinkingLevel,
}
);
@@ -761,12 +868,12 @@ export class AutoModeService {
projectPath,
});
- console.log(
- `[AutoMode] Pipeline step ${i + 1}/${steps.length} (${step.name}) completed for feature ${featureId}`
+ logger.info(
+ `Pipeline step ${i + 1}/${steps.length} (${step.name}) completed for feature ${featureId}`
);
}
- console.log(`[AutoMode] All pipeline steps completed for feature ${featureId}`);
+ logger.info(`All pipeline steps completed for feature ${featureId}`);
}
/**
@@ -1151,7 +1258,7 @@ Complete the pipeline step instructions above. Review the previous work and appl
if (worktreePath) {
workDir = worktreePath;
- console.log(`[AutoMode] Follow-up using worktree for branch "${branchName}": ${workDir}`);
+ logger.info(`Follow-up using worktree for branch "${branchName}": ${workDir}`);
}
}
@@ -1176,6 +1283,10 @@ Complete the pipeline step instructions above. Review the previous work and appl
const contextResult = await loadContextFiles({
projectPath,
fsModule: secureFs as Parameters<typeof loadContextFiles>[0]['fsModule'],
+ taskContext: {
+ title: feature?.title ?? prompt.substring(0, 200),
+ description: feature?.description ?? prompt,
+ },
});
// When autoLoadClaudeMd is enabled, filter out CLAUDE.md to avoid duplication
@@ -1204,6 +1315,11 @@ ${prompt}
## Task
Address the follow-up instructions above. Review the previous work and make the requested changes or fixes.`;
+ // Get model from feature and determine provider early for tracking
+ const model = resolveModelString(feature?.model, DEFAULT_MODELS.claude);
+ const provider = ProviderFactory.getProviderNameForModel(model);
+ logger.info(`Follow-up for feature ${featureId} using model: ${model}, provider: ${provider}`);
+
this.runningFeatures.set(featureId, {
featureId,
projectPath,
@@ -1212,6 +1328,8 @@ Address the follow-up instructions above. Review the previous work and make the
abortController,
isAutoMode: false,
startTime: Date.now(),
+ model,
+ provider,
});
this.emitAutoModeEvent('auto_mode_feature_start', {
@@ -1222,13 +1340,11 @@ Address the follow-up instructions above. Review the previous work and make the
title: 'Follow-up',
description: prompt.substring(0, 100),
},
+ model,
+ provider,
});
try {
- // Get model from feature (already loaded above)
- const model = resolveModelString(feature?.model, DEFAULT_MODELS.claude);
- console.log(`[AutoMode] Follow-up for feature ${featureId} using model: ${model}`);
-
// Update feature status to in_progress
await this.updateFeatureStatus(projectPath, featureId, 'in_progress');
@@ -1252,7 +1368,7 @@ Address the follow-up instructions above. Review the previous work and make the
// Store the absolute path (external storage uses absolute paths)
copiedImagePaths.push(destPath);
} catch (error) {
- console.error(`[AutoMode] Failed to copy follow-up image ${imagePath}:`, error);
+ logger.error(`Failed to copy follow-up image ${imagePath}:`, error);
}
}
}
@@ -1288,7 +1404,7 @@ Address the follow-up instructions above. Review the previous work and make the
try {
await secureFs.writeFile(featurePath, JSON.stringify(feature, null, 2));
} catch (error) {
- console.error(`[AutoMode] Failed to save feature.json:`, error);
+ logger.error(`Failed to save feature.json:`, error);
}
}
@@ -1310,6 +1426,7 @@ Address the follow-up instructions above. Review the previous work and make the
previousContent: previousContext || undefined,
systemPrompt: contextFilesPrompt || undefined,
autoLoadClaudeMd,
+ thinkingLevel: feature?.thinkingLevel,
}
);
@@ -1327,6 +1444,8 @@ Address the follow-up instructions above. Review the previous work and make the
passes: true,
message: `Follow-up completed successfully${finalStatus === 'verified' ? ' - auto-verified' : ''}`,
projectPath,
+ model,
+ provider,
});
} catch (error) {
const errorInfo = classifyError(error);
@@ -1433,10 +1552,10 @@ Address the follow-up instructions above. Review the previous work and make the
try {
await secureFs.access(providedWorktreePath);
workDir = providedWorktreePath;
- console.log(`[AutoMode] Committing in provided worktree: ${workDir}`);
+ logger.info(`Committing in provided worktree: ${workDir}`);
} catch {
- console.log(
- `[AutoMode] Provided worktree path doesn't exist: ${providedWorktreePath}, using project path`
+ logger.info(
+ `Provided worktree path doesn't exist: ${providedWorktreePath}, using project path`
);
}
} else {
@@ -1445,9 +1564,9 @@ Address the follow-up instructions above. Review the previous work and make the
try {
await secureFs.access(legacyWorktreePath);
workDir = legacyWorktreePath;
- console.log(`[AutoMode] Committing in legacy worktree: ${workDir}`);
+ logger.info(`Committing in legacy worktree: ${workDir}`);
} catch {
- console.log(`[AutoMode] No worktree found, committing in project path: ${workDir}`);
+ logger.info(`No worktree found, committing in project path: ${workDir}`);
}
}
@@ -1487,7 +1606,7 @@ Address the follow-up instructions above. Review the previous work and make the
return hash.trim();
} catch (error) {
- console.error(`[AutoMode] Commit failed for ${featureId}:`, error);
+ logger.error(`Commit failed for ${featureId}:`, error);
return null;
}
}
@@ -1535,8 +1654,14 @@ Address the follow-up instructions above. Review the previous work and make the
Format your response as a structured markdown document.`;
try {
- // Use default Claude model for analysis (can be overridden in the future)
- const analysisModel = resolveModelString(undefined, DEFAULT_MODELS.claude);
+ // Get model from phase settings
+ const settings = await this.settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.projectAnalysisModel || DEFAULT_PHASE_MODELS.projectAnalysisModel;
+ const { model: analysisModel, thinkingLevel: analysisThinkingLevel } =
+ resolvePhaseModel(phaseModelEntry);
+ logger.info('Using model for project analysis:', analysisModel);
+
const provider = ProviderFactory.getProviderForModel(analysisModel);
// Load autoLoadClaudeMd setting
@@ -1554,6 +1679,7 @@ Format your response as a structured markdown document.`;
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
autoLoadClaudeMd,
+ thinkingLevel: analysisThinkingLevel,
});
const options: ExecuteOptions = {
@@ -1564,7 +1690,7 @@ Format your response as a structured markdown document.`;
allowedTools: sdkOptions.allowedTools as string[],
abortController,
settingSources: sdkOptions.settingSources,
- sandbox: sdkOptions.sandbox, // Pass sandbox configuration
+ thinkingLevel: analysisThinkingLevel, // Pass thinking level
};
const stream = provider.executeQuery(options);
@@ -1634,6 +1760,8 @@ Format your response as a structured markdown document.`;
projectPath: string;
projectName: string;
isAutoMode: boolean;
+ model?: string;
+ provider?: ModelProvider;
title?: string;
description?: string;
}>
@@ -1659,6 +1787,8 @@ Format your response as a structured markdown document.`;
projectPath: rf.projectPath,
projectName: path.basename(rf.projectPath),
isAutoMode: rf.isAutoMode,
+ model: rf.model,
+ provider: rf.provider,
title,
description,
};
@@ -1670,23 +1800,53 @@ Format your response as a structured markdown document.`;
/**
* Wait for plan approval from the user.
* Returns a promise that resolves when the user approves/rejects the plan.
+ * Times out after 30 minutes to prevent indefinite memory retention.
*/
waitForPlanApproval(
featureId: string,
projectPath: string
): Promise<{ approved: boolean; editedPlan?: string; feedback?: string }> {
- console.log(`[AutoMode] Registering pending approval for feature ${featureId}`);
- console.log(
- `[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
+ const APPROVAL_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes
+
+ logger.info(`Registering pending approval for feature ${featureId}`);
+ logger.info(
+ `Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
);
return new Promise((resolve, reject) => {
+ // Set up timeout to prevent indefinite waiting and memory leaks
+ const timeoutId = setTimeout(() => {
+ const pending = this.pendingApprovals.get(featureId);
+ if (pending) {
+ logger.warn(`Plan approval for feature ${featureId} timed out after 30 minutes`);
+ this.pendingApprovals.delete(featureId);
+ reject(
+ new Error('Plan approval timed out after 30 minutes - feature execution cancelled')
+ );
+ }
+ }, APPROVAL_TIMEOUT_MS);
+
+ // Wrap resolve/reject to clear timeout when approval is resolved
+ const wrappedResolve = (result: {
+ approved: boolean;
+ editedPlan?: string;
+ feedback?: string;
+ }) => {
+ clearTimeout(timeoutId);
+ resolve(result);
+ };
+
+ const wrappedReject = (error: Error) => {
+ clearTimeout(timeoutId);
+ reject(error);
+ };
+
this.pendingApprovals.set(featureId, {
- resolve,
- reject,
+ resolve: wrappedResolve,
+ reject: wrappedReject,
featureId,
projectPath,
});
- console.log(`[AutoMode] Pending approval registered for feature ${featureId}`);
+ logger.info(`Pending approval registered for feature ${featureId} (timeout: 30 minutes)`);
});
}
@@ -1701,27 +1861,23 @@ Format your response as a structured markdown document.`;
feedback?: string,
projectPathFromClient?: string
): Promise<{ success: boolean; error?: string }> {
- console.log(
- `[AutoMode] resolvePlanApproval called for feature ${featureId}, approved=${approved}`
- );
- console.log(
- `[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
+ logger.info(`resolvePlanApproval called for feature ${featureId}, approved=${approved}`);
+ logger.info(
+ `Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
);
const pending = this.pendingApprovals.get(featureId);
if (!pending) {
- console.log(`[AutoMode] No pending approval in Map for feature ${featureId}`);
+ logger.info(`No pending approval in Map for feature ${featureId}`);
// RECOVERY: If no pending approval but we have projectPath from client,
// check if feature's planSpec.status is 'generated' and handle recovery
if (projectPathFromClient) {
- console.log(`[AutoMode] Attempting recovery with projectPath: ${projectPathFromClient}`);
+ logger.info(`Attempting recovery with projectPath: ${projectPathFromClient}`);
const feature = await this.loadFeature(projectPathFromClient, featureId);
if (feature?.planSpec?.status === 'generated') {
- console.log(
- `[AutoMode] Feature ${featureId} has planSpec.status='generated', performing recovery`
- );
+ logger.info(`Feature ${featureId} has planSpec.status='generated', performing recovery`);
if (approved) {
// Update planSpec to approved
@@ -1740,17 +1896,14 @@ Format your response as a structured markdown document.`;
}
continuationPrompt += `Now proceed with the implementation as specified in the plan:\n\n${planContent}\n\nImplement the feature now.`;
- console.log(`[AutoMode] Starting recovery execution for feature ${featureId}`);
+ logger.info(`Starting recovery execution for feature ${featureId}`);
// Start feature execution with the continuation prompt (async, don't await)
// Pass undefined for providedWorktreePath, use options for continuation prompt
this.executeFeature(projectPathFromClient, featureId, true, false, undefined, {
continuationPrompt,
}).catch((error) => {
- console.error(
- `[AutoMode] Recovery execution failed for feature ${featureId}:`,
- error
- );
+ logger.error(`Recovery execution failed for feature ${featureId}:`, error);
});
return { success: true };
@@ -1774,15 +1927,15 @@ Format your response as a structured markdown document.`;
}
}
- console.log(
- `[AutoMode] ERROR: No pending approval found for feature ${featureId} and recovery not possible`
+    logger.error(
+      `No pending approval found for feature ${featureId} and recovery not possible`
);
return {
success: false,
error: `No pending approval for feature ${featureId}`,
};
}
- console.log(`[AutoMode] Found pending approval for feature ${featureId}, proceeding...`);
+ logger.info(`Found pending approval for feature ${featureId}, proceeding...`);
const { projectPath } = pending;
@@ -1815,17 +1968,17 @@ Format your response as a structured markdown document.`;
* Cancel a pending plan approval (e.g., when feature is stopped).
*/
cancelPlanApproval(featureId: string): void {
- console.log(`[AutoMode] cancelPlanApproval called for feature ${featureId}`);
- console.log(
- `[AutoMode] Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
+ logger.info(`cancelPlanApproval called for feature ${featureId}`);
+ logger.info(
+ `Current pending approvals: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
);
const pending = this.pendingApprovals.get(featureId);
if (pending) {
- console.log(`[AutoMode] Found and cancelling pending approval for feature ${featureId}`);
+ logger.info(`Found and cancelling pending approval for feature ${featureId}`);
pending.reject(new Error('Plan approval cancelled - feature was stopped'));
this.pendingApprovals.delete(featureId);
} else {
- console.log(`[AutoMode] No pending approval to cancel for feature ${featureId}`);
+ logger.info(`No pending approval to cancel for feature ${featureId}`);
}
}
@@ -1965,7 +2118,7 @@ Format your response as a structured markdown document.`;
feature.updatedAt = new Date().toISOString();
await secureFs.writeFile(featurePath, JSON.stringify(feature, null, 2));
} catch (error) {
- console.error(`[AutoMode] Failed to update planSpec for ${featureId}:`, error);
+ logger.error(`Failed to update planSpec for ${featureId}:`, error);
}
}
@@ -2006,9 +2159,13 @@ Format your response as a structured markdown document.`;
// Apply dependency-aware ordering
const { orderedFeatures } = resolveDependencies(pendingFeatures);
+ // Get skipVerificationInAutoMode setting
+ const settings = await this.settingsService?.getGlobalSettings();
+ const skipVerification = settings?.skipVerificationInAutoMode ?? false;
+
// Filter to only features with satisfied dependencies
const readyFeatures = orderedFeatures.filter((feature: Feature) =>
- areDependenciesSatisfied(feature, allFeatures)
+ areDependenciesSatisfied(feature, allFeatures, { skipVerification })
);
return readyFeatures;
@@ -2204,12 +2361,25 @@ This helps parse your summary correctly in the output logs.`;
previousContent?: string;
systemPrompt?: string;
autoLoadClaudeMd?: boolean;
+ thinkingLevel?: ThinkingLevel;
}
  ): Promise<void> {
const finalProjectPath = options?.projectPath || projectPath;
const planningMode = options?.planningMode || 'skip';
const previousContent = options?.previousContent;
+ // Validate vision support before processing images
+ const effectiveModel = model || 'claude-sonnet-4-20250514';
+ if (imagePaths && imagePaths.length > 0) {
+ const supportsVision = ProviderFactory.modelSupportsVision(effectiveModel);
+ if (!supportsVision) {
+ throw new Error(
+ `This model (${effectiveModel}) does not support image input. ` +
+ `Please switch to a model that supports vision (like Claude models), or remove the images and try again.`
+ );
+ }
+ }
+
// Check if this planning mode can generate a spec/plan that needs approval
// - spec and full always generate specs
// - lite only generates approval-ready content when requirePlanApproval is true
@@ -2222,7 +2392,7 @@ This helps parse your summary correctly in the output logs.`;
// CI/CD Mock Mode: Return early with mock response when AUTOMAKER_MOCK_AGENT is set
// This prevents actual API calls during automated testing
if (process.env.AUTOMAKER_MOCK_AGENT === 'true') {
- console.log(`[AutoMode] MOCK MODE: Skipping real agent execution for feature ${featureId}`);
+ logger.info(`MOCK MODE: Skipping real agent execution for feature ${featureId}`);
// Simulate some work being done
await this.sleep(500);
@@ -2272,7 +2442,7 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
await secureFs.mkdir(path.dirname(outputPath), { recursive: true });
await secureFs.writeFile(outputPath, mockOutput);
- console.log(`[AutoMode] MOCK MODE: Completed mock execution for feature ${featureId}`);
+ logger.info(`MOCK MODE: Completed mock execution for feature ${featureId}`);
return;
}
@@ -2283,14 +2453,10 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
? options.autoLoadClaudeMd
: await getAutoLoadClaudeMdSetting(finalProjectPath, this.settingsService, '[AutoMode]');
- // Load enableSandboxMode setting (global setting only)
- const enableSandboxMode = await getEnableSandboxModeSetting(this.settingsService, '[AutoMode]');
-
// Load MCP servers from settings (global setting only)
const mcpServers = await getMCPServersFromSettings(this.settingsService, '[AutoMode]');
// Load MCP permission settings (global setting only)
- const mcpPermissions = await getMCPPermissionSettings(this.settingsService, '[AutoMode]');
// Build SDK options using centralized configuration for feature implementation
const sdkOptions = createAutoModeOptions({
@@ -2298,10 +2464,8 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
model: model,
abortController,
autoLoadClaudeMd,
- enableSandboxMode,
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
- mcpAutoApproveTools: mcpPermissions.mcpAutoApproveTools,
- mcpUnrestrictedTools: mcpPermissions.mcpUnrestrictedTools,
+ thinkingLevel: options?.thinkingLevel,
});
// Extract model, maxTurns, and allowedTools from SDK options
@@ -2309,14 +2473,19 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
const maxTurns = sdkOptions.maxTurns;
const allowedTools = sdkOptions.allowedTools as string[] | undefined;
- console.log(
- `[AutoMode] runAgent called for feature ${featureId} with model: ${finalModel}, planningMode: ${planningMode}, requiresApproval: ${requiresApproval}`
+ logger.info(
+ `runAgent called for feature ${featureId} with model: ${finalModel}, planningMode: ${planningMode}, requiresApproval: ${requiresApproval}`
);
// Get provider for this model
const provider = ProviderFactory.getProviderForModel(finalModel);
- console.log(`[AutoMode] Using provider "${provider.getName()}" for model "${finalModel}"`);
+ // Strip provider prefix - providers should receive bare model IDs
+ const bareModel = stripProviderPrefix(finalModel);
+
+ logger.info(
+ `Using provider "${provider.getName()}" for model "${finalModel}" (bare: ${bareModel})`
+ );
// Build prompt content with images using utility
const { content: promptContent } = await buildPromptWithImages(
@@ -2328,30 +2497,28 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
// Debug: Log if system prompt is provided
if (options?.systemPrompt) {
- console.log(
- `[AutoMode] System prompt provided (${options.systemPrompt.length} chars), first 200 chars:\n${options.systemPrompt.substring(0, 200)}...`
+ logger.info(
+ `System prompt provided (${options.systemPrompt.length} chars), first 200 chars:\n${options.systemPrompt.substring(0, 200)}...`
);
}
const executeOptions: ExecuteOptions = {
prompt: promptContent,
- model: finalModel,
+ model: bareModel,
maxTurns: maxTurns,
cwd: workDir,
allowedTools: allowedTools,
abortController,
systemPrompt: sdkOptions.systemPrompt,
settingSources: sdkOptions.settingSources,
- sandbox: sdkOptions.sandbox, // Pass sandbox configuration
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
- mcpAutoApproveTools: mcpPermissions.mcpAutoApproveTools, // Pass MCP auto-approve setting
- mcpUnrestrictedTools: mcpPermissions.mcpUnrestrictedTools, // Pass MCP unrestricted tools setting
+ thinkingLevel: options?.thinkingLevel, // Pass thinking level for extended thinking
};
// Execute via provider
- console.log(`[AutoMode] Starting stream for feature ${featureId}...`);
+ logger.info(`Starting stream for feature ${featureId}...`);
const stream = provider.executeQuery(executeOptions);
- console.log(`[AutoMode] Stream created, starting to iterate...`);
+ logger.info(`Stream created, starting to iterate...`);
// Initialize with previous content if this is a follow-up, with a separator
let responseText = previousContent
? `${previousContent}\n\n---\n\n## Follow-up Session\n\n`
@@ -2362,11 +2529,49 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
// Note: We use projectPath here, not workDir, because workDir might be a worktree path
const featureDirForOutput = getFeatureDir(projectPath, featureId);
const outputPath = path.join(featureDirForOutput, 'agent-output.md');
+ const rawOutputPath = path.join(featureDirForOutput, 'raw-output.jsonl');
+
+ // Raw output logging is configurable via environment variable
+ // Set AUTOMAKER_DEBUG_RAW_OUTPUT=true to enable raw stream event logging
+ const enableRawOutput =
+ process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === 'true' ||
+ process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === '1';
// Incremental file writing state
    let writeTimeout: ReturnType<typeof setTimeout> | null = null;
const WRITE_DEBOUNCE_MS = 500; // Batch writes every 500ms
+ // Raw output accumulator for debugging (NDJSON format)
+ let rawOutputLines: string[] = [];
+    let rawWriteTimeout: ReturnType<typeof setTimeout> | null = null;
+
+ // Helper to append raw stream event for debugging (only when enabled)
+ const appendRawEvent = (event: unknown): void => {
+ if (!enableRawOutput) return;
+
+ try {
+ const timestamp = new Date().toISOString();
+        const rawLine = JSON.stringify({ timestamp, event }); // One JSON value per line (valid JSONL/NDJSON)
+ rawOutputLines.push(rawLine);
+
+ // Debounced write of raw output
+ if (rawWriteTimeout) {
+ clearTimeout(rawWriteTimeout);
+ }
+        rawWriteTimeout = setTimeout(async () => {
+          const toWrite = rawOutputLines.splice(0); // Snapshot & clear before awaiting so events pushed during the write aren't lost
+          try {
+            await secureFs.mkdir(path.dirname(rawOutputPath), { recursive: true });
+            await secureFs.appendFile(rawOutputPath, toWrite.join('\n') + '\n');
+ } catch (error) {
+ logger.error(`Failed to write raw output for ${featureId}:`, error);
+ }
+ }, WRITE_DEBOUNCE_MS);
+ } catch {
+ // Ignore serialization errors
+ }
+ };
+
// Helper to write current responseText to file
    const writeToFile = async (): Promise<void> => {
try {
@@ -2374,7 +2579,7 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
await secureFs.writeFile(outputPath, responseText);
} catch (error) {
// Log but don't crash - file write errors shouldn't stop execution
- console.error(`[AutoMode] Failed to write agent output for ${featureId}:`, error);
+ logger.error(`Failed to write agent output for ${featureId}:`, error);
}
};
@@ -2388,166 +2593,199 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
}, WRITE_DEBOUNCE_MS);
};
- streamLoop: for await (const msg of stream) {
- console.log(`[AutoMode] Stream message received:`, msg.type, msg.subtype || '');
- if (msg.type === 'assistant' && msg.message?.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text') {
- // Add separator before new text if we already have content and it doesn't end with newlines
- if (responseText.length > 0 && !responseText.endsWith('\n\n')) {
- if (responseText.endsWith('\n')) {
- responseText += '\n';
- } else {
- responseText += '\n\n';
+ // Heartbeat logging so "silent" model calls are visible.
+ // Some runs can take a while before the first streamed message arrives.
+ const streamStartTime = Date.now();
+ let receivedAnyStreamMessage = false;
+ const STREAM_HEARTBEAT_MS = 15_000;
+ const streamHeartbeat = setInterval(() => {
+ if (receivedAnyStreamMessage) return;
+ const elapsedSeconds = Math.round((Date.now() - streamStartTime) / 1000);
+ logger.info(
+ `Waiting for first model response for feature ${featureId} (${elapsedSeconds}s elapsed)...`
+ );
+ }, STREAM_HEARTBEAT_MS);
+
+ // Wrap stream processing in try/finally to ensure timeout cleanup on any error/abort
+ try {
+ streamLoop: for await (const msg of stream) {
+ receivedAnyStreamMessage = true;
+ // Log raw stream event for debugging
+ appendRawEvent(msg);
+
+ logger.info(`Stream message received:`, msg.type, msg.subtype || '');
+ if (msg.type === 'assistant' && msg.message?.content) {
+ for (const block of msg.message.content) {
+ if (block.type === 'text') {
+ const newText = block.text || '';
+
+ // Skip empty text
+ if (!newText) continue;
+
+ // Note: Cursor-specific dedup (duplicate blocks, accumulated text) is now
+ // handled in CursorProvider.deduplicateTextBlocks() for cleaner separation
+
+ // Only add separator when we're at a natural paragraph break:
+ // - Previous text ends with sentence terminator AND new text starts a new thought
+ // - Don't add separators mid-word or mid-sentence (for streaming providers like Cursor)
+ if (responseText.length > 0 && newText.length > 0) {
+ const lastChar = responseText.slice(-1);
+ const endsWithSentence = /[.!?:]\s*$/.test(responseText);
+ const endsWithNewline = /\n\s*$/.test(responseText);
+ const startsNewParagraph = /^[\n#\-*>]/.test(newText);
+
+ // Add paragraph break only at natural boundaries
+ if (
+ !endsWithNewline &&
+ (endsWithSentence || startsNewParagraph) &&
+ !/[a-zA-Z0-9]/.test(lastChar) // Not mid-word
+ ) {
+ responseText += '\n\n';
+ }
}
- }
- responseText += block.text || '';
+ responseText += newText;
- // Check for authentication errors in the response
- if (
- block.text &&
- (block.text.includes('Invalid API key') ||
- block.text.includes('authentication_failed') ||
- block.text.includes('Fix external API key'))
- ) {
- throw new Error(
- 'Authentication failed: Invalid or expired API key. ' +
- "Please check your ANTHROPIC_API_KEY, or run 'claude login' to re-authenticate."
- );
- }
-
- // Schedule incremental file write (debounced)
- scheduleWrite();
-
- // Check for [SPEC_GENERATED] marker in planning modes (spec or full)
- if (
- planningModeRequiresApproval &&
- !specDetected &&
- responseText.includes('[SPEC_GENERATED]')
- ) {
- specDetected = true;
-
- // Extract plan content (everything before the marker)
- const markerIndex = responseText.indexOf('[SPEC_GENERATED]');
- const planContent = responseText.substring(0, markerIndex).trim();
-
- // Parse tasks from the generated spec (for spec and full modes)
- // Use let since we may need to update this after plan revision
- let parsedTasks = parseTasksFromSpec(planContent);
- const tasksTotal = parsedTasks.length;
-
- console.log(
- `[AutoMode] Parsed ${tasksTotal} tasks from spec for feature ${featureId}`
- );
- if (parsedTasks.length > 0) {
- console.log(`[AutoMode] Tasks: ${parsedTasks.map((t) => t.id).join(', ')}`);
+ // Check for authentication errors in the response
+ if (
+ block.text &&
+ (block.text.includes('Invalid API key') ||
+ block.text.includes('authentication_failed') ||
+ block.text.includes('Fix external API key'))
+ ) {
+ throw new Error(
+ 'Authentication failed: Invalid or expired API key. ' +
+ "Please check your ANTHROPIC_API_KEY, or run 'claude login' to re-authenticate."
+ );
}
- // Update planSpec status to 'generated' and save content with parsed tasks
- await this.updateFeaturePlanSpec(projectPath, featureId, {
- status: 'generated',
- content: planContent,
- version: 1,
- generatedAt: new Date().toISOString(),
- reviewedByUser: false,
- tasks: parsedTasks,
- tasksTotal,
- tasksCompleted: 0,
- });
+ // Schedule incremental file write (debounced)
+ scheduleWrite();
- let approvedPlanContent = planContent;
- let userFeedback: string | undefined;
- let currentPlanContent = planContent;
- let planVersion = 1;
+ // Check for [SPEC_GENERATED] marker in planning modes (spec or full)
+ if (
+ planningModeRequiresApproval &&
+ !specDetected &&
+ responseText.includes('[SPEC_GENERATED]')
+ ) {
+ specDetected = true;
- // Only pause for approval if requirePlanApproval is true
- if (requiresApproval) {
- // ========================================
- // PLAN REVISION LOOP
- // Keep regenerating plan until user approves
- // ========================================
- let planApproved = false;
+ // Extract plan content (everything before the marker)
+ const markerIndex = responseText.indexOf('[SPEC_GENERATED]');
+ const planContent = responseText.substring(0, markerIndex).trim();
- while (!planApproved) {
- console.log(
- `[AutoMode] Spec v${planVersion} generated for feature ${featureId}, waiting for approval`
- );
+ // Parse tasks from the generated spec (for spec and full modes)
+ // Use let since we may need to update this after plan revision
+ let parsedTasks = parseTasksFromSpec(planContent);
+ const tasksTotal = parsedTasks.length;
- // CRITICAL: Register pending approval BEFORE emitting event
- const approvalPromise = this.waitForPlanApproval(featureId, projectPath);
+ logger.info(`Parsed ${tasksTotal} tasks from spec for feature ${featureId}`);
+ if (parsedTasks.length > 0) {
+ logger.info(`Tasks: ${parsedTasks.map((t) => t.id).join(', ')}`);
+ }
- // Emit plan_approval_required event
- this.emitAutoModeEvent('plan_approval_required', {
- featureId,
- projectPath,
- planContent: currentPlanContent,
- planningMode,
- planVersion,
- });
+ // Update planSpec status to 'generated' and save content with parsed tasks
+ await this.updateFeaturePlanSpec(projectPath, featureId, {
+ status: 'generated',
+ content: planContent,
+ version: 1,
+ generatedAt: new Date().toISOString(),
+ reviewedByUser: false,
+ tasks: parsedTasks,
+ tasksTotal,
+ tasksCompleted: 0,
+ });
- // Wait for user response
- try {
- const approvalResult = await approvalPromise;
+ let approvedPlanContent = planContent;
+ let userFeedback: string | undefined;
+ let currentPlanContent = planContent;
+ let planVersion = 1;
- if (approvalResult.approved) {
- // User approved the plan
- console.log(
- `[AutoMode] Plan v${planVersion} approved for feature ${featureId}`
- );
- planApproved = true;
+ // Only pause for approval if requirePlanApproval is true
+ if (requiresApproval) {
+ // ========================================
+ // PLAN REVISION LOOP
+ // Keep regenerating plan until user approves
+ // ========================================
+ let planApproved = false;
- // If user provided edits, use the edited version
- if (approvalResult.editedPlan) {
- approvedPlanContent = approvalResult.editedPlan;
- await this.updateFeaturePlanSpec(projectPath, featureId, {
- content: approvalResult.editedPlan,
+ while (!planApproved) {
+ logger.info(
+ `Spec v${planVersion} generated for feature ${featureId}, waiting for approval`
+ );
+
+ // CRITICAL: Register pending approval BEFORE emitting event
+ const approvalPromise = this.waitForPlanApproval(featureId, projectPath);
+
+ // Emit plan_approval_required event
+ this.emitAutoModeEvent('plan_approval_required', {
+ featureId,
+ projectPath,
+ planContent: currentPlanContent,
+ planningMode,
+ planVersion,
+ });
+
+ // Wait for user response
+ try {
+ const approvalResult = await approvalPromise;
+
+ if (approvalResult.approved) {
+ // User approved the plan
+ logger.info(`Plan v${planVersion} approved for feature ${featureId}`);
+ planApproved = true;
+
+ // If user provided edits, use the edited version
+ if (approvalResult.editedPlan) {
+ approvedPlanContent = approvalResult.editedPlan;
+ await this.updateFeaturePlanSpec(projectPath, featureId, {
+ content: approvalResult.editedPlan,
+ });
+ } else {
+ approvedPlanContent = currentPlanContent;
+ }
+
+ // Capture any additional feedback for implementation
+ userFeedback = approvalResult.feedback;
+
+ // Emit approval event
+ this.emitAutoModeEvent('plan_approved', {
+ featureId,
+ projectPath,
+ hasEdits: !!approvalResult.editedPlan,
+ planVersion,
});
} else {
- approvedPlanContent = currentPlanContent;
- }
+ // User rejected - check if they provided feedback for revision
+ const hasFeedback =
+ approvalResult.feedback && approvalResult.feedback.trim().length > 0;
+ const hasEdits =
+ approvalResult.editedPlan && approvalResult.editedPlan.trim().length > 0;
- // Capture any additional feedback for implementation
- userFeedback = approvalResult.feedback;
+ if (!hasFeedback && !hasEdits) {
+ // No feedback or edits = explicit cancel
+ logger.info(
+ `Plan rejected without feedback for feature ${featureId}, cancelling`
+ );
+ throw new Error('Plan cancelled by user');
+ }
- // Emit approval event
- this.emitAutoModeEvent('plan_approved', {
- featureId,
- projectPath,
- hasEdits: !!approvalResult.editedPlan,
- planVersion,
- });
- } else {
- // User rejected - check if they provided feedback for revision
- const hasFeedback =
- approvalResult.feedback && approvalResult.feedback.trim().length > 0;
- const hasEdits =
- approvalResult.editedPlan && approvalResult.editedPlan.trim().length > 0;
-
- if (!hasFeedback && !hasEdits) {
- // No feedback or edits = explicit cancel
- console.log(
- `[AutoMode] Plan rejected without feedback for feature ${featureId}, cancelling`
+ // User wants revisions - regenerate the plan
+ logger.info(
+ `Plan v${planVersion} rejected with feedback for feature ${featureId}, regenerating...`
);
- throw new Error('Plan cancelled by user');
- }
+ planVersion++;
- // User wants revisions - regenerate the plan
- console.log(
- `[AutoMode] Plan v${planVersion} rejected with feedback for feature ${featureId}, regenerating...`
- );
- planVersion++;
+ // Emit revision event
+ this.emitAutoModeEvent('plan_revision_requested', {
+ featureId,
+ projectPath,
+ feedback: approvalResult.feedback,
+ hasEdits: !!hasEdits,
+ planVersion,
+ });
- // Emit revision event
- this.emitAutoModeEvent('plan_revision_requested', {
- featureId,
- projectPath,
- feedback: approvalResult.feedback,
- hasEdits: !!hasEdits,
- planVersion,
- });
-
- // Build revision prompt
- let revisionPrompt = `The user has requested revisions to the plan/specification.
+ // Build revision prompt
+ let revisionPrompt = `The user has requested revisions to the plan/specification.
## Previous Plan (v${planVersion - 1})
${hasEdits ? approvalResult.editedPlan : currentPlanContent}
@@ -2562,173 +2800,253 @@ After generating the revised spec, output:
"[SPEC_GENERATED] Please review the revised specification above."
`;
- // Update status to regenerating
- await this.updateFeaturePlanSpec(projectPath, featureId, {
- status: 'generating',
- version: planVersion,
- });
+ // Update status to regenerating
+ await this.updateFeaturePlanSpec(projectPath, featureId, {
+ status: 'generating',
+ version: planVersion,
+ });
- // Make revision call
- const revisionStream = provider.executeQuery({
- prompt: revisionPrompt,
- model: finalModel,
- maxTurns: maxTurns || 100,
- cwd: workDir,
- allowedTools: allowedTools,
- abortController,
- mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
- mcpAutoApproveTools: mcpPermissions.mcpAutoApproveTools,
- mcpUnrestrictedTools: mcpPermissions.mcpUnrestrictedTools,
- });
+ // Make revision call
+ const revisionStream = provider.executeQuery({
+ prompt: revisionPrompt,
+ model: bareModel,
+ maxTurns: maxTurns || 100,
+ cwd: workDir,
+ allowedTools: allowedTools,
+ abortController,
+ mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
+ });
- let revisionText = '';
- for await (const msg of revisionStream) {
- if (msg.type === 'assistant' && msg.message?.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text') {
- revisionText += block.text || '';
- this.emitAutoModeEvent('auto_mode_progress', {
- featureId,
- content: block.text,
- });
+ let revisionText = '';
+ for await (const msg of revisionStream) {
+ if (msg.type === 'assistant' && msg.message?.content) {
+ for (const block of msg.message.content) {
+ if (block.type === 'text') {
+ revisionText += block.text || '';
+ this.emitAutoModeEvent('auto_mode_progress', {
+ featureId,
+ content: block.text,
+ });
+ }
}
+ } else if (msg.type === 'error') {
+ throw new Error(msg.error || 'Error during plan revision');
+ } else if (msg.type === 'result' && msg.subtype === 'success') {
+ revisionText += msg.result || '';
}
- } else if (msg.type === 'error') {
- throw new Error(msg.error || 'Error during plan revision');
- } else if (msg.type === 'result' && msg.subtype === 'success') {
- revisionText += msg.result || '';
}
+
+ // Extract new plan content
+ const markerIndex = revisionText.indexOf('[SPEC_GENERATED]');
+ if (markerIndex > 0) {
+ currentPlanContent = revisionText.substring(0, markerIndex).trim();
+ } else {
+ currentPlanContent = revisionText.trim();
+ }
+
+ // Re-parse tasks from revised plan
+ const revisedTasks = parseTasksFromSpec(currentPlanContent);
+ logger.info(`Revised plan has ${revisedTasks.length} tasks`);
+
+ // Update planSpec with revised content
+ await this.updateFeaturePlanSpec(projectPath, featureId, {
+ status: 'generated',
+ content: currentPlanContent,
+ version: planVersion,
+ tasks: revisedTasks,
+ tasksTotal: revisedTasks.length,
+ tasksCompleted: 0,
+ });
+
+ // Update parsedTasks for implementation
+ parsedTasks = revisedTasks;
+
+ responseText += revisionText;
}
-
- // Extract new plan content
- const markerIndex = revisionText.indexOf('[SPEC_GENERATED]');
- if (markerIndex > 0) {
- currentPlanContent = revisionText.substring(0, markerIndex).trim();
- } else {
- currentPlanContent = revisionText.trim();
+ } catch (error) {
+ if ((error as Error).message.includes('cancelled')) {
+ throw error;
}
-
- // Re-parse tasks from revised plan
- const revisedTasks = parseTasksFromSpec(currentPlanContent);
- console.log(`[AutoMode] Revised plan has ${revisedTasks.length} tasks`);
-
- // Update planSpec with revised content
- await this.updateFeaturePlanSpec(projectPath, featureId, {
- status: 'generated',
- content: currentPlanContent,
- version: planVersion,
- tasks: revisedTasks,
- tasksTotal: revisedTasks.length,
- tasksCompleted: 0,
- });
-
- // Update parsedTasks for implementation
- parsedTasks = revisedTasks;
-
- responseText += revisionText;
+ throw new Error(`Plan approval failed: ${(error as Error).message}`);
}
- } catch (error) {
- if ((error as Error).message.includes('cancelled')) {
- throw error;
- }
- throw new Error(`Plan approval failed: ${(error as Error).message}`);
}
- }
- } else {
- // Auto-approve: requirePlanApproval is false, just continue without pausing
- console.log(
- `[AutoMode] Spec generated for feature ${featureId}, auto-approving (requirePlanApproval=false)`
- );
-
- // Emit info event for frontend
- this.emitAutoModeEvent('plan_auto_approved', {
- featureId,
- projectPath,
- planContent,
- planningMode,
- });
-
- approvedPlanContent = planContent;
- }
-
- // CRITICAL: After approval, we need to make a second call to continue implementation
- // The agent is waiting for "approved" - we need to send it and continue
- console.log(
- `[AutoMode] Making continuation call after plan approval for feature ${featureId}`
- );
-
- // Update planSpec status to approved (handles both manual and auto-approval paths)
- await this.updateFeaturePlanSpec(projectPath, featureId, {
- status: 'approved',
- approvedAt: new Date().toISOString(),
- reviewedByUser: requiresApproval,
- });
-
- // ========================================
- // MULTI-AGENT TASK EXECUTION
- // Each task gets its own focused agent call
- // ========================================
-
- if (parsedTasks.length > 0) {
- console.log(
- `[AutoMode] Starting multi-agent execution: ${parsedTasks.length} tasks for feature ${featureId}`
- );
-
- // Execute each task with a separate agent
- for (let taskIndex = 0; taskIndex < parsedTasks.length; taskIndex++) {
- const task = parsedTasks[taskIndex];
-
- // Check for abort
- if (abortController.signal.aborted) {
- throw new Error('Feature execution aborted');
- }
-
- // Emit task started
- console.log(`[AutoMode] Starting task ${task.id}: ${task.description}`);
- this.emitAutoModeEvent('auto_mode_task_started', {
- featureId,
- projectPath,
- taskId: task.id,
- taskDescription: task.description,
- taskIndex,
- tasksTotal: parsedTasks.length,
- });
-
- // Update planSpec with current task
- await this.updateFeaturePlanSpec(projectPath, featureId, {
- currentTaskId: task.id,
- });
-
- // Build focused prompt for this specific task
- const taskPrompt = this.buildTaskPrompt(
- task,
- parsedTasks,
- taskIndex,
- approvedPlanContent,
- userFeedback
+ } else {
+ // Auto-approve: requirePlanApproval is false, just continue without pausing
+ logger.info(
+ `Spec generated for feature ${featureId}, auto-approving (requirePlanApproval=false)`
);
- // Execute task with dedicated agent
- const taskStream = provider.executeQuery({
- prompt: taskPrompt,
- model: finalModel,
- maxTurns: Math.min(maxTurns || 100, 50), // Limit turns per task
+ // Emit info event for frontend
+ this.emitAutoModeEvent('plan_auto_approved', {
+ featureId,
+ projectPath,
+ planContent,
+ planningMode,
+ });
+
+ approvedPlanContent = planContent;
+ }
+
+ // CRITICAL: After approval, we need to make a second call to continue implementation
+ // The agent is waiting for "approved" - we need to send it and continue
+ logger.info(
+ `Making continuation call after plan approval for feature ${featureId}`
+ );
+
+ // Update planSpec status to approved (handles both manual and auto-approval paths)
+ await this.updateFeaturePlanSpec(projectPath, featureId, {
+ status: 'approved',
+ approvedAt: new Date().toISOString(),
+ reviewedByUser: requiresApproval,
+ });
+
+ // ========================================
+ // MULTI-AGENT TASK EXECUTION
+ // Each task gets its own focused agent call
+ // ========================================
+
+ if (parsedTasks.length > 0) {
+ logger.info(
+ `Starting multi-agent execution: ${parsedTasks.length} tasks for feature ${featureId}`
+ );
+
+ // Execute each task with a separate agent
+ for (let taskIndex = 0; taskIndex < parsedTasks.length; taskIndex++) {
+ const task = parsedTasks[taskIndex];
+
+ // Check for abort
+ if (abortController.signal.aborted) {
+ throw new Error('Feature execution aborted');
+ }
+
+ // Emit task started
+ logger.info(`Starting task ${task.id}: ${task.description}`);
+ this.emitAutoModeEvent('auto_mode_task_started', {
+ featureId,
+ projectPath,
+ taskId: task.id,
+ taskDescription: task.description,
+ taskIndex,
+ tasksTotal: parsedTasks.length,
+ });
+
+ // Update planSpec with current task
+ await this.updateFeaturePlanSpec(projectPath, featureId, {
+ currentTaskId: task.id,
+ });
+
+ // Build focused prompt for this specific task
+ const taskPrompt = this.buildTaskPrompt(
+ task,
+ parsedTasks,
+ taskIndex,
+ approvedPlanContent,
+ userFeedback
+ );
+
+ // Execute task with dedicated agent
+ const taskStream = provider.executeQuery({
+ prompt: taskPrompt,
+ model: bareModel,
+ maxTurns: Math.min(maxTurns || 100, 50), // Limit turns per task
+ cwd: workDir,
+ allowedTools: allowedTools,
+ abortController,
+ mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
+ });
+
+ let taskOutput = '';
+
+ // Process task stream
+ for await (const msg of taskStream) {
+ if (msg.type === 'assistant' && msg.message?.content) {
+ for (const block of msg.message.content) {
+ if (block.type === 'text') {
+ taskOutput += block.text || '';
+ responseText += block.text || '';
+ this.emitAutoModeEvent('auto_mode_progress', {
+ featureId,
+ content: block.text,
+ });
+ } else if (block.type === 'tool_use') {
+ this.emitAutoModeEvent('auto_mode_tool', {
+ featureId,
+ tool: block.name,
+ input: block.input,
+ });
+ }
+ }
+ } else if (msg.type === 'error') {
+ throw new Error(msg.error || `Error during task ${task.id}`);
+ } else if (msg.type === 'result' && msg.subtype === 'success') {
+ taskOutput += msg.result || '';
+ responseText += msg.result || '';
+ }
+ }
+
+ // Emit task completed
+ logger.info(`Task ${task.id} completed for feature ${featureId}`);
+ this.emitAutoModeEvent('auto_mode_task_complete', {
+ featureId,
+ projectPath,
+ taskId: task.id,
+ tasksCompleted: taskIndex + 1,
+ tasksTotal: parsedTasks.length,
+ });
+
+ // Update planSpec with progress
+ await this.updateFeaturePlanSpec(projectPath, featureId, {
+ tasksCompleted: taskIndex + 1,
+ });
+
+ // Check for phase completion (group tasks by phase)
+ if (task.phase) {
+ const nextTask = parsedTasks[taskIndex + 1];
+ if (!nextTask || nextTask.phase !== task.phase) {
+ // Phase changed, emit phase complete
+ const phaseMatch = task.phase.match(/Phase\s*(\d+)/i);
+ if (phaseMatch) {
+ this.emitAutoModeEvent('auto_mode_phase_complete', {
+ featureId,
+ projectPath,
+ phaseNumber: parseInt(phaseMatch[1], 10),
+ });
+ }
+ }
+ }
+ }
+
+ logger.info(`All ${parsedTasks.length} tasks completed for feature ${featureId}`);
+ } else {
+ // No parsed tasks - fall back to single-agent execution
+ logger.info(
+ `No parsed tasks, using single-agent execution for feature ${featureId}`
+ );
+
+ const continuationPrompt = `The plan/specification has been approved. Now implement it.
+${userFeedback ? `\n## User Feedback\n${userFeedback}\n` : ''}
+## Approved Plan
+
+${approvedPlanContent}
+
+## Instructions
+
+Implement all the changes described in the plan above.`;
+
+ const continuationStream = provider.executeQuery({
+ prompt: continuationPrompt,
+ model: bareModel,
+ maxTurns: maxTurns,
cwd: workDir,
allowedTools: allowedTools,
abortController,
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
- mcpAutoApproveTools: mcpPermissions.mcpAutoApproveTools,
- mcpUnrestrictedTools: mcpPermissions.mcpUnrestrictedTools,
});
- let taskOutput = '';
-
- // Process task stream
- for await (const msg of taskStream) {
+ for await (const msg of continuationStream) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text') {
- taskOutput += block.text || '';
responseText += block.text || '';
this.emitAutoModeEvent('auto_mode_progress', {
featureId,
@@ -2743,152 +3061,83 @@ After generating the revised spec, output:
}
}
} else if (msg.type === 'error') {
- throw new Error(msg.error || `Error during task ${task.id}`);
+ throw new Error(msg.error || 'Unknown error during implementation');
} else if (msg.type === 'result' && msg.subtype === 'success') {
- taskOutput += msg.result || '';
responseText += msg.result || '';
}
}
-
- // Emit task completed
- console.log(`[AutoMode] Task ${task.id} completed for feature ${featureId}`);
- this.emitAutoModeEvent('auto_mode_task_complete', {
- featureId,
- projectPath,
- taskId: task.id,
- tasksCompleted: taskIndex + 1,
- tasksTotal: parsedTasks.length,
- });
-
- // Update planSpec with progress
- await this.updateFeaturePlanSpec(projectPath, featureId, {
- tasksCompleted: taskIndex + 1,
- });
-
- // Check for phase completion (group tasks by phase)
- if (task.phase) {
- const nextTask = parsedTasks[taskIndex + 1];
- if (!nextTask || nextTask.phase !== task.phase) {
- // Phase changed, emit phase complete
- const phaseMatch = task.phase.match(/Phase\s*(\d+)/i);
- if (phaseMatch) {
- this.emitAutoModeEvent('auto_mode_phase_complete', {
- featureId,
- projectPath,
- phaseNumber: parseInt(phaseMatch[1], 10),
- });
- }
- }
- }
}
- console.log(
- `[AutoMode] All ${parsedTasks.length} tasks completed for feature ${featureId}`
- );
- } else {
- // No parsed tasks - fall back to single-agent execution
- console.log(
- `[AutoMode] No parsed tasks, using single-agent execution for feature ${featureId}`
- );
-
- const continuationPrompt = `The plan/specification has been approved. Now implement it.
-${userFeedback ? `\n## User Feedback\n${userFeedback}\n` : ''}
-## Approved Plan
-
-${approvedPlanContent}
-
-## Instructions
-
-Implement all the changes described in the plan above.`;
-
- const continuationStream = provider.executeQuery({
- prompt: continuationPrompt,
- model: finalModel,
- maxTurns: maxTurns,
- cwd: workDir,
- allowedTools: allowedTools,
- abortController,
- mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
- mcpAutoApproveTools: mcpPermissions.mcpAutoApproveTools,
- mcpUnrestrictedTools: mcpPermissions.mcpUnrestrictedTools,
- });
-
- for await (const msg of continuationStream) {
- if (msg.type === 'assistant' && msg.message?.content) {
- for (const block of msg.message.content) {
- if (block.type === 'text') {
- responseText += block.text || '';
- this.emitAutoModeEvent('auto_mode_progress', {
- featureId,
- content: block.text,
- });
- } else if (block.type === 'tool_use') {
- this.emitAutoModeEvent('auto_mode_tool', {
- featureId,
- tool: block.name,
- input: block.input,
- });
- }
- }
- } else if (msg.type === 'error') {
- throw new Error(msg.error || 'Unknown error during implementation');
- } else if (msg.type === 'result' && msg.subtype === 'success') {
- responseText += msg.result || '';
- }
- }
+ logger.info(`Implementation completed for feature ${featureId}`);
+ // Exit the original stream loop since continuation is done
+ break streamLoop;
}
- console.log(`[AutoMode] Implementation completed for feature ${featureId}`);
- // Exit the original stream loop since continuation is done
- break streamLoop;
- }
-
- // Only emit progress for non-marker text (marker was already handled above)
- if (!specDetected) {
- console.log(
- `[AutoMode] Emitting progress event for ${featureId}, content length: ${block.text?.length || 0}`
- );
- this.emitAutoModeEvent('auto_mode_progress', {
+ // Only emit progress for non-marker text (marker was already handled above)
+ if (!specDetected) {
+ logger.info(
+ `Emitting progress event for ${featureId}, content length: ${block.text?.length || 0}`
+ );
+ this.emitAutoModeEvent('auto_mode_progress', {
+ featureId,
+ content: block.text,
+ });
+ }
+ } else if (block.type === 'tool_use') {
+ // Emit event for real-time UI
+ this.emitAutoModeEvent('auto_mode_tool', {
featureId,
- content: block.text,
+ tool: block.name,
+ input: block.input,
});
- }
- } else if (block.type === 'tool_use') {
- // Emit event for real-time UI
- this.emitAutoModeEvent('auto_mode_tool', {
- featureId,
- tool: block.name,
- input: block.input,
- });
- // Also add to file output for persistence
- if (responseText.length > 0 && !responseText.endsWith('\n')) {
- responseText += '\n';
+ // Also add to file output for persistence
+ if (responseText.length > 0 && !responseText.endsWith('\n')) {
+ responseText += '\n';
+ }
+ responseText += `\n🔧 Tool: ${block.name}\n`;
+ if (block.input) {
+ responseText += `Input: ${JSON.stringify(block.input, null, 2)}\n`;
+ }
+ scheduleWrite();
}
- responseText += `\n🔧 Tool: ${block.name}\n`;
- if (block.input) {
- responseText += `Input: ${JSON.stringify(block.input, null, 2)}\n`;
- }
- scheduleWrite();
}
+ } else if (msg.type === 'error') {
+ // Handle error messages
+ throw new Error(msg.error || 'Unknown error');
+ } else if (msg.type === 'result' && msg.subtype === 'success') {
+ // Don't replace responseText - the accumulated content is the full history
+ // The msg.result is just a summary which would lose all tool use details
+ // Just ensure final write happens
+ scheduleWrite();
}
- } else if (msg.type === 'error') {
- // Handle error messages
- throw new Error(msg.error || 'Unknown error');
- } else if (msg.type === 'result' && msg.subtype === 'success') {
- // Don't replace responseText - the accumulated content is the full history
- // The msg.result is just a summary which would lose all tool use details
- // Just ensure final write happens
- scheduleWrite();
+ }
+
+ // Final write - ensure all accumulated content is saved (on success path)
+ await writeToFile();
+
+ // Flush remaining raw output (only if enabled, on success path)
+ if (enableRawOutput && rawOutputLines.length > 0) {
+ try {
+ await secureFs.mkdir(path.dirname(rawOutputPath), { recursive: true });
+ await secureFs.appendFile(rawOutputPath, rawOutputLines.join('\n') + '\n');
+ } catch (error) {
+ logger.error(`Failed to write final raw output for ${featureId}:`, error);
+ }
+ }
+ } finally {
+ clearInterval(streamHeartbeat);
+ // ALWAYS clear pending timeouts to prevent memory leaks
+ // This runs on success, error, or abort
+ if (writeTimeout) {
+ clearTimeout(writeTimeout);
+ writeTimeout = null;
+ }
+ if (rawWriteTimeout) {
+ clearTimeout(rawWriteTimeout);
+ rawWriteTimeout = null;
}
}
-
- // Clear any pending timeout and do a final write to ensure all content is saved
- if (writeTimeout) {
- clearTimeout(writeTimeout);
- }
- // Final write - ensure all accumulated content is saved
- await writeToFile();
}
private async executeFeatureWithContext(
@@ -3136,4 +3385,333 @@ Begin implementing task ${task.id} now.`;
}
});
}
+
+ // ============================================================================
+ // Execution State Persistence - For recovery after server restart
+ // ============================================================================
+
+ /**
+ * Save execution state to disk for recovery after server restart
+ */
+ private async saveExecutionState(projectPath: string): Promise<void> {
+ try {
+ await ensureAutomakerDir(projectPath);
+ const statePath = getExecutionStatePath(projectPath);
+ const state: ExecutionState = {
+ version: 1,
+ autoLoopWasRunning: this.autoLoopRunning,
+ maxConcurrency: this.config?.maxConcurrency ?? 3,
+ projectPath,
+ runningFeatureIds: Array.from(this.runningFeatures.keys()),
+ savedAt: new Date().toISOString(),
+ };
+ await secureFs.writeFile(statePath, JSON.stringify(state, null, 2), 'utf-8');
+ logger.info(`Saved execution state: ${state.runningFeatureIds.length} running features`);
+ } catch (error) {
+ logger.error('Failed to save execution state:', error);
+ }
+ }
+
+ /**
+ * Load execution state from disk
+ */
+ private async loadExecutionState(projectPath: string): Promise<ExecutionState> {
+ try {
+ const statePath = getExecutionStatePath(projectPath);
+ const content = (await secureFs.readFile(statePath, 'utf-8')) as string;
+ const state = JSON.parse(content) as ExecutionState;
+ return state;
+ } catch (error) {
+ if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
+ logger.error('Failed to load execution state:', error);
+ }
+ return DEFAULT_EXECUTION_STATE;
+ }
+ }
+
+ /**
+ * Clear execution state (called on successful shutdown or when auto-loop stops)
+ */
+ private async clearExecutionState(projectPath: string): Promise<void> {
+ try {
+ const statePath = getExecutionStatePath(projectPath);
+ await secureFs.unlink(statePath);
+ logger.info('Cleared execution state');
+ } catch (error) {
+ if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
+ logger.error('Failed to clear execution state:', error);
+ }
+ }
+ }
+
+ /**
+ * Check for and resume interrupted features after server restart
+ * This should be called during server initialization
+ */
+ async resumeInterruptedFeatures(projectPath: string): Promise<void> {
+ logger.info('Checking for interrupted features to resume...');
+
+ // Load all features and find those that were interrupted
+ const featuresDir = getFeaturesDir(projectPath);
+
+ try {
+ const entries = await secureFs.readdir(featuresDir, { withFileTypes: true });
+ const interruptedFeatures: Feature[] = [];
+
+ for (const entry of entries) {
+ if (entry.isDirectory()) {
+ const featurePath = path.join(featuresDir, entry.name, 'feature.json');
+ try {
+ const data = (await secureFs.readFile(featurePath, 'utf-8')) as string;
+ const feature = JSON.parse(data) as Feature;
+
+ // Check if feature was interrupted (in_progress or pipeline_*)
+ if (
+ feature.status === 'in_progress' ||
+ (feature.status && feature.status.startsWith('pipeline_'))
+ ) {
+ // Verify it has existing context (agent-output.md)
+ const featureDir = getFeatureDir(projectPath, feature.id);
+ const contextPath = path.join(featureDir, 'agent-output.md');
+ try {
+ await secureFs.access(contextPath);
+ interruptedFeatures.push(feature);
+ logger.info(
+ `Found interrupted feature: ${feature.id} (${feature.title}) - status: ${feature.status}`
+ );
+ } catch {
+ // No context file, skip this feature - it will be restarted fresh
+ logger.info(`Interrupted feature ${feature.id} has no context, will restart fresh`);
+ }
+ }
+ } catch {
+ // Skip invalid features
+ }
+ }
+ }
+
+ if (interruptedFeatures.length === 0) {
+ logger.info('No interrupted features found');
+ return;
+ }
+
+ logger.info(`Found ${interruptedFeatures.length} interrupted feature(s) to resume`);
+
+ // Emit event to notify UI
+ this.emitAutoModeEvent('auto_mode_resuming_features', {
+ message: `Resuming ${interruptedFeatures.length} interrupted feature(s) after server restart`,
+ projectPath,
+ featureIds: interruptedFeatures.map((f) => f.id),
+ features: interruptedFeatures.map((f) => ({
+ id: f.id,
+ title: f.title,
+ status: f.status,
+ })),
+ });
+
+ // Resume each interrupted feature
+ for (const feature of interruptedFeatures) {
+ try {
+ logger.info(`Resuming feature: ${feature.id} (${feature.title})`);
+ // Use resumeFeature which will detect the existing context and continue
+ await this.resumeFeature(projectPath, feature.id, true);
+ } catch (error) {
+ logger.error(`Failed to resume feature ${feature.id}:`, error);
+ // Continue with other features
+ }
+ }
+ } catch (error) {
+ if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
+ logger.info('No features directory found, nothing to resume');
+ } else {
+ logger.error('Error checking for interrupted features:', error);
+ }
+ }
+ }
+
+ /**
+ * Extract and record learnings from a completed feature
+ * Uses a quick Claude call to identify important decisions and patterns
+ */
+ private async recordLearningsFromFeature(
+ projectPath: string,
+ feature: Feature,
+ agentOutput: string
+ ): Promise<void> {
+ if (!agentOutput || agentOutput.length < 100) {
+ // Not enough output to extract learnings from
+ logger.info(
+ `Skipping learning extraction - output too short (${agentOutput?.length || 0} chars)`
+ );
+ return;
+ }
+
+ logger.info(
+ `Extracting learnings from feature "${feature.title}" (${agentOutput.length} chars)`
+ );
+
+ // Limit output to avoid token limits
+ const truncatedOutput = agentOutput.length > 10000 ? agentOutput.slice(-10000) : agentOutput;
+
+ const userPrompt = `You are an Architecture Decision Record (ADR) extractor. Analyze this implementation and return ONLY JSON with learnings. No explanations.
+
+Feature: "${feature.title}"
+
+Implementation log:
+${truncatedOutput}
+
+Extract MEANINGFUL learnings - not obvious things. For each, capture:
+- DECISIONS: Why this approach vs alternatives? What would break if changed?
+- GOTCHAS: What was unexpected? What's the root cause? How to avoid?
+- PATTERNS: Why this pattern? What problem does it solve? Trade-offs?
+
+JSON format ONLY (no markdown, no text):
+{"learnings": [{
+ "category": "architecture|api|ui|database|auth|testing|performance|security|gotchas",
+ "type": "decision|gotcha|pattern",
+ "content": "What was done/learned",
+ "context": "Problem being solved or situation faced",
+ "why": "Reasoning - why this approach",
+ "rejected": "Alternative considered and why rejected",
+ "tradeoffs": "What became easier/harder",
+ "breaking": "What breaks if this is changed/removed"
+}]}
+
+IMPORTANT: Only include NON-OBVIOUS learnings with real reasoning. Skip trivial patterns.
+If nothing notable: {"learnings": []}`;
+
+ try {
+ // Get model from phase settings
+ const settings = await this.settingsService?.getGlobalSettings();
+ const phaseModelEntry =
+ settings?.phaseModels?.memoryExtractionModel || DEFAULT_PHASE_MODELS.memoryExtractionModel;
+ const { model } = resolvePhaseModel(phaseModelEntry);
+
+ const result = await simpleQuery({
+ prompt: userPrompt,
+ model,
+ cwd: projectPath,
+ maxTurns: 1,
+ allowedTools: [],
+ systemPrompt:
+ 'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
+ });
+
+ const responseText = result.text;
+
+ logger.info(`Learning extraction response: ${responseText.length} chars`);
+ logger.info(`Response preview: ${responseText.substring(0, 300)}`);
+
+ // Parse the response - handle JSON in markdown code blocks or raw
+ let jsonStr: string | null = null;
+
+ // First try to find JSON in markdown code blocks
+ const codeBlockMatch = responseText.match(/```(?:json)?\s*(\{[\s\S]*?\})\s*```/);
+ if (codeBlockMatch) {
+ logger.info('Found JSON in code block');
+ jsonStr = codeBlockMatch[1];
+ } else {
+ // Fall back to finding balanced braces containing "learnings"
+ // Use a more precise approach: find the opening brace before "learnings"
+ const learningsIndex = responseText.indexOf('"learnings"');
+ if (learningsIndex !== -1) {
+ // Find the opening brace before "learnings"
+ let braceStart = responseText.lastIndexOf('{', learningsIndex);
+ if (braceStart !== -1) {
+ // Find matching closing brace
+ let braceCount = 0;
+ let braceEnd = -1;
+ for (let i = braceStart; i < responseText.length; i++) {
+ if (responseText[i] === '{') braceCount++;
+ if (responseText[i] === '}') braceCount--;
+ if (braceCount === 0) {
+ braceEnd = i;
+ break;
+ }
+ }
+ if (braceEnd !== -1) {
+ jsonStr = responseText.substring(braceStart, braceEnd + 1);
+ }
+ }
+ }
+ }
+
+ if (!jsonStr) {
+ logger.info('Could not extract JSON from response');
+ return;
+ }
+
+ logger.info(`Extracted JSON: ${jsonStr.substring(0, 200)}`);
+
+ let parsed: { learnings?: unknown[] };
+ try {
+ parsed = JSON.parse(jsonStr);
+ } catch {
+ logger.warn('Failed to parse learnings JSON:', jsonStr.substring(0, 200));
+ return;
+ }
+
+ if (!parsed.learnings || !Array.isArray(parsed.learnings)) {
+ logger.info('No learnings array in parsed response');
+ return;
+ }
+
+ logger.info(`Found ${parsed.learnings.length} potential learnings`);
+
+ // Valid learning types
+ const validTypes = new Set(['decision', 'learning', 'pattern', 'gotcha']);
+
+ // Record each learning
+ for (const item of parsed.learnings) {
+ // Validate required fields with proper type narrowing
+ if (!item || typeof item !== 'object') continue;
+
+ const learning = item as Record<string, unknown>;
+ if (
+ !learning.category ||
+ typeof learning.category !== 'string' ||
+ !learning.content ||
+ typeof learning.content !== 'string' ||
+ !learning.content.trim()
+ ) {
+ continue;
+ }
+
+ // Validate and normalize type
+ const typeStr = typeof learning.type === 'string' ? learning.type : 'learning';
+ const learningType = validTypes.has(typeStr)
+ ? (typeStr as 'decision' | 'learning' | 'pattern' | 'gotcha')
+ : 'learning';
+
+ logger.info(
+ `Appending learning: category=${learning.category}, type=${learningType}`
+ );
+ await appendLearning(
+ projectPath,
+ {
+ category: learning.category,
+ type: learningType,
+ content: learning.content.trim(),
+ context: typeof learning.context === 'string' ? learning.context : undefined,
+ why: typeof learning.why === 'string' ? learning.why : undefined,
+ rejected: typeof learning.rejected === 'string' ? learning.rejected : undefined,
+ tradeoffs: typeof learning.tradeoffs === 'string' ? learning.tradeoffs : undefined,
+ breaking: typeof learning.breaking === 'string' ? learning.breaking : undefined,
+ },
+ secureFs as Parameters<typeof appendLearning>[2]
+ );
+ }
+
+ const validLearnings = parsed.learnings.filter(
+ (l) => l && typeof l === 'object' && (l as Record<string, unknown>).content
+ );
+ if (validLearnings.length > 0) {
+ logger.info(
+ `Recorded ${parsed.learnings.length} learning(s) from feature ${feature.id}`
+ );
+ }
+ } catch (error) {
+ logger.warn(`Failed to extract learnings from feature ${feature.id}:`, error);
+ }
+ }
}
diff --git a/apps/server/src/services/claude-usage-service.ts b/apps/server/src/services/claude-usage-service.ts
index 098ce29c..64ace35d 100644
--- a/apps/server/src/services/claude-usage-service.ts
+++ b/apps/server/src/services/claude-usage-service.ts
@@ -2,6 +2,7 @@ import { spawn } from 'child_process';
import * as os from 'os';
import * as pty from 'node-pty';
import { ClaudeUsage } from '../routes/claude/types.js';
+import { createLogger } from '@automaker/utils';
/**
* Claude Usage Service
@@ -14,6 +15,8 @@ import { ClaudeUsage } from '../routes/claude/types.js';
* - macOS: Uses 'expect' command for PTY
* - Windows/Linux: Uses node-pty for PTY
*/
+const logger = createLogger('ClaudeUsage');
+
export class ClaudeUsageService {
private claudeBinary = 'claude';
private timeout = 30000; // 30 second timeout
@@ -164,21 +167,40 @@ export class ClaudeUsageService {
const shell = this.isWindows ? 'cmd.exe' : '/bin/sh';
const args = this.isWindows ? ['/c', 'claude', '/usage'] : ['-c', 'claude /usage'];
- const ptyProcess = pty.spawn(shell, args, {
- name: 'xterm-256color',
- cols: 120,
- rows: 30,
- cwd: workingDirectory,
- env: {
- ...process.env,
- TERM: 'xterm-256color',
- } as Record<string, string>,
- });
+ let ptyProcess: any = null;
+
+ try {
+ ptyProcess = pty.spawn(shell, args, {
+ name: 'xterm-256color',
+ cols: 120,
+ rows: 30,
+ cwd: workingDirectory,
+ env: {
+ ...process.env,
+ TERM: 'xterm-256color',
+ } as Record<string, string>,
+ });
+ } catch (spawnError) {
+ // pty.spawn() can throw synchronously if the native module fails to load
+ // or if PTY is not available in the current environment (e.g., containers without /dev/pts)
+ const errorMessage = spawnError instanceof Error ? spawnError.message : String(spawnError);
+ logger.error('[executeClaudeUsageCommandPty] Failed to spawn PTY:', errorMessage);
+
+ // Return a user-friendly error instead of crashing
+ reject(
+ new Error(
+ `Unable to access terminal: ${errorMessage}. Claude CLI may not be available or PTY support is limited in this environment.`
+ )
+ );
+ return;
+ }
const timeoutId = setTimeout(() => {
if (!settled) {
settled = true;
- ptyProcess.kill();
+ if (ptyProcess && !ptyProcess.killed) {
+ ptyProcess.kill();
+ }
// Don't fail if we have data - return it instead
if (output.includes('Current session')) {
resolve(output);
@@ -188,7 +210,7 @@ export class ClaudeUsageService {
}
}, this.timeout);
- ptyProcess.onData((data) => {
+ ptyProcess.onData((data: string) => {
output += data;
// Check if we've seen the usage data (look for "Current session")
@@ -196,12 +218,12 @@ export class ClaudeUsageService {
hasSeenUsageData = true;
// Wait for full output, then send escape to exit
setTimeout(() => {
- if (!settled) {
+ if (!settled && ptyProcess && !ptyProcess.killed) {
ptyProcess.write('\x1b'); // Send escape key
// Fallback: if ESC doesn't exit (Linux), use SIGTERM after 2s
setTimeout(() => {
- if (!settled) {
+ if (!settled && ptyProcess && !ptyProcess.killed) {
ptyProcess.kill('SIGTERM');
}
}, 2000);
@@ -212,14 +234,14 @@ export class ClaudeUsageService {
// Fallback: if we see "Esc to cancel" but haven't seen usage data yet
if (!hasSeenUsageData && output.includes('Esc to cancel')) {
setTimeout(() => {
- if (!settled) {
+ if (!settled && ptyProcess && !ptyProcess.killed) {
ptyProcess.write('\x1b'); // Send escape key
}
}, 3000);
}
});
- ptyProcess.onExit(({ exitCode }) => {
+ ptyProcess.onExit(({ exitCode }: { exitCode: number }) => {
clearTimeout(timeoutId);
if (settled) return;
settled = true;
diff --git a/apps/server/src/services/codex-app-server-service.ts b/apps/server/src/services/codex-app-server-service.ts
new file mode 100644
index 00000000..ecfb99da
--- /dev/null
+++ b/apps/server/src/services/codex-app-server-service.ts
@@ -0,0 +1,212 @@
+import { spawn, type ChildProcess } from 'child_process';
+import readline from 'readline';
+import { findCodexCliPath } from '@automaker/platform';
+import { createLogger } from '@automaker/utils';
+import type {
+ AppServerModelResponse,
+ AppServerAccountResponse,
+ AppServerRateLimitsResponse,
+ JsonRpcRequest,
+} from '@automaker/types';
+
+const logger = createLogger('CodexAppServer');
+
+/**
+ * CodexAppServerService
+ *
+ * Centralized service for communicating with Codex CLI's app-server via JSON-RPC protocol.
+ * Handles process spawning, JSON-RPC messaging, and cleanup.
+ *
+ * Connection strategy: Spawn on-demand (new process for each method call)
+ */
+export class CodexAppServerService {
+ private cachedCliPath: string | null = null;
+
+ /**
+ * Check if Codex CLI is available on the system
+ */
+ async isAvailable(): Promise<boolean> {
+ this.cachedCliPath = await findCodexCliPath();
+ return Boolean(this.cachedCliPath);
+ }
+
+ /**
+ * Fetch available models from app-server
+ */
+ async getModels(): Promise<AppServerModelResponse | null> {
+ const result = await this.executeJsonRpc((sendRequest) => {
+ return sendRequest('model/list', {});
+ });
+
+ if (result) {
+ logger.info(`[getModels] ✓ Fetched ${result.data.length} models`);
+ }
+
+ return result;
+ }
+
+ /**
+ * Fetch account information from app-server
+ */
+ async getAccount(): Promise<AppServerAccountResponse | null> {
+ return this.executeJsonRpc((sendRequest) => {
+ return sendRequest('account/read', { refreshToken: false });
+ });
+ }
+
+ /**
+ * Fetch rate limits from app-server
+ */
+ async getRateLimits(): Promise<AppServerRateLimitsResponse | null> {
+ return this.executeJsonRpc((sendRequest) => {
+ return sendRequest('account/rateLimits/read', {});
+ });
+ }
+
+ /**
+ * Execute JSON-RPC requests via Codex app-server
+ *
+ * This method:
+ * 1. Spawns a new `codex app-server` process
+ * 2. Handles JSON-RPC initialization handshake
+ * 3. Executes user-provided requests
+ * 4. Cleans up the process
+ *
+ * @param requestFn - Function that receives sendRequest helper and returns a promise
+ * @returns Result of the JSON-RPC request or null on failure
+ */
+ private async executeJsonRpc<T>(
+ requestFn: (sendRequest: (method: string, params?: unknown) => Promise<T>) => Promise<T>
+ ): Promise<T | null> {
+ let childProcess: ChildProcess | null = null;
+
+ try {
+ const cliPath = this.cachedCliPath || (await findCodexCliPath());
+
+ if (!cliPath) {
+ return null;
+ }
+
+ // On Windows, .cmd files must be run through shell
+ const needsShell = process.platform === 'win32' && cliPath.toLowerCase().endsWith('.cmd');
+
+ childProcess = spawn(cliPath, ['app-server'], {
+ cwd: process.cwd(),
+ env: {
+ ...process.env,
+ TERM: 'dumb',
+ },
+ stdio: ['pipe', 'pipe', 'pipe'],
+ shell: needsShell,
+ });
+
+ if (!childProcess.stdin || !childProcess.stdout) {
+ throw new Error('Failed to create stdio pipes');
+ }
+
+ // Setup readline for reading JSONL responses
+ const rl = readline.createInterface({
+ input: childProcess.stdout,
+ crlfDelay: Infinity,
+ });
+
+ // Message ID counter for JSON-RPC
+ let messageId = 0;
+ const pendingRequests = new Map<
+ number,
+ {
+ resolve: (value: unknown) => void;
+ reject: (error: Error) => void;
+ timeout: NodeJS.Timeout;
+ }
+ >();
+
+ // Process incoming messages
+ rl.on('line', (line) => {
+ if (!line.trim()) return;
+
+ try {
+ const message = JSON.parse(line);
+
+ // Handle response to our request
+ if ('id' in message && message.id !== undefined) {
+ const pending = pendingRequests.get(message.id);
+ if (pending) {
+ clearTimeout(pending.timeout);
+ pendingRequests.delete(message.id);
+ if (message.error) {
+ pending.reject(new Error(message.error.message || 'Unknown error'));
+ } else {
+ pending.resolve(message.result);
+ }
+ }
+ }
+ // Ignore notifications (no id field)
+ } catch {
+ // Ignore parse errors for non-JSON lines
+ }
+ });
+
+ // Helper to send JSON-RPC request and wait for response
+ const sendRequest = (method: string, params?: unknown): Promise