Mirror of https://github.com/AutoMaker-Org/automaker.git (synced 2026-01-30 06:12:03 +00:00)
Compare commits
239 Commits
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, new file, 108 lines)
@@ -0,0 +1,108 @@
name: Feature Request
description: Suggest a new feature or enhancement for Automaker
title: '[Feature]: '
labels: ['enhancement']
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to suggest a feature! Please fill out the form below to help us understand your request.

  - type: dropdown
    id: feature-area
    attributes:
      label: Feature Area
      description: Which area of Automaker does this feature relate to?
      options:
        - UI/UX (User Interface)
        - Agent/AI
        - Kanban Board
        - Git/Worktree Management
        - Project Management
        - Settings/Configuration
        - Documentation
        - Performance
        - Other
      default: 0
    validations:
      required: true

  - type: dropdown
    id: priority
    attributes:
      label: Priority
      description: How important is this feature to your workflow?
      options:
        - Nice to have
        - Would improve my workflow
        - Critical for my use case
      default: 0
    validations:
      required: true

  - type: textarea
    id: problem-statement
    attributes:
      label: Problem Statement
      description: Is your feature request related to a problem? Please describe the problem you're trying to solve.
      placeholder: A clear and concise description of what the problem is. Ex. I'm always frustrated when...
    validations:
      required: true

  - type: textarea
    id: proposed-solution
    attributes:
      label: Proposed Solution
      description: Describe the solution you'd like to see implemented.
      placeholder: A clear and concise description of what you want to happen.
    validations:
      required: true

  - type: textarea
    id: alternatives-considered
    attributes:
      label: Alternatives Considered
      description: Describe any alternative solutions or workarounds you've considered.
      placeholder: A clear and concise description of any alternative solutions or features you've considered.
    validations:
      required: false

  - type: textarea
    id: use-cases
    attributes:
      label: Use Cases
      description: Describe specific scenarios where this feature would be useful.
      placeholder: |
        1. When working on...
        2. As a user who needs to...
        3. In situations where...
    validations:
      required: false

  - type: textarea
    id: mockups
    attributes:
      label: Mockups/Screenshots
      description: If applicable, add mockups, wireframes, or screenshots to help illustrate your feature request.
      placeholder: Drag and drop images here or paste image URLs
    validations:
      required: false

  - type: textarea
    id: additional-context
    attributes:
      label: Additional Context
      description: Add any other context, references, or examples about the feature request here.
      placeholder: Any additional information that might be helpful...
    validations:
      required: false

  - type: checkboxes
    id: terms
    attributes:
      label: Checklist
      options:
        - label: I have searched existing issues to ensure this feature hasn't been requested already
          required: true
        - label: I have provided a clear description of the problem and proposed solution
          required: true
.github/workflows/e2e-tests.yml (vendored, 88 lines changed)
@@ -37,7 +37,14 @@ jobs:
git config --global user.email "ci@example.com"

- name: Start backend server
run: npm run start --workspace=apps/server &
run: |
echo "Starting backend server..."
# Start server in background and save PID
npm run start --workspace=apps/server > backend.log 2>&1 &
SERVER_PID=$!
echo "Server started with PID: $SERVER_PID"
echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV

env:
PORT: 3008
NODE_ENV: test

@@ -53,21 +60,70 @@ jobs:

- name: Wait for backend server
run: |
echo "Waiting for backend server to be ready..."

# Check if server process is running
if [ -z "$SERVER_PID" ]; then
echo "ERROR: Server PID not found in environment"
cat backend.log 2>/dev/null || echo "No backend log found"
exit 1
fi

# Check if process is actually running
if ! kill -0 $SERVER_PID 2>/dev/null; then
echo "ERROR: Server process $SERVER_PID is not running!"
echo "=== Backend logs ==="
cat backend.log
echo ""
echo "=== Recent system logs ==="
dmesg 2>/dev/null | tail -20 || echo "No dmesg available"
exit 1
fi

# Wait for health endpoint
for i in {1..60}; do
if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
echo "Backend server is ready!"
curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check response: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
echo "=== Backend logs ==="
cat backend.log
echo ""
echo "Health check response:"
curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
exit 0
fi

# Check if server process is still running
if ! kill -0 $SERVER_PID 2>/dev/null; then
echo "ERROR: Server process died during wait!"
echo "=== Backend logs ==="
cat backend.log
exit 1
fi

echo "Waiting... ($i/60)"
sleep 1
done
echo "Backend server failed to start!"
echo "Checking server status..."

echo "ERROR: Backend server failed to start within 60 seconds!"
echo "=== Backend logs ==="
cat backend.log
echo ""
echo "=== Process status ==="
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
echo ""
echo "=== Port status ==="
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
echo "Testing health endpoint..."
lsof -i :3008 2>/dev/null || echo "lsof not available or port not in use"
echo ""
echo "=== Health endpoint test ==="
curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"

# Kill the server process if it's still hanging
if kill -0 $SERVER_PID 2>/dev/null; then
echo ""
echo "Killing stuck server process..."
kill -9 $SERVER_PID 2>/dev/null || true
fi

exit 1

- name: Run E2E tests

@@ -81,6 +137,18 @@ jobs:

# Keep UI-side login/defaults consistent
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests

- name: Print backend logs on failure
if: failure()
run: |
echo "=== E2E Tests Failed - Backend Logs ==="
cat backend.log 2>/dev/null || echo "No backend log found"
echo ""
echo "=== Process status at failure ==="
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
echo ""
echo "=== Port status ==="
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"

- name: Upload Playwright report
uses: actions/upload-artifact@v4
if: always()

@@ -98,3 +166,13 @@ jobs:

apps/ui/test-results/
retention-days: 7
if-no-files-found: ignore

- name: Cleanup - Kill backend server
if: always()
run: |
if [ -n "$SERVER_PID" ]; then
echo "Cleaning up backend server (PID: $SERVER_PID)..."
kill $SERVER_PID 2>/dev/null || true
kill -9 $SERVER_PID 2>/dev/null || true
echo "Backend server cleanup complete"
fi
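For debugging workflow failures, the same start-and-wait sequence can be approximated locally. The port, health endpoint, and test API key below come from the workflow above; the Playwright invocation at the end is an assumption, since the actual "Run E2E tests" step body is not shown in this hunk.

```bash
# Start the backend exactly as the CI job does, logging to backend.log
PORT=3008 NODE_ENV=test npm run start --workspace=apps/server > backend.log 2>&1 &
SERVER_PID=$!

# Same readiness gate as the workflow: poll the health endpoint for up to 60 seconds
for i in {1..60}; do
  curl -sf http://localhost:3008/api/health > /dev/null && break
  sleep 1
done

# Assumed local E2E command; substitute whatever apps/ui actually uses
(cd apps/ui && AUTOMAKER_API_KEY=test-api-key-for-e2e-tests npx playwright test)

kill "$SERVER_PID" 2>/dev/null || true
```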
.gitignore (vendored, 12 lines changed)
@@ -73,6 +73,9 @@ blob-report/
!.env.example
!.env.local.example

# Codex config (contains API keys)
.codex/config.toml

# TypeScript
*.tsbuildinfo

@@ -84,4 +87,11 @@ docker-compose.override.yml

.claude/hans/

pnpm-lock.yaml
yarn.lock

# Fork-specific workflow files (should never be committed)
# API key files
data/.api-key
data/credentials.json
data/
.codex/
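A quick way to confirm the new rules actually cover the credential files (paths taken from the entries above):

```bash
# Prints the matching .gitignore rule for each path that is ignored;
# any path that produces no output is not covered and could still be committed.
git check-ignore -v .codex/config.toml data/.api-key data/credentials.json
```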
@@ -31,7 +31,12 @@ fi

# Ensure common system paths are in PATH (for systems without nvm)
# This helps find node/npm installed via Homebrew, system packages, etc.
export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
if [ -n "$WINDIR" ]; then
  export PATH="$PATH:/c/Program Files/nodejs:/c/Program Files (x86)/nodejs"
  export PATH="$PATH:$APPDATA/npm:$LOCALAPPDATA/Programs/nodejs"
else
  export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
fi

# Run lint-staged - works with or without nvm
# Prefer npx, fallback to npm exec, both work with system-installed Node.js
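The hunk ends before the actual lint-staged invocation, so the exact line is not shown here; a typical npx-first fallback of the kind the comment describes looks roughly like this sketch:

```bash
# Prefer npx; fall back to npm exec when npx is unavailable
if command -v npx >/dev/null 2>&1; then
  npx lint-staged
else
  npm exec -- lint-staged
fi
```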
@@ -24,6 +24,7 @@ For complete details on contribution terms and rights assignment, please review

- [Development Setup](#development-setup)
- [Project Structure](#project-structure)
- [Pull Request Process](#pull-request-process)
- [Branching Strategy (RC Branches)](#branching-strategy-rc-branches)
- [Branch Naming Convention](#branch-naming-convention)
- [Commit Message Format](#commit-message-format)
- [Submitting a Pull Request](#submitting-a-pull-request)

@@ -186,6 +187,59 @@ automaker/

This section covers everything you need to know about contributing changes through pull requests, from creating your branch to getting your code merged.

### Branching Strategy (RC Branches)

Automaker uses **Release Candidate (RC) branches** for all development work. Understanding this workflow is essential before contributing.

**How it works:**

1. **All development happens on RC branches** - We maintain version-specific RC branches (e.g., `v0.10.0rc`, `v0.11.0rc`) where all active development occurs
2. **RC branches are eventually merged to main** - Once an RC branch is stable and ready for release, it gets merged into `main`
3. **Main branch is for releases only** - The `main` branch contains only released, stable code

**Before creating a PR:**

1. **Check for the latest RC branch** - Before starting work, check the repository for the current RC branch:

   ```bash
   git fetch upstream
   git branch -r | grep rc
   ```

2. **Base your work on the RC branch** - Create your feature branch from the latest RC branch, not from `main`:

   ```bash
   # Find the latest RC branch (e.g., v0.11.0rc)
   git checkout upstream/v0.11.0rc
   git checkout -b feature/your-feature-name
   ```

3. **Target the RC branch in your PR** - When opening your pull request, set the base branch to the current RC branch, not `main`

**Example workflow:**

```bash
# 1. Fetch latest changes
git fetch upstream

# 2. Check for RC branches
git branch -r | grep rc
# Output: upstream/v0.11.0rc

# 3. Create your branch from the RC
git checkout -b feature/add-dark-mode upstream/v0.11.0rc

# 4. Make your changes and commit
git commit -m "feat: Add dark mode support"

# 5. Push to your fork
git push origin feature/add-dark-mode

# 6. Open PR targeting the RC branch (v0.11.0rc), NOT main
```

**Important:** PRs opened directly against `main` will be asked to retarget to the current RC branch.

### Branch Naming Convention

We use a consistent branch naming pattern to keep our repository organized:

@@ -275,14 +329,14 @@ Follow these steps to submit your contribution:

#### 1. Prepare Your Changes

Ensure you've synced with the latest upstream changes:
Ensure you've synced with the latest upstream changes from the RC branch:

```bash
# Fetch latest changes from upstream
git fetch upstream

# Rebase your branch on main (if needed)
git rebase upstream/main
# Rebase your branch on the current RC branch (if needed)
git rebase upstream/v0.11.0rc  # Use the current RC branch name
```

#### 2. Run Pre-submission Checks

@@ -314,18 +368,19 @@ git push origin feature/your-feature-name

1. Go to your fork on GitHub
2. Click "Compare & pull request" for your branch
3. Ensure the base repository is `AutoMaker-Org/automaker` and base branch is `main`
3. **Important:** Set the base repository to `AutoMaker-Org/automaker` and the base branch to the **current RC branch** (e.g., `v0.11.0rc`), not `main`
4. Fill out the PR template completely

#### PR Requirements Checklist

Your PR should include:

- [ ] **Targets the current RC branch** (not `main`) - see [Branching Strategy](#branching-strategy-rc-branches)
- [ ] **Clear title** describing the change (use conventional commit format)
- [ ] **Description** explaining what changed and why
- [ ] **Link to related issue** (if applicable): `Closes #123` or `Fixes #456`
- [ ] **All CI checks passing** (format, lint, build, tests)
- [ ] **No merge conflicts** with main branch
- [ ] **No merge conflicts** with the RC branch
- [ ] **Tests included** for new functionality
- [ ] **Documentation updated** if adding/changing public APIs
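If a pull request was opened against `main` by mistake, it can usually be retargeted to the current RC branch without closing it; the PR number and branch name below are placeholders:

```bash
# Point an existing PR at the RC branch instead of main
gh pr edit 123 --base v0.11.0rc
```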
DEVELOPMENT_WORKFLOW.md (new file, 253 lines)
@@ -0,0 +1,253 @@
# Development Workflow

This document defines the standard workflow for keeping a branch in sync with the upstream release candidate (RC) and for shipping feature work. It is paired with `check-sync.sh`.

## Quick Decision Rule

1. Ask the user to select a workflow:
   - **Sync Workflow** → you are maintaining the current RC branch with fixes/improvements and will push the same fixes to both origin and upstream RC when you have local commits to publish.
   - **PR Workflow** → you are starting new feature work on a new branch; upstream updates happen via PR only.
2. After the user selects, run:
   ```bash
   ./check-sync.sh
   ```
3. Use the status output to confirm alignment. If it reports **diverged**, default to merging `upstream/<TARGET_RC>` into the current branch and preserving local commits. For Sync Workflow, when the working tree is clean and you are behind upstream RC, proceed with the fetch + merge without asking for additional confirmation.

## Target RC Resolution

The target RC is resolved dynamically so the workflow stays current as the RC changes.

Resolution order:

1. Latest `upstream/v*rc` branch (auto-detected)
2. `upstream/HEAD` (fallback)
3. If neither is available, you must pass `--rc <branch>`

Override for a single run:

```bash
./check-sync.sh --rc <rc-branch>
```

## Pre-Flight Checklist

1. Confirm a clean working tree:
   ```bash
   git status
   ```
2. Confirm the current branch:
   ```bash
   git branch --show-current
   ```
3. Ensure remotes exist (origin + upstream):
   ```bash
   git remote -v
   ```

## Sync Workflow (Upstream Sync)

Use this flow when you are updating the current branch with fixes or improvements and intend to keep origin and upstream RC in lockstep.

1. **Check sync status**
   ```bash
   ./check-sync.sh
   ```
2. **Update from upstream RC before editing (no pulls)**
   - **Behind upstream RC** → fetch and merge RC into your branch:
     ```bash
     git fetch upstream
     git merge upstream/<TARGET_RC> --no-edit
     ```
     When the working tree is clean and the user selected Sync Workflow, proceed without an extra confirmation prompt.
   - **Diverged** → stop and resolve manually.
3. **Resolve conflicts if needed**
   - Handle conflicts intelligently: preserve upstream behavior and your local intent.
4. **Make changes and commit (if you are delivering fixes)**
   ```bash
   git add -A
   git commit -m "type: description"
   ```
5. **Build to verify**
   ```bash
   npm run build:packages
   npm run build
   ```
6. **Push after a successful merge to keep remotes aligned**
   - If you only merged upstream RC changes, push **origin only** to sync your fork:
     ```bash
     git push origin <branch>
     ```
   - If you have local fixes to publish, push **origin + upstream**:
     ```bash
     git push origin <branch>
     git push upstream <branch>:<TARGET_RC>
     ```
   - Always ask the user which push to perform.
     - Origin (origin-only sync):
       ```bash
       git push origin <branch>
       ```
     - Upstream RC (publish the same fixes when you have local commits):
       ```bash
       git push upstream <branch>:<TARGET_RC>
       ```
7. **Re-check sync**
   ```bash
   ./check-sync.sh
   ```

## PR Workflow (Feature Work)

Use this flow only for new feature work on a new branch. Do not push to upstream RC.

1. **Create or switch to a feature branch**
   ```bash
   git checkout -b <branch>
   ```
2. **Make changes and commit**
   ```bash
   git add -A
   git commit -m "type: description"
   ```
3. **Merge upstream RC before shipping**
   ```bash
   git merge upstream/<TARGET_RC> --no-edit
   ```
4. **Build and/or test**
   ```bash
   npm run build:packages
   npm run build
   ```
5. **Push to origin**
   ```bash
   git push -u origin <branch>
   ```
6. **Create or update the PR**
   - Use `gh pr create` or the GitHub UI.
7. **Review and follow-up**
   - Apply feedback, commit changes, and push again.
   - Re-run `./check-sync.sh` if additional upstream sync is needed.

## Conflict Resolution Checklist

1. Identify which changes are from upstream vs. local.
2. Preserve both behaviors where possible; avoid dropping either side.
3. Prefer minimal, safe integrations over refactors.
4. Re-run build commands after resolving conflicts.
5. Re-run `./check-sync.sh` to confirm status.

## Build/Test Matrix

- **Sync Workflow**: `npm run build:packages` and `npm run build`.
- **PR Workflow**: `npm run build:packages` and `npm run build` (plus relevant tests).

## Post-Sync Verification

1. `git status` should be clean.
2. `./check-sync.sh` should show expected alignment.
3. Verify recent commits with:
   ```bash
   git log --oneline -5
   ```

## check-sync.sh Usage

- Uses dynamic Target RC resolution (see above).
- Override target RC:
  ```bash
  ./check-sync.sh --rc <rc-branch>
  ```
- Optional preview limit:
  ```bash
  ./check-sync.sh --preview 10
  ```
- The script prints sync status for both origin and upstream and previews recent commits when you are behind.

## Stop Conditions

Stop and ask for guidance if any of the following are true:

- The working tree is dirty and you are about to merge or push.
- `./check-sync.sh` reports **diverged** during PR Workflow, or a merge cannot be completed.
- The script cannot resolve a target RC and requests `--rc`.
- A build fails after sync or conflict resolution.

## AI Agent Guardrails

- Always run `./check-sync.sh` before merges or pushes.
- Always ask for explicit user approval before any push command.
- Do not ask for additional confirmation before a Sync Workflow fetch + merge when the working tree is clean and the user has already selected the Sync Workflow.
- Choose Sync vs PR workflow based on intent (RC maintenance vs new feature work), not on the script's workflow hint.
- Only use force push when the user explicitly requests a history rewrite.
- Ask for explicit approval before dependency installs, branch deletion, or destructive operations.
- When resolving merge conflicts, preserve both upstream changes and local intent where possible.
- Do not create or switch to new branches unless the user explicitly requests it.

## AI Agent Decision Guidance

Agents should provide concrete, task-specific suggestions instead of repeatedly asking open-ended questions. Use the user's stated goal and the `./check-sync.sh` status to propose a default path plus one or two alternatives, and only ask for confirmation when an action requires explicit approval.

Default behavior:

- If the intent is RC maintenance, recommend the Sync Workflow and proceed with safe preparation steps (status checks, previews). If the branch is behind upstream RC, fetch and merge without additional confirmation when the working tree is clean, then push to origin to keep the fork aligned. Push upstream only when there are local fixes to publish.
- If the intent is new feature work, recommend the PR Workflow and proceed with safe preparation steps (status checks, identifying scope). Ask for approval before merges, pushes, or dependency installs.
- If `./check-sync.sh` reports **diverged** during Sync Workflow, merge `upstream/<TARGET_RC>` into the current branch and preserve local commits.
- If `./check-sync.sh` reports **diverged** during PR Workflow, stop and ask for guidance with a short explanation of the divergence and the minimal options to resolve it.

If the user's intent is RC maintenance, prefer the Sync Workflow regardless of the script hint. When the intent is new feature work, use the PR Workflow and avoid upstream RC pushes.

Suggestion format (keep it short):

- **Recommended**: one sentence with the default path and why it fits the task.
- **Alternatives**: one or two options with the tradeoff or prerequisite.
- **Approval points**: mention any upcoming actions that need explicit approval (exclude sync workflow pushes and merges).

## Failure Modes and How to Avoid Them

Sync Workflow:

- Wrong RC target: verify the auto-detected RC in `./check-sync.sh` output before merging.
- Diverged from upstream RC: stop and resolve manually before any merge or push.
- Dirty working tree: commit or stash before syncing to avoid accidental merges.
- Missing remotes: ensure both `origin` and `upstream` are configured before syncing.
- Build breaks after sync: run `npm run build:packages` and `npm run build` before pushing.

PR Workflow:

- Branch not synced to current RC: re-run `./check-sync.sh` and merge RC before shipping.
- Pushing the wrong branch: confirm `git branch --show-current` before pushing.
- Unreviewed changes: always commit and push to origin before opening or updating a PR.
- Skipped tests/builds: run the build commands before declaring the PR ready.

## Notes

- Avoid merging with uncommitted changes; commit or stash first.
- Prefer merge over rebase for PR branches; rebases rewrite history and often require a force push, which should only be done with an explicit user request.
- Use clear, conventional commit messages and split unrelated changes into separate commits.
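The dynamic resolution described under "Target RC Resolution" is handled by `check-sync.sh`; a minimal sketch of that detection, assuming RC branches follow the `v<major>.<minor>.<patch>rc` naming used in this document, might look like:

```bash
# Pick the highest-versioned upstream RC branch; fall back to upstream/HEAD
git fetch upstream --prune
TARGET_RC=$(git branch -r \
  | grep -E 'upstream/v[0-9]+\.[0-9]+\.[0-9]+rc$' \
  | sed 's|.*upstream/||' \
  | sort -V \
  | tail -n 1)
TARGET_RC=${TARGET_RC:-$(git symbolic-ref --short refs/remotes/upstream/HEAD 2>/dev/null | sed 's|^upstream/||')}
echo "Target RC: ${TARGET_RC:?no RC found, pass --rc <branch>}"
```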
Dockerfile (25 lines changed)
@@ -59,9 +59,22 @@ FROM node:22-slim AS server
ARG GIT_COMMIT_SHA=unknown
LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"

# Build arguments for user ID matching (allows matching host user for mounted volumes)
# Override at build time: docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) ...
ARG UID=1001
ARG GID=1001

# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
# Also install Playwright/Chromium system dependencies (aligns with playwright install-deps on Debian/Ubuntu)
RUN apt-get update && apt-get install -y --no-install-recommends \
    git curl bash gosu ca-certificates openssh-client \
    # Playwright/Chromium dependencies
    libglib2.0-0 libnss3 libnspr4 libdbus-1-3 libatk1.0-0 libatk-bridge2.0-0 \
    libcups2 libdrm2 libxkbcommon0 libatspi2.0-0 libxcomposite1 libxdamage1 \
    libxfixes3 libxrandr2 libgbm1 libasound2 libpango-1.0-0 libcairo2 \
    libx11-6 libx11-xcb1 libxcb1 libxext6 libxrender1 libxss1 libxtst6 \
    libxshmfence1 libgtk-3-0 libexpat1 libfontconfig1 fonts-liberation \
    xdg-utils libpangocairo-1.0-0 libpangoft2-1.0-0 libu2f-udev libvulkan1 \
    && GH_VERSION="2.63.2" \
    && ARCH=$(uname -m) \
    && case "$ARCH" in \
@@ -79,8 +92,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
RUN npm install -g @anthropic-ai/claude-code

# Create non-root user with home directory BEFORE installing Cursor CLI
RUN groupadd -g 1001 automaker && \
    useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
# Uses UID/GID build args to match host user for mounted volume permissions
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
RUN groupadd -o -g ${GID} automaker && \
    useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
    mkdir -p /home/automaker/.local/bin && \
    mkdir -p /home/automaker/.cursor && \
    chown -R automaker:automaker /home/automaker && \
@@ -95,6 +110,12 @@ RUN curl https://cursor.com/install -fsS | bash && \
    ls -la /home/automaker/.local/bin/ && \
    echo "=== PATH is: $PATH ===" && \
    (which cursor-agent && cursor-agent --version) || echo "cursor-agent installed (may need auth setup)"

# Install OpenCode CLI (for multi-provider AI model access)
RUN curl -fsSL https://opencode.ai/install | bash && \
    echo "=== Checking OpenCode CLI installation ===" && \
    ls -la /home/automaker/.local/bin/ && \
    (which opencode && opencode --version) || echo "opencode installed (may need auth setup)"
USER root

# Add PATH to profile so it's available in all interactive shells (for login shells)
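As the build-argument comments above describe, the image can be built so the container user matches the host user; the tag and mount path here are placeholders:

```bash
# Build with the host's UID/GID so files written to mounted volumes keep your ownership
docker build \
  --build-arg UID=$(id -u) \
  --build-arg GID=$(id -g) \
  --build-arg GIT_COMMIT_SHA=$(git rev-parse HEAD) \
  -t automaker:local .

# Illustrative run with a host directory mounted into the container
docker run --rm -it -v "$PWD:/workspace" automaker:local
```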
@@ -8,9 +8,17 @@
FROM node:22-slim

# Install build dependencies for native modules (node-pty) and runtime tools
# Also install Playwright/Chromium system dependencies (aligns with playwright install-deps on Debian/Ubuntu)
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 make g++ \
    git curl bash gosu ca-certificates openssh-client \
    # Playwright/Chromium dependencies
    libglib2.0-0 libnss3 libnspr4 libdbus-1-3 libatk1.0-0 libatk-bridge2.0-0 \
    libcups2 libdrm2 libxkbcommon0 libatspi2.0-0 libxcomposite1 libxdamage1 \
    libxfixes3 libxrandr2 libgbm1 libasound2 libpango-1.0-0 libcairo2 \
    libx11-6 libx11-xcb1 libxcb1 libxext6 libxrender1 libxss1 libxtst6 \
    libxshmfence1 libgtk-3-0 libexpat1 libfontconfig1 fonts-liberation \
    xdg-utils libpangocairo-1.0-0 libpangoft2-1.0-0 libu2f-udev libvulkan1 \
    && GH_VERSION="2.63.2" \
    && ARCH=$(uname -m) \
    && case "$ARCH" in \
@@ -27,9 +35,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
# Install Claude CLI globally
RUN npm install -g @anthropic-ai/claude-code

# Create non-root user
RUN groupadd -g 1001 automaker && \
    useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
# Build arguments for user ID matching (allows matching host user for mounted volumes)
# Override at build time: docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g)
ARG UID=1001
ARG GID=1001

# Create non-root user with configurable UID/GID
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
RUN groupadd -o -g ${GID} automaker && \
    useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
    mkdir -p /home/automaker/.local/bin && \
    mkdir -p /home/automaker/.cursor && \
    chown -R automaker:automaker /home/automaker && \
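For the compose-based dev image the same override applies, and the resulting IDs can be checked from inside a container; the service name here is illustrative:

```bash
# Rebuild with host UID/GID as the comment above suggests
docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g)

# Verify the container's automaker user now matches the host user
docker-compose run --rm app id automaker
```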
@@ -1,6 +1,6 @@
{
  "name": "@automaker/server",
  "version": "0.9.0",
  "version": "0.11.0",
  "description": "Backend server for Automaker - provides API for both web and Electron modes",
  "author": "AutoMaker Team",
  "license": "SEE LICENSE IN LICENSE",
@@ -32,7 +32,7 @@
  "@automaker/prompts": "1.0.0",
  "@automaker/types": "1.0.0",
  "@automaker/utils": "1.0.0",
  "@modelcontextprotocol/sdk": "1.25.1",
  "@modelcontextprotocol/sdk": "1.25.2",
  "@openai/codex-sdk": "^0.77.0",
  "cookie-parser": "1.4.7",
  "cors": "2.8.5",
@@ -55,6 +55,8 @@ import { createClaudeRoutes } from './routes/claude/index.js';
|
||||
import { ClaudeUsageService } from './services/claude-usage-service.js';
|
||||
import { createCodexRoutes } from './routes/codex/index.js';
|
||||
import { CodexUsageService } from './services/codex-usage-service.js';
|
||||
import { CodexAppServerService } from './services/codex-app-server-service.js';
|
||||
import { CodexModelCacheService } from './services/codex-model-cache-service.js';
|
||||
import { createGitHubRoutes } from './routes/github/index.js';
|
||||
import { createContextRoutes } from './routes/context/index.js';
|
||||
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
|
||||
@@ -65,6 +67,7 @@ import { createPipelineRoutes } from './routes/pipeline/index.js';
|
||||
import { pipelineService } from './services/pipeline-service.js';
|
||||
import { createIdeationRoutes } from './routes/ideation/index.js';
|
||||
import { IdeationService } from './services/ideation-service.js';
|
||||
import { getDevServerService } from './services/dev-server-service.js';
|
||||
|
||||
// Load environment variables
|
||||
dotenv.config();
|
||||
@@ -168,14 +171,25 @@ const agentService = new AgentService(DATA_DIR, events, settingsService);
|
||||
const featureLoader = new FeatureLoader();
|
||||
const autoModeService = new AutoModeService(events, settingsService);
|
||||
const claudeUsageService = new ClaudeUsageService();
|
||||
const codexUsageService = new CodexUsageService();
|
||||
const codexAppServerService = new CodexAppServerService();
|
||||
const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
|
||||
const codexUsageService = new CodexUsageService(codexAppServerService);
|
||||
const mcpTestService = new MCPTestService(settingsService);
|
||||
const ideationService = new IdeationService(events, settingsService, featureLoader);
|
||||
|
||||
// Initialize DevServerService with event emitter for real-time log streaming
|
||||
const devServerService = getDevServerService();
|
||||
devServerService.setEventEmitter(events);
|
||||
|
||||
// Initialize services
|
||||
(async () => {
|
||||
await agentService.initialize();
|
||||
logger.info('Agent service initialized');
|
||||
|
||||
// Bootstrap Codex model cache in background (don't block server startup)
|
||||
void codexModelCacheService.getModels().catch((err) => {
|
||||
logger.error('Failed to bootstrap Codex model cache:', err);
|
||||
});
|
||||
})();
|
||||
|
||||
// Run stale validation cleanup every hour to prevent memory leaks from crashed validations
|
||||
@@ -208,7 +222,7 @@ app.use('/api/sessions', createSessionsRoutes(agentService));
|
||||
app.use('/api/features', createFeaturesRoutes(featureLoader));
|
||||
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
|
||||
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
|
||||
app.use('/api/worktree', createWorktreeRoutes());
|
||||
app.use('/api/worktree', createWorktreeRoutes(events, settingsService));
|
||||
app.use('/api/git', createGitRoutes());
|
||||
app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
|
||||
app.use('/api/models', createModelsRoutes());
|
||||
@@ -219,7 +233,7 @@ app.use('/api/templates', createTemplatesRoutes());
|
||||
app.use('/api/terminal', createTerminalRoutes());
|
||||
app.use('/api/settings', createSettingsRoutes(settingsService));
|
||||
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
|
||||
app.use('/api/codex', createCodexRoutes(codexUsageService));
|
||||
app.use('/api/codex', createCodexRoutes(codexUsageService, codexModelCacheService));
|
||||
app.use('/api/github', createGitHubRoutes(events, settingsService));
|
||||
app.use('/api/context', createContextRoutes(settingsService));
|
||||
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
|
||||
@@ -588,6 +602,26 @@ const startServer = (port: number) => {
|
||||
|
||||
startServer(PORT);
|
||||
|
||||
// Global error handlers to prevent crashes from uncaught errors
|
||||
process.on('unhandledRejection', (reason: unknown, _promise: Promise<unknown>) => {
|
||||
logger.error('Unhandled Promise Rejection:', {
|
||||
reason: reason instanceof Error ? reason.message : String(reason),
|
||||
stack: reason instanceof Error ? reason.stack : undefined,
|
||||
});
|
||||
// Don't exit - log the error and continue running
|
||||
// This prevents the server from crashing due to unhandled rejections
|
||||
});
|
||||
|
||||
process.on('uncaughtException', (error: Error) => {
|
||||
logger.error('Uncaught Exception:', {
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
});
|
||||
// Exit on uncaught exceptions to prevent undefined behavior
|
||||
// The process is in an unknown state after an uncaught exception
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// Graceful shutdown
|
||||
process.on('SIGTERM', () => {
|
||||
logger.info('SIGTERM received, shutting down...');
|
||||
|
||||
@@ -5,9 +5,11 @@
|
||||
* Never assumes authenticated - only returns true if CLI confirms.
|
||||
*/
|
||||
|
||||
import { spawnProcess, getCodexAuthPath } from '@automaker/platform';
|
||||
import { spawnProcess } from '@automaker/platform';
|
||||
import { findCodexCliPath } from '@automaker/platform';
|
||||
import * as fs from 'fs';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('CodexAuth');
|
||||
|
||||
const CODEX_COMMAND = 'codex';
|
||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||
@@ -26,36 +28,16 @@ export interface CodexAuthCheckResult {
|
||||
export async function checkCodexAuthentication(
|
||||
cliPath?: string | null
|
||||
): Promise<CodexAuthCheckResult> {
|
||||
console.log('[CodexAuth] checkCodexAuthentication called with cliPath:', cliPath);
|
||||
|
||||
const resolvedCliPath = cliPath || (await findCodexCliPath());
|
||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
||||
|
||||
console.log('[CodexAuth] resolvedCliPath:', resolvedCliPath);
|
||||
console.log('[CodexAuth] hasApiKey:', hasApiKey);
|
||||
|
||||
// Debug: Check auth file
|
||||
const authFilePath = getCodexAuthPath();
|
||||
console.log('[CodexAuth] Auth file path:', authFilePath);
|
||||
try {
|
||||
const authFileExists = fs.existsSync(authFilePath);
|
||||
console.log('[CodexAuth] Auth file exists:', authFileExists);
|
||||
if (authFileExists) {
|
||||
const authContent = fs.readFileSync(authFilePath, 'utf-8');
|
||||
console.log('[CodexAuth] Auth file content:', authContent.substring(0, 500)); // First 500 chars
|
||||
}
|
||||
} catch (error) {
|
||||
console.log('[CodexAuth] Error reading auth file:', error);
|
||||
}
|
||||
|
||||
// If CLI is not installed, cannot be authenticated
|
||||
if (!resolvedCliPath) {
|
||||
console.log('[CodexAuth] No CLI path found, returning not authenticated');
|
||||
logger.info('CLI not found');
|
||||
return { authenticated: false, method: 'none' };
|
||||
}
|
||||
|
||||
try {
|
||||
console.log('[CodexAuth] Running: ' + resolvedCliPath + ' login status');
|
||||
const result = await spawnProcess({
|
||||
command: resolvedCliPath || CODEX_COMMAND,
|
||||
args: ['login', 'status'],
|
||||
@@ -66,33 +48,21 @@ export async function checkCodexAuthentication(
|
||||
},
|
||||
});
|
||||
|
||||
console.log('[CodexAuth] Command result:');
|
||||
console.log('[CodexAuth] exitCode:', result.exitCode);
|
||||
console.log('[CodexAuth] stdout:', JSON.stringify(result.stdout));
|
||||
console.log('[CodexAuth] stderr:', JSON.stringify(result.stderr));
|
||||
|
||||
// Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
|
||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||
const isLoggedIn = combinedOutput.includes('logged in');
|
||||
console.log('[CodexAuth] isLoggedIn (contains "logged in" in stdout or stderr):', isLoggedIn);
|
||||
|
||||
if (result.exitCode === 0 && isLoggedIn) {
|
||||
// Determine auth method based on what we know
|
||||
const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
|
||||
console.log('[CodexAuth] Authenticated! method:', method);
|
||||
logger.info(`✓ Authenticated (${method})`);
|
||||
return { authenticated: true, method };
|
||||
}
|
||||
|
||||
console.log(
|
||||
'[CodexAuth] Not authenticated. exitCode:',
|
||||
result.exitCode,
|
||||
'isLoggedIn:',
|
||||
isLoggedIn
|
||||
);
|
||||
logger.info('Not authenticated');
|
||||
return { authenticated: false, method: 'none' };
|
||||
} catch (error) {
|
||||
console.log('[CodexAuth] Error running command:', error);
|
||||
logger.error('Failed to check authentication:', error);
|
||||
return { authenticated: false, method: 'none' };
|
||||
}
|
||||
|
||||
console.log('[CodexAuth] Returning not authenticated');
|
||||
return { authenticated: false, method: 'none' };
|
||||
}
|
||||
|
||||
@@ -129,10 +129,30 @@ export const TOOL_PRESETS = {
|
||||
specGeneration: ['Read', 'Glob', 'Grep'] as const,
|
||||
|
||||
/** Full tool access for feature implementation */
|
||||
fullAccess: ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'] as const,
|
||||
fullAccess: [
|
||||
'Read',
|
||||
'Write',
|
||||
'Edit',
|
||||
'Glob',
|
||||
'Grep',
|
||||
'Bash',
|
||||
'WebSearch',
|
||||
'WebFetch',
|
||||
'TodoWrite',
|
||||
] as const,
|
||||
|
||||
/** Tools for chat/interactive mode */
|
||||
chat: ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'] as const,
|
||||
chat: [
|
||||
'Read',
|
||||
'Write',
|
||||
'Edit',
|
||||
'Glob',
|
||||
'Grep',
|
||||
'Bash',
|
||||
'WebSearch',
|
||||
'WebFetch',
|
||||
'TodoWrite',
|
||||
] as const,
|
||||
} as const;
|
||||
|
||||
/**
|
||||
|
||||
@@ -21,6 +21,12 @@ export interface WorktreeMetadata {
  branch: string;
  createdAt: string;
  pr?: WorktreePRInfo;
  /** Whether the init script has been executed for this worktree */
  initScriptRan?: boolean;
  /** Status of the init script execution */
  initScriptStatus?: 'running' | 'success' | 'failed';
  /** Error message if init script failed */
  initScriptError?: string;
}

/**
@@ -8,12 +8,28 @@ import type { Request, Response, NextFunction } from 'express';
import { validatePath, PathNotAllowedError } from '@automaker/platform';

/**
 * Creates a middleware that validates specified path parameters in req.body
 * Helper to get parameter value from request (checks body first, then query)
 */
function getParamValue(req: Request, paramName: string): unknown {
  // Check body first (for POST/PUT/PATCH requests)
  if (req.body && req.body[paramName] !== undefined) {
    return req.body[paramName];
  }
  // Fall back to query params (for GET requests)
  if (req.query && req.query[paramName] !== undefined) {
    return req.query[paramName];
  }
  return undefined;
}

/**
 * Creates a middleware that validates specified path parameters in req.body or req.query
 * @param paramNames - Names of parameters to validate (e.g., 'projectPath', 'worktreePath')
 * @example
 * router.post('/create', validatePathParams('projectPath'), handler);
 * router.post('/delete', validatePathParams('projectPath', 'worktreePath'), handler);
 * router.post('/send', validatePathParams('workingDirectory?', 'imagePaths[]'), handler);
 * router.get('/logs', validatePathParams('worktreePath'), handler); // Works with query params too
 *
 * Special syntax:
 * - 'paramName?' - Optional parameter (only validated if present)
@@ -26,8 +42,8 @@ export function validatePathParams(...paramNames: string[]) {
    // Handle optional parameters (paramName?)
    if (paramName.endsWith('?')) {
      const actualName = paramName.slice(0, -1);
      const value = req.body[actualName];
      if (value) {
      const value = getParamValue(req, actualName);
      if (value && typeof value === 'string') {
        validatePath(value);
      }
      continue;
@@ -36,18 +52,20 @@ export function validatePathParams(...paramNames: string[]) {
    // Handle array parameters (paramName[])
    if (paramName.endsWith('[]')) {
      const actualName = paramName.slice(0, -2);
      const values = req.body[actualName];
      const values = getParamValue(req, actualName);
      if (Array.isArray(values) && values.length > 0) {
        for (const value of values) {
          validatePath(value);
          if (typeof value === 'string') {
            validatePath(value);
          }
        }
      }
      continue;
    }

    // Handle regular parameters
    const value = req.body[paramName];
    if (value) {
    const value = getParamValue(req, paramName);
    if (value && typeof value === 'string') {
      validatePath(value);
    }
  }
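Because `getParamValue` now falls back to `req.query`, the same validation applies whether a path arrives in a JSON body or a query string. A hedged illustration against the worktree routes: the exact endpoint paths are assumptions based on the `@example` comments above, the port matches the test server configuration, and the file paths are placeholders.

```bash
# GET: worktreePath is read from the query string and validated
curl -s "http://localhost:3008/api/worktree/logs?worktreePath=/projects/demo/.worktrees/feature-x"

# POST: the same parameter is read from the JSON body instead
curl -s -X POST http://localhost:3008/api/worktree/delete \
  -H 'Content-Type: application/json' \
  -d '{"projectPath": "/projects/demo", "worktreePath": "/projects/demo/.worktrees/feature-x"}'
```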
@@ -22,6 +22,8 @@ import type {
|
||||
// Only these vars are passed - nothing else from process.env leaks through.
|
||||
const ALLOWED_ENV_VARS = [
|
||||
'ANTHROPIC_API_KEY',
|
||||
'ANTHROPIC_BASE_URL',
|
||||
'ANTHROPIC_AUTH_TOKEN',
|
||||
'PATH',
|
||||
'HOME',
|
||||
'SHELL',
|
||||
@@ -99,6 +101,8 @@ export class ClaudeProvider extends BaseProvider {
|
||||
...(maxThinkingTokens && { maxThinkingTokens }),
|
||||
// Subagents configuration for specialized task delegation
|
||||
...(options.agents && { agents: options.agents }),
|
||||
// Pass through outputFormat for structured JSON outputs
|
||||
...(options.outputFormat && { outputFormat: options.outputFormat }),
|
||||
};
|
||||
|
||||
// Build prompt payload
|
||||
|
||||
@@ -26,22 +26,22 @@
|
||||
* ```
|
||||
*/
|
||||
|
||||
import { execSync } from 'child_process';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import * as os from 'os';
|
||||
import { BaseProvider } from './base-provider.js';
|
||||
import type { ProviderConfig, ExecuteOptions, ProviderMessage } from './types.js';
|
||||
import {
|
||||
spawnJSONLProcess,
|
||||
type SubprocessOptions,
|
||||
isWslAvailable,
|
||||
findCliInWsl,
|
||||
createWslCommand,
|
||||
findCliInWsl,
|
||||
isWslAvailable,
|
||||
spawnJSONLProcess,
|
||||
windowsToWslPath,
|
||||
type SubprocessOptions,
|
||||
type WslCliResult,
|
||||
} from '@automaker/platform';
|
||||
import { createLogger, isAbortError } from '@automaker/utils';
|
||||
import { execSync } from 'child_process';
|
||||
import * as fs from 'fs';
|
||||
import * as os from 'os';
|
||||
import * as path from 'path';
|
||||
import { BaseProvider } from './base-provider.js';
|
||||
import type { ExecuteOptions, ProviderConfig, ProviderMessage } from './types.js';
|
||||
|
||||
/**
|
||||
* Spawn strategy for CLI tools on Windows
|
||||
@@ -522,8 +522,13 @@ export abstract class CliProvider extends BaseProvider {
|
||||
throw new Error(`${this.getCliName()} CLI not found. ${this.getInstallInstructions()}`);
|
||||
}
|
||||
|
||||
const cliArgs = this.buildCliArgs(options);
|
||||
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
|
||||
// Many CLI-based providers do not support a separate "system" message.
|
||||
// If a systemPrompt is provided, embed it into the prompt so downstream models
|
||||
// still receive critical formatting/schema instructions (e.g., JSON-only outputs).
|
||||
const effectiveOptions = this.embedSystemPromptIntoPrompt(options);
|
||||
|
||||
const cliArgs = this.buildCliArgs(effectiveOptions);
|
||||
const subprocessOptions = this.buildSubprocessOptions(effectiveOptions, cliArgs);
|
||||
|
||||
try {
|
||||
for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
|
||||
@@ -555,4 +560,52 @@ export abstract class CliProvider extends BaseProvider {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Embed system prompt text into the user prompt for CLI providers.
|
||||
*
|
||||
* Most CLI providers we integrate with only accept a single prompt via stdin/args.
|
||||
* When upstream code supplies `options.systemPrompt`, we prepend it to the prompt
|
||||
* content and clear `systemPrompt` to avoid any accidental double-injection by
|
||||
* subclasses.
|
||||
*/
|
||||
protected embedSystemPromptIntoPrompt(options: ExecuteOptions): ExecuteOptions {
|
||||
if (!options.systemPrompt) {
|
||||
return options;
|
||||
}
|
||||
|
||||
// Only string system prompts can be reliably embedded for CLI providers.
|
||||
// Presets are provider-specific (e.g., Claude SDK) and cannot be represented
|
||||
// universally. If a preset is provided, we only embed its optional `append`.
|
||||
const systemText =
|
||||
typeof options.systemPrompt === 'string'
|
||||
? options.systemPrompt
|
||||
: options.systemPrompt.append
|
||||
? options.systemPrompt.append
|
||||
: '';
|
||||
|
||||
if (!systemText) {
|
||||
return { ...options, systemPrompt: undefined };
|
||||
}
|
||||
|
||||
// Preserve original prompt structure.
|
||||
if (typeof options.prompt === 'string') {
|
||||
return {
|
||||
...options,
|
||||
prompt: `${systemText}\n\n---\n\n${options.prompt}`,
|
||||
systemPrompt: undefined,
|
||||
};
|
||||
}
|
||||
|
||||
if (Array.isArray(options.prompt)) {
|
||||
return {
|
||||
...options,
|
||||
prompt: [{ type: 'text', text: systemText }, ...options.prompt],
|
||||
systemPrompt: undefined,
|
||||
};
|
||||
}
|
||||
|
||||
// Should be unreachable due to ExecuteOptions typing, but keep safe.
|
||||
return { ...options, systemPrompt: undefined };
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import {
|
||||
extractTextFromContent,
|
||||
classifyError,
|
||||
getUserFriendlyErrorMessage,
|
||||
createLogger,
|
||||
} from '@automaker/utils';
|
||||
import type {
|
||||
ExecuteOptions,
|
||||
@@ -44,6 +45,7 @@ import {
|
||||
getCodexTodoToolName,
|
||||
} from './codex-tool-mapping.js';
|
||||
import { SettingsService } from '../services/settings-service.js';
|
||||
import { createTempEnvOverride } from '../lib/auth-utils.js';
|
||||
import { checkSandboxCompatibility } from '../lib/sdk-options.js';
|
||||
import { CODEX_MODELS } from './codex-models.js';
|
||||
|
||||
@@ -141,6 +143,7 @@ type CodexExecutionMode = typeof CODEX_EXECUTION_MODE_CLI | typeof CODEX_EXECUTI
|
||||
type CodexExecutionPlan = {
|
||||
mode: CodexExecutionMode;
|
||||
cliPath: string | null;
|
||||
openAiApiKey?: string | null;
|
||||
};
|
||||
|
||||
const ALLOWED_ENV_VARS = [
|
||||
@@ -165,6 +168,22 @@ function buildEnv(): Record<string, string> {
|
||||
return env;
|
||||
}
|
||||
|
||||
async function resolveOpenAiApiKey(): Promise<string | null> {
|
||||
const envKey = process.env[OPENAI_API_KEY_ENV];
|
||||
if (envKey) {
|
||||
return envKey;
|
||||
}
|
||||
|
||||
try {
|
||||
const settingsService = new SettingsService(getCodexSettingsDir());
|
||||
const credentials = await settingsService.getCredentials();
|
||||
const storedKey = credentials.apiKeys.openai?.trim();
|
||||
return storedKey ? storedKey : null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function hasMcpServersConfigured(options: ExecuteOptions): boolean {
|
||||
return Boolean(options.mcpServers && Object.keys(options.mcpServers).length > 0);
|
||||
}
|
||||
@@ -180,18 +199,21 @@ function isSdkEligible(options: ExecuteOptions): boolean {
|
||||
async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
|
||||
const cliPath = await findCodexCliPath();
|
||||
const authIndicators = await getCodexAuthIndicators();
|
||||
const hasApiKey = Boolean(process.env[OPENAI_API_KEY_ENV]);
|
||||
const openAiApiKey = await resolveOpenAiApiKey();
|
||||
const hasApiKey = Boolean(openAiApiKey);
|
||||
const cliAuthenticated = authIndicators.hasOAuthToken || authIndicators.hasApiKey || hasApiKey;
|
||||
const sdkEligible = isSdkEligible(options);
|
||||
const cliAvailable = Boolean(cliPath);
|
||||
|
||||
if (hasApiKey) {
|
||||
return {
|
||||
mode: CODEX_EXECUTION_MODE_SDK,
|
||||
cliPath,
|
||||
openAiApiKey,
|
||||
};
|
||||
}
|
||||
|
||||
if (sdkEligible) {
|
||||
if (hasApiKey) {
|
||||
return {
|
||||
mode: CODEX_EXECUTION_MODE_SDK,
|
||||
cliPath,
|
||||
};
|
||||
}
|
||||
if (!cliAvailable) {
|
||||
throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
|
||||
}
|
||||
@@ -208,6 +230,7 @@ async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<Codex
|
||||
return {
|
||||
mode: CODEX_EXECUTION_MODE_CLI,
|
||||
cliPath,
|
||||
openAiApiKey,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -658,6 +681,8 @@ async function loadCodexInstructions(cwd: string, enabled: boolean): Promise<str
    .join('\n\n');
}

const logger = createLogger('CodexProvider');

export class CodexProvider extends BaseProvider {
  getName(): string {
    return 'codex';
@@ -698,7 +723,14 @@ export class CodexProvider extends BaseProvider {

    const executionPlan = await resolveCodexExecutionPlan(options);
    if (executionPlan.mode === CODEX_EXECUTION_MODE_SDK) {
      yield* executeCodexSdkQuery(options, combinedSystemPrompt);
      const cleanupEnv = executionPlan.openAiApiKey
        ? createTempEnvOverride({ [OPENAI_API_KEY_ENV]: executionPlan.openAiApiKey })
        : null;
      try {
        yield* executeCodexSdkQuery(options, combinedSystemPrompt);
      } finally {
        cleanupEnv?.();
      }
      return;
    }

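The SDK path above now scopes OPENAI_API_KEY to the lifetime of the query with createTempEnvOverride plus try/finally. The helper itself is not part of this diff; the following is only a minimal sketch of the set-then-restore pattern it appears to implement (the name is suffixed so it is not mistaken for the real code).

```typescript
// Sketch only - the real createTempEnvOverride lives in '../lib/auth-utils.js'
// and may differ. It sets the given variables and returns a cleanup function
// that restores whatever values were there before.
function createTempEnvOverrideSketch(vars: Record<string, string>): () => void {
  const previous = new Map<string, string | undefined>();
  for (const [key, value] of Object.entries(vars)) {
    previous.set(key, process.env[key]);
    process.env[key] = value;
  }
  return () => {
    for (const [key, old] of previous) {
      if (old === undefined) {
        delete process.env[key];
      } else {
        process.env[key] = old;
      }
    }
  };
}
```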
@@ -777,11 +809,16 @@ export class CodexProvider extends BaseProvider {
      '-', // Read prompt from stdin to avoid shell escaping issues
    ];

    const envOverrides = buildEnv();
    if (executionPlan.openAiApiKey && !envOverrides[OPENAI_API_KEY_ENV]) {
      envOverrides[OPENAI_API_KEY_ENV] = executionPlan.openAiApiKey;
    }

    const stream = spawnJSONLProcess({
      command: commandPath,
      args,
      cwd: options.cwd,
      env: buildEnv(),
      env: envOverrides,
      abortController: options.abortController,
      timeout: DEFAULT_TIMEOUT_MS,
      stdinData: promptText, // Pass prompt via stdin
@@ -967,21 +1004,11 @@ export class CodexProvider extends BaseProvider {
  }

  async detectInstallation(): Promise<InstallationStatus> {
    console.log('[CodexProvider.detectInstallation] Starting...');

    const cliPath = await findCodexCliPath();
    const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
    const hasApiKey = Boolean(await resolveOpenAiApiKey());
    const authIndicators = await getCodexAuthIndicators();
    const installed = !!cliPath;

    console.log('[CodexProvider.detectInstallation] cliPath:', cliPath);
    console.log('[CodexProvider.detectInstallation] hasApiKey:', hasApiKey);
    console.log(
      '[CodexProvider.detectInstallation] authIndicators:',
      JSON.stringify(authIndicators)
    );
    console.log('[CodexProvider.detectInstallation] installed:', installed);

    let version = '';
    if (installed) {
      try {
@@ -991,20 +1018,16 @@
        cwd: process.cwd(),
      });
      version = result.stdout.trim();
      console.log('[CodexProvider.detectInstallation] version:', version);
    } catch (error) {
      console.log('[CodexProvider.detectInstallation] Error getting version:', error);
      version = '';
    }
  }

    // Determine auth status - always verify with CLI, never assume authenticated
    console.log('[CodexProvider.detectInstallation] Calling checkCodexAuthentication...');
    const authCheck = await checkCodexAuthentication(cliPath);
    console.log('[CodexProvider.detectInstallation] authCheck result:', JSON.stringify(authCheck));
    const authenticated = authCheck.authenticated;

    const result = {
    return {
      installed,
      path: cliPath || undefined,
      version: version || undefined,
@@ -1012,8 +1035,6 @@
      hasApiKey,
      authenticated,
    };
    console.log('[CodexProvider.detectInstallation] Final result:', JSON.stringify(result));
    return result;
  }

  getAvailableModels(): ModelDefinition[] {
@@ -1025,36 +1046,24 @@ export class CodexProvider extends BaseProvider {
|
||||
* Check authentication status for Codex CLI
|
||||
*/
|
||||
async checkAuth(): Promise<CodexAuthStatus> {
|
||||
console.log('[CodexProvider.checkAuth] Starting auth check...');
|
||||
|
||||
const cliPath = await findCodexCliPath();
|
||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
||||
const hasApiKey = Boolean(await resolveOpenAiApiKey());
|
||||
const authIndicators = await getCodexAuthIndicators();
|
||||
|
||||
console.log('[CodexProvider.checkAuth] cliPath:', cliPath);
|
||||
console.log('[CodexProvider.checkAuth] hasApiKey:', hasApiKey);
|
||||
console.log('[CodexProvider.checkAuth] authIndicators:', JSON.stringify(authIndicators));
|
||||
|
||||
// Check for API key in environment
|
||||
if (hasApiKey) {
|
||||
console.log('[CodexProvider.checkAuth] Has API key, returning authenticated');
|
||||
return { authenticated: true, method: 'api_key' };
|
||||
}
|
||||
|
||||
// Check for OAuth/token from Codex CLI
|
||||
if (authIndicators.hasOAuthToken || authIndicators.hasApiKey) {
|
||||
console.log(
|
||||
'[CodexProvider.checkAuth] Has OAuth token or API key in auth file, returning authenticated'
|
||||
);
|
||||
return { authenticated: true, method: 'oauth' };
|
||||
}
|
||||
|
||||
// CLI is installed but not authenticated via indicators - try CLI command
|
||||
console.log('[CodexProvider.checkAuth] No indicators found, trying CLI command...');
|
||||
if (cliPath) {
|
||||
try {
|
||||
// Try 'codex login status' first (same as checkCodexAuthentication)
|
||||
console.log('[CodexProvider.checkAuth] Running: ' + cliPath + ' login status');
|
||||
const result = await spawnProcess({
|
||||
command: cliPath || CODEX_COMMAND,
|
||||
args: ['login', 'status'],
|
||||
@@ -1064,26 +1073,19 @@ export class CodexProvider extends BaseProvider {
|
||||
TERM: 'dumb',
|
||||
},
|
||||
});
|
||||
console.log('[CodexProvider.checkAuth] login status result:');
|
||||
console.log('[CodexProvider.checkAuth] exitCode:', result.exitCode);
|
||||
console.log('[CodexProvider.checkAuth] stdout:', JSON.stringify(result.stdout));
|
||||
console.log('[CodexProvider.checkAuth] stderr:', JSON.stringify(result.stderr));
|
||||
|
||||
// Check both stdout and stderr - Codex CLI outputs to stderr
|
||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||
const isLoggedIn = combinedOutput.includes('logged in');
|
||||
console.log('[CodexProvider.checkAuth] isLoggedIn:', isLoggedIn);
|
||||
|
||||
if (result.exitCode === 0 && isLoggedIn) {
|
||||
console.log('[CodexProvider.checkAuth] CLI says logged in, returning authenticated');
|
||||
return { authenticated: true, method: 'oauth' };
|
||||
}
|
||||
} catch (error) {
|
||||
console.log('[CodexProvider.checkAuth] Error running login status:', error);
|
||||
logger.warn('Error running login status command during auth check:', error);
|
||||
}
|
||||
}
|
||||
|
||||
console.log('[CodexProvider.checkAuth] Not authenticated');
|
||||
return { authenticated: false, method: 'none' };
|
||||
}
|
||||
|
||||
|
||||
@@ -30,3 +30,11 @@ export { OpencodeProvider } from './opencode-provider.js';

// Provider factory
export { ProviderFactory } from './provider-factory.js';

// Simple query service - unified interface for basic AI queries
export { simpleQuery, streamingQuery } from './simple-query-service.js';
export type {
  SimpleQueryOptions,
  SimpleQueryResult,
  StreamingQueryOptions,
} from './simple-query-service.js';

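With these re-exports, callers can import the query helpers from the providers barrel instead of reaching into the SDK directly. A small usage sketch; the relative import path is an assumption and depends on the caller's location.

```typescript
// Assumed relative path; adjust to the importing file's location.
import { simpleQuery } from '../providers/index.js';

async function suggestTitle(cwd: string): Promise<string> {
  const result = await simpleQuery({
    prompt: 'Suggest a short title for the pending change',
    cwd,
    maxTurns: 1,
    allowedTools: [],
  });
  return result.text;
}
```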
File diff suppressed because it is too large

apps/server/src/providers/simple-query-service.ts (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
/**
|
||||
* Simple Query Service - Simplified interface for basic AI queries
|
||||
*
|
||||
* Use this for routes that need simple text responses without
|
||||
* complex event handling. This service abstracts away the provider
|
||||
* selection and streaming details, providing a clean interface
|
||||
* for common query patterns.
|
||||
*
|
||||
* Benefits:
|
||||
* - No direct SDK imports needed in route files
|
||||
* - Consistent provider routing based on model
|
||||
* - Automatic text extraction from streaming responses
|
||||
* - Structured output support for JSON schema responses
|
||||
* - Eliminates duplicate extractTextFromStream() functions
|
||||
*/
|
||||
|
||||
import { ProviderFactory } from './provider-factory.js';
|
||||
import type {
|
||||
ProviderMessage,
|
||||
ContentBlock,
|
||||
ThinkingLevel,
|
||||
ReasoningEffort,
|
||||
} from '@automaker/types';
|
||||
import { stripProviderPrefix } from '@automaker/types';
|
||||
|
||||
/**
|
||||
* Options for simple query execution
|
||||
*/
|
||||
export interface SimpleQueryOptions {
|
||||
/** The prompt to send to the AI (can be text or multi-part content) */
|
||||
prompt: string | Array<{ type: string; text?: string; source?: object }>;
|
||||
/** Model to use (with or without provider prefix) */
|
||||
model?: string;
|
||||
/** Working directory for the query */
|
||||
cwd: string;
|
||||
/** System prompt (combined with user prompt for some providers) */
|
||||
systemPrompt?: string;
|
||||
/** Maximum turns for agentic operations (default: 1) */
|
||||
maxTurns?: number;
|
||||
/** Tools to allow (default: [] for simple queries) */
|
||||
allowedTools?: string[];
|
||||
/** Abort controller for cancellation */
|
||||
abortController?: AbortController;
|
||||
/** Structured output format for JSON responses */
|
||||
outputFormat?: {
|
||||
type: 'json_schema';
|
||||
schema: Record<string, unknown>;
|
||||
};
|
||||
/** Thinking level for Claude models */
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
/** Reasoning effort for Codex/OpenAI models */
|
||||
reasoningEffort?: ReasoningEffort;
|
||||
/** If true, runs in read-only mode (no file writes) */
|
||||
readOnly?: boolean;
|
||||
/** Setting sources for CLAUDE.md loading */
|
||||
settingSources?: Array<'user' | 'project' | 'local'>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Result from a simple query
|
||||
*/
|
||||
export interface SimpleQueryResult {
|
||||
/** The accumulated text response */
|
||||
text: string;
|
||||
/** Structured output if outputFormat was specified and provider supports it */
|
||||
structured_output?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Options for streaming query execution
|
||||
*/
|
||||
export interface StreamingQueryOptions extends SimpleQueryOptions {
|
||||
/** Callback for each text chunk received */
|
||||
onText?: (text: string) => void;
|
||||
/** Callback for tool use events */
|
||||
onToolUse?: (tool: string, input: unknown) => void;
|
||||
/** Callback for thinking blocks (if available) */
|
||||
onThinking?: (thinking: string) => void;
|
||||
}
|
||||
|
||||
/**
|
||||
* Default model to use when none specified
|
||||
*/
|
||||
const DEFAULT_MODEL = 'claude-sonnet-4-20250514';
|
||||
|
||||
/**
|
||||
* Execute a simple query and return the text result
|
||||
*
|
||||
* Use this for simple, non-streaming queries where you just need
|
||||
* the final text response. For more complex use cases with progress
|
||||
* callbacks, use streamingQuery() instead.
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* const result = await simpleQuery({
|
||||
* prompt: 'Generate a title for: user authentication',
|
||||
* cwd: process.cwd(),
|
||||
* systemPrompt: 'You are a title generator...',
|
||||
* maxTurns: 1,
|
||||
* allowedTools: [],
|
||||
* });
|
||||
* console.log(result.text); // "Add user authentication"
|
||||
* ```
|
||||
*/
|
||||
export async function simpleQuery(options: SimpleQueryOptions): Promise<SimpleQueryResult> {
|
||||
const model = options.model || DEFAULT_MODEL;
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
let responseText = '';
|
||||
let structuredOutput: Record<string, unknown> | undefined;
|
||||
|
||||
// Build provider options
|
||||
const providerOptions = {
|
||||
prompt: options.prompt,
|
||||
model: bareModel,
|
||||
originalModel: model,
|
||||
cwd: options.cwd,
|
||||
systemPrompt: options.systemPrompt,
|
||||
maxTurns: options.maxTurns ?? 1,
|
||||
allowedTools: options.allowedTools ?? [],
|
||||
abortController: options.abortController,
|
||||
outputFormat: options.outputFormat,
|
||||
thinkingLevel: options.thinkingLevel,
|
||||
reasoningEffort: options.reasoningEffort,
|
||||
readOnly: options.readOnly,
|
||||
settingSources: options.settingSources,
|
||||
};
|
||||
|
||||
for await (const msg of provider.executeQuery(providerOptions)) {
|
||||
// Handle error messages
|
||||
if (msg.type === 'error') {
|
||||
const errorMessage = msg.error || 'Provider returned an error';
|
||||
throw new Error(errorMessage);
|
||||
}
|
||||
|
||||
// Extract text from assistant messages
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle result messages
|
||||
if (msg.type === 'result') {
|
||||
if (msg.subtype === 'success') {
|
||||
// Use result text if longer than accumulated text
|
||||
if (msg.result && msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
// Capture structured output if present
|
||||
if (msg.structured_output) {
|
||||
structuredOutput = msg.structured_output;
|
||||
}
|
||||
} else if (msg.subtype === 'error_max_turns') {
|
||||
// Max turns reached - return what we have
|
||||
break;
|
||||
} else if (msg.subtype === 'error_max_structured_output_retries') {
|
||||
throw new Error('Could not produce valid structured output after retries');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { text: responseText, structured_output: structuredOutput };
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a streaming query with event callbacks
|
||||
*
|
||||
* Use this for queries where you need real-time progress updates,
|
||||
* such as when displaying streaming output to a user.
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* const result = await streamingQuery({
|
||||
* prompt: 'Analyze this project and suggest improvements',
|
||||
* cwd: '/path/to/project',
|
||||
* maxTurns: 250,
|
||||
* allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
* onText: (text) => emitProgress(text),
|
||||
* onToolUse: (tool, input) => emitToolUse(tool, input),
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
export async function streamingQuery(options: StreamingQueryOptions): Promise<SimpleQueryResult> {
|
||||
const model = options.model || DEFAULT_MODEL;
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
let responseText = '';
|
||||
let structuredOutput: Record<string, unknown> | undefined;
|
||||
|
||||
// Build provider options
|
||||
const providerOptions = {
|
||||
prompt: options.prompt,
|
||||
model: bareModel,
|
||||
originalModel: model,
|
||||
cwd: options.cwd,
|
||||
systemPrompt: options.systemPrompt,
|
||||
maxTurns: options.maxTurns ?? 250,
|
||||
allowedTools: options.allowedTools ?? ['Read', 'Glob', 'Grep'],
|
||||
abortController: options.abortController,
|
||||
outputFormat: options.outputFormat,
|
||||
thinkingLevel: options.thinkingLevel,
|
||||
reasoningEffort: options.reasoningEffort,
|
||||
readOnly: options.readOnly,
|
||||
settingSources: options.settingSources,
|
||||
};
|
||||
|
||||
for await (const msg of provider.executeQuery(providerOptions)) {
|
||||
// Handle error messages
|
||||
if (msg.type === 'error') {
|
||||
const errorMessage = msg.error || 'Provider returned an error';
|
||||
throw new Error(errorMessage);
|
||||
}
|
||||
|
||||
// Extract content from assistant messages
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
options.onText?.(block.text);
|
||||
} else if (block.type === 'tool_use' && block.name) {
|
||||
options.onToolUse?.(block.name, block.input);
|
||||
} else if (block.type === 'thinking' && block.thinking) {
|
||||
options.onThinking?.(block.thinking);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle result messages
|
||||
if (msg.type === 'result') {
|
||||
if (msg.subtype === 'success') {
|
||||
// Use result text if longer than accumulated text
|
||||
if (msg.result && msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
// Capture structured output if present
|
||||
if (msg.structured_output) {
|
||||
structuredOutput = msg.structured_output;
|
||||
}
|
||||
} else if (msg.subtype === 'error_max_turns') {
|
||||
// Max turns reached - return what we have
|
||||
break;
|
||||
} else if (msg.subtype === 'error_max_structured_output_retries') {
|
||||
throw new Error('Could not produce valid structured output after retries');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { text: responseText, structured_output: structuredOutput };
|
||||
}
|
||||
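The doc comments above already show plain-text usage; the structured-output path deserves a sketch as well, since outputFormat and structured_output are easy to miss. The schema and field names below are illustrative only, not taken from the repository.

```typescript
import { simpleQuery } from './simple-query-service.js';

// Illustrative schema - not taken from the repository.
const prioritySchema = {
  type: 'object',
  properties: {
    priority: { type: 'string', enum: ['low', 'medium', 'high'] },
    reason: { type: 'string' },
  },
  required: ['priority'],
};

async function classifyFeature(description: string, cwd: string) {
  const result = await simpleQuery({
    prompt: `Classify the priority of this feature: ${description}`,
    cwd,
    maxTurns: 1,
    allowedTools: [],
    outputFormat: { type: 'json_schema', schema: prioritySchema },
  });
  // structured_output is only present when the provider honored the schema;
  // fall back to the raw text otherwise.
  return result.structured_output ?? { priority: 'unknown', reason: result.text };
}
```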
@@ -6,26 +6,57 @@ import { createLogger } from '@automaker/utils';

const logger = createLogger('SpecRegeneration');

// Shared state for tracking generation status - private
let isRunning = false;
let currentAbortController: AbortController | null = null;
// Shared state for tracking generation status - scoped by project path
const runningProjects = new Map<string, boolean>();
const abortControllers = new Map<string, AbortController>();

/**
 * Get the current running state
 * Get the running state for a specific project
 */
export function getSpecRegenerationStatus(): {
export function getSpecRegenerationStatus(projectPath?: string): {
  isRunning: boolean;
  currentAbortController: AbortController | null;
  projectPath?: string;
} {
  return { isRunning, currentAbortController };
  if (projectPath) {
    return {
      isRunning: runningProjects.get(projectPath) || false,
      currentAbortController: abortControllers.get(projectPath) || null,
      projectPath,
    };
  }
  // Fallback: check if any project is running (for backward compatibility)
  const isAnyRunning = Array.from(runningProjects.values()).some((running) => running);
  return { isRunning: isAnyRunning, currentAbortController: null };
}

/**
 * Set the running state and abort controller
 * Get the project path that is currently running (if any)
 */
export function setRunningState(running: boolean, controller: AbortController | null = null): void {
  isRunning = running;
  currentAbortController = controller;
export function getRunningProjectPath(): string | null {
  for (const [path, running] of runningProjects.entries()) {
    if (running) return path;
  }
  return null;
}

/**
 * Set the running state and abort controller for a specific project
 */
export function setRunningState(
  projectPath: string,
  running: boolean,
  controller: AbortController | null = null
): void {
  if (running) {
    runningProjects.set(projectPath, true);
    if (controller) {
      abortControllers.set(projectPath, controller);
    }
  } else {
    runningProjects.delete(projectPath);
    abortControllers.delete(projectPath);
  }
}

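Because the state is now keyed by project path, two projects can run generation concurrently without tripping each other's "already running" check. A short sketch of how the route handlers later in this diff drive these helpers; the project paths are hypothetical.

```typescript
// Import path as used by the route handlers in this diff; project paths are hypothetical.
import { getSpecRegenerationStatus, setRunningState } from '../common.js';

const projectA = '/work/project-a';
const projectB = '/work/project-b';

setRunningState(projectA, true, new AbortController());

console.log(getSpecRegenerationStatus(projectA).isRunning); // true
console.log(getSpecRegenerationStatus(projectB).isRunning); // false - untouched by project A
console.log(getSpecRegenerationStatus().isRunning); // true - legacy "any project running" fallback

setRunningState(projectA, false); // removes the flag and drops the stored controller
```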
/**
|
||||
|
||||
@@ -5,15 +5,12 @@
|
||||
* (defaults to Sonnet for balanced speed and quality).
|
||||
*/
|
||||
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import * as secureFs from '../../lib/secure-fs.js';
|
||||
import type { EventEmitter } from '../../lib/events.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createFeatureGenerationOptions } from '../../lib/sdk-options.js';
|
||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||
import { logAuthStatus } from './common.js';
|
||||
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||
import { parseAndCreateFeatures } from './parse-and-create-features.js';
|
||||
import { getAppSpecPath } from '@automaker/platform';
|
||||
import type { SettingsService } from '../../services/settings-service.js';
|
||||
@@ -115,121 +112,30 @@ IMPORTANT: Do not ask for clarification. The specification is provided above. Ge
|
||||
|
||||
logger.info('Using model:', model);
|
||||
|
||||
let responseText = '';
|
||||
let messageCount = 0;
|
||||
// Use streamingQuery with event callbacks
|
||||
const result = await streamingQuery({
|
||||
prompt,
|
||||
model,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
abortController,
|
||||
thinkingLevel,
|
||||
readOnly: true, // Feature generation only reads code, doesn't write
|
||||
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||
onText: (text) => {
|
||||
logger.debug(`Feature text block received (${text.length} chars)`);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_regeneration_progress',
|
||||
content: text,
|
||||
projectPath: projectPath,
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
// Route to appropriate provider based on model type
|
||||
if (isCursorModel(model)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
logger.info('[FeatureGeneration] Using Cursor provider');
|
||||
const responseText = result.text;
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// Add explicit instructions for Cursor to return JSON in response
|
||||
const cursorPrompt = `${prompt}
|
||||
|
||||
CRITICAL INSTRUCTIONS:
|
||||
1. DO NOT write any files. Return the JSON in your response only.
|
||||
2. Respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
|
||||
3. Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
abortController,
|
||||
readOnly: true, // Feature generation only reads code, doesn't write
|
||||
})) {
|
||||
messageCount++;
|
||||
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
logger.debug(`Feature text block received (${block.text.length} chars)`);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_regeneration_progress',
|
||||
content: block.text,
|
||||
projectPath: projectPath,
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
||||
// Use result if it's a final accumulated message
|
||||
if (msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Use Claude SDK for Claude models
|
||||
logger.info('[FeatureGeneration] Using Claude SDK');
|
||||
|
||||
const options = createFeatureGenerationOptions({
|
||||
cwd: projectPath,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
model,
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
});
|
||||
|
||||
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
|
||||
logger.info('Calling Claude Agent SDK query() for features...');
|
||||
|
||||
logAuthStatus('Right before SDK query() for features');
|
||||
|
||||
let stream;
|
||||
try {
|
||||
stream = query({ prompt, options });
|
||||
logger.debug('query() returned stream successfully');
|
||||
} catch (queryError) {
|
||||
logger.error('❌ query() threw an exception:');
|
||||
logger.error('Error:', queryError);
|
||||
throw queryError;
|
||||
}
|
||||
|
||||
logger.debug('Starting to iterate over feature stream...');
|
||||
|
||||
try {
|
||||
for await (const msg of stream) {
|
||||
messageCount++;
|
||||
logger.debug(
|
||||
`Feature stream message #${messageCount}:`,
|
||||
JSON.stringify({ type: msg.type, subtype: (msg as any).subtype }, null, 2)
|
||||
);
|
||||
|
||||
if (msg.type === 'assistant' && msg.message.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text') {
|
||||
responseText += block.text;
|
||||
logger.debug(`Feature text block received (${block.text.length} chars)`);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_regeneration_progress',
|
||||
content: block.text,
|
||||
projectPath: projectPath,
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
|
||||
logger.debug('Received success result for features');
|
||||
responseText = (msg as any).result || responseText;
|
||||
} else if ((msg as { type: string }).type === 'error') {
|
||||
logger.error('❌ Received error message from feature stream:');
|
||||
logger.error('Error message:', JSON.stringify(msg, null, 2));
|
||||
}
|
||||
}
|
||||
} catch (streamError) {
|
||||
logger.error('❌ Error while iterating feature stream:');
|
||||
logger.error('Stream error:', streamError);
|
||||
throw streamError;
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(`Feature stream complete. Total messages: ${messageCount}`);
|
||||
logger.info(`Feature stream complete.`);
|
||||
logger.info(`Feature response length: ${responseText.length} chars`);
|
||||
logger.info('========== FULL RESPONSE TEXT ==========');
|
||||
logger.info(responseText);
|
||||
|
||||
@@ -5,8 +5,6 @@
|
||||
* (defaults to Opus for high-quality specification generation).
|
||||
*/
|
||||
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import path from 'path';
|
||||
import * as secureFs from '../../lib/secure-fs.js';
|
||||
import type { EventEmitter } from '../../lib/events.js';
|
||||
import {
|
||||
@@ -16,12 +14,10 @@ import {
|
||||
type SpecOutput,
|
||||
} from '../../lib/app-spec-format.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createSpecGenerationOptions } from '../../lib/sdk-options.js';
|
||||
import { extractJson } from '../../lib/json-extractor.js';
|
||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||
import { logAuthStatus } from './common.js';
|
||||
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
|
||||
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
|
||||
import type { SettingsService } from '../../services/settings-service.js';
|
||||
@@ -109,21 +105,15 @@ ${getStructuredSpecPromptInstruction()}`;
|
||||
logger.info('Using model:', model);
|
||||
|
||||
let responseText = '';
|
||||
let messageCount = 0;
|
||||
let structuredOutput: SpecOutput | null = null;
|
||||
|
||||
// Route to appropriate provider based on model type
|
||||
if (isCursorModel(model)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
logger.info('[SpecGeneration] Using Cursor provider');
|
||||
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
|
||||
const useStructuredOutput = !isCursorModel(model);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// For Cursor, include the JSON schema in the prompt with clear instructions
|
||||
// to return JSON in the response (not write to a file)
|
||||
const cursorPrompt = `${prompt}
|
||||
// Build the final prompt - for Cursor, include JSON schema instructions
|
||||
let finalPrompt = prompt;
|
||||
if (!useStructuredOutput) {
|
||||
finalPrompt = `${prompt}
|
||||
|
||||
CRITICAL INSTRUCTIONS:
|
||||
1. DO NOT write any files. DO NOT create any files like "project_specification.json".
|
||||
@@ -133,153 +123,57 @@ CRITICAL INSTRUCTIONS:
|
||||
${JSON.stringify(specOutputSchema, null, 2)}
|
||||
|
||||
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
abortController,
|
||||
readOnly: true, // Spec generation only reads code, we write the spec ourselves
|
||||
})) {
|
||||
messageCount++;
|
||||
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
logger.info(
|
||||
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
|
||||
);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_regeneration_progress',
|
||||
content: block.text,
|
||||
projectPath: projectPath,
|
||||
});
|
||||
} else if (block.type === 'tool_use') {
|
||||
logger.info('Tool use:', block.name);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_tool',
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
||||
// Use result if it's a final accumulated message
|
||||
if (msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse JSON from the response text using shared utility
|
||||
if (responseText) {
|
||||
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
|
||||
}
|
||||
} else {
|
||||
// Use Claude SDK for Claude models
|
||||
logger.info('[SpecGeneration] Using Claude SDK');
|
||||
|
||||
const options = createSpecGenerationOptions({
|
||||
cwd: projectPath,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
model,
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
outputFormat: {
|
||||
type: 'json_schema',
|
||||
schema: specOutputSchema,
|
||||
},
|
||||
});
|
||||
|
||||
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
|
||||
logger.info('Calling Claude Agent SDK query()...');
|
||||
|
||||
// Log auth status right before the SDK call
|
||||
logAuthStatus('Right before SDK query()');
|
||||
|
||||
let stream;
|
||||
try {
|
||||
stream = query({ prompt, options });
|
||||
logger.debug('query() returned stream successfully');
|
||||
} catch (queryError) {
|
||||
logger.error('❌ query() threw an exception:');
|
||||
logger.error('Error:', queryError);
|
||||
throw queryError;
|
||||
}
|
||||
|
||||
logger.info('Starting to iterate over stream...');
|
||||
|
||||
try {
|
||||
for await (const msg of stream) {
|
||||
messageCount++;
|
||||
logger.info(
|
||||
`Stream message #${messageCount}: type=${msg.type}, subtype=${(msg as any).subtype}`
|
||||
);
|
||||
|
||||
if (msg.type === 'assistant') {
|
||||
const msgAny = msg as any;
|
||||
if (msgAny.message?.content) {
|
||||
for (const block of msgAny.message.content) {
|
||||
if (block.type === 'text') {
|
||||
responseText += block.text;
|
||||
logger.info(
|
||||
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
|
||||
);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_regeneration_progress',
|
||||
content: block.text,
|
||||
projectPath: projectPath,
|
||||
});
|
||||
} else if (block.type === 'tool_use') {
|
||||
logger.info('Tool use:', block.name);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_tool',
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
|
||||
logger.info('Received success result');
|
||||
// Check for structured output - this is the reliable way to get spec data
|
||||
const resultMsg = msg as any;
|
||||
if (resultMsg.structured_output) {
|
||||
structuredOutput = resultMsg.structured_output as SpecOutput;
|
||||
logger.info('✅ Received structured output');
|
||||
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
|
||||
} else {
|
||||
logger.warn('⚠️ No structured output in result, will fall back to text parsing');
|
||||
}
|
||||
} else if (msg.type === 'result') {
|
||||
// Handle error result types
|
||||
const subtype = (msg as any).subtype;
|
||||
logger.info(`Result message: subtype=${subtype}`);
|
||||
if (subtype === 'error_max_turns') {
|
||||
logger.error('❌ Hit max turns limit!');
|
||||
} else if (subtype === 'error_max_structured_output_retries') {
|
||||
logger.error('❌ Failed to produce valid structured output after retries');
|
||||
throw new Error('Could not produce valid spec output');
|
||||
}
|
||||
} else if ((msg as { type: string }).type === 'error') {
|
||||
logger.error('❌ Received error message from stream:');
|
||||
logger.error('Error message:', JSON.stringify(msg, null, 2));
|
||||
} else if (msg.type === 'user') {
|
||||
// Log user messages (tool results)
|
||||
logger.info(`User message (tool result): ${JSON.stringify(msg).substring(0, 500)}`);
|
||||
}
|
||||
}
|
||||
} catch (streamError) {
|
||||
logger.error('❌ Error while iterating stream:');
|
||||
logger.error('Stream error:', streamError);
|
||||
throw streamError;
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(`Stream iteration complete. Total messages: ${messageCount}`);
|
||||
// Use streamingQuery with event callbacks
|
||||
const result = await streamingQuery({
|
||||
prompt: finalPrompt,
|
||||
model,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
abortController,
|
||||
thinkingLevel,
|
||||
readOnly: true, // Spec generation only reads code, we write the spec ourselves
|
||||
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||
outputFormat: useStructuredOutput
|
||||
? {
|
||||
type: 'json_schema',
|
||||
schema: specOutputSchema,
|
||||
}
|
||||
: undefined,
|
||||
onText: (text) => {
|
||||
responseText += text;
|
||||
logger.info(
|
||||
`Text block received (${text.length} chars), total now: ${responseText.length} chars`
|
||||
);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_regeneration_progress',
|
||||
content: text,
|
||||
projectPath: projectPath,
|
||||
});
|
||||
},
|
||||
onToolUse: (tool, input) => {
|
||||
logger.info('Tool use:', tool);
|
||||
events.emit('spec-regeneration:event', {
|
||||
type: 'spec_tool',
|
||||
tool,
|
||||
input,
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
// Get structured output if available
|
||||
if (result.structured_output) {
|
||||
structuredOutput = result.structured_output as unknown as SpecOutput;
|
||||
logger.info('✅ Received structured output');
|
||||
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
|
||||
} else if (!useStructuredOutput && responseText) {
|
||||
// For non-Claude providers, parse JSON from response text
|
||||
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
|
||||
}
|
||||
|
||||
logger.info(`Stream iteration complete.`);
|
||||
logger.info(`Response text length: ${responseText.length} chars`);
|
||||
|
||||
// Determine XML content to save
|
||||
|
||||
@@ -47,17 +47,17 @@ export function createCreateHandler(events: EventEmitter) {
|
||||
return;
|
||||
}
|
||||
|
||||
const { isRunning } = getSpecRegenerationStatus();
|
||||
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||
if (isRunning) {
|
||||
logger.warn('Generation already running, rejecting request');
|
||||
res.json({ success: false, error: 'Spec generation already running' });
|
||||
logger.warn('Generation already running for project:', projectPath);
|
||||
res.json({ success: false, error: 'Spec generation already running for this project' });
|
||||
return;
|
||||
}
|
||||
|
||||
logAuthStatus('Before starting generation');
|
||||
|
||||
const abortController = new AbortController();
|
||||
setRunningState(true, abortController);
|
||||
setRunningState(projectPath, true, abortController);
|
||||
logger.info('Starting background generation task...');
|
||||
|
||||
// Start generation in background
|
||||
@@ -80,7 +80,7 @@ export function createCreateHandler(events: EventEmitter) {
|
||||
})
|
||||
.finally(() => {
|
||||
logger.info('Generation task finished (success or error)');
|
||||
setRunningState(false, null);
|
||||
setRunningState(projectPath, false, null);
|
||||
});
|
||||
|
||||
logger.info('Returning success response (generation running in background)');
|
||||
|
||||
@@ -40,17 +40,17 @@ export function createGenerateFeaturesHandler(
|
||||
return;
|
||||
}
|
||||
|
||||
const { isRunning } = getSpecRegenerationStatus();
|
||||
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||
if (isRunning) {
|
||||
logger.warn('Generation already running, rejecting request');
|
||||
res.json({ success: false, error: 'Generation already running' });
|
||||
logger.warn('Generation already running for project:', projectPath);
|
||||
res.json({ success: false, error: 'Generation already running for this project' });
|
||||
return;
|
||||
}
|
||||
|
||||
logAuthStatus('Before starting feature generation');
|
||||
|
||||
const abortController = new AbortController();
|
||||
setRunningState(true, abortController);
|
||||
setRunningState(projectPath, true, abortController);
|
||||
logger.info('Starting background feature generation task...');
|
||||
|
||||
generateFeaturesFromSpec(projectPath, events, abortController, maxFeatures, settingsService)
|
||||
@@ -63,7 +63,7 @@ export function createGenerateFeaturesHandler(
|
||||
})
|
||||
.finally(() => {
|
||||
logger.info('Feature generation task finished (success or error)');
|
||||
setRunningState(false, null);
|
||||
setRunningState(projectPath, false, null);
|
||||
});
|
||||
|
||||
logger.info('Returning success response (generation running in background)');
|
||||
|
||||
@@ -48,17 +48,17 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
||||
return;
|
||||
}
|
||||
|
||||
const { isRunning } = getSpecRegenerationStatus();
|
||||
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||
if (isRunning) {
|
||||
logger.warn('Generation already running, rejecting request');
|
||||
res.json({ success: false, error: 'Spec generation already running' });
|
||||
logger.warn('Generation already running for project:', projectPath);
|
||||
res.json({ success: false, error: 'Spec generation already running for this project' });
|
||||
return;
|
||||
}
|
||||
|
||||
logAuthStatus('Before starting generation');
|
||||
|
||||
const abortController = new AbortController();
|
||||
setRunningState(true, abortController);
|
||||
setRunningState(projectPath, true, abortController);
|
||||
logger.info('Starting background generation task...');
|
||||
|
||||
generateSpec(
|
||||
@@ -81,7 +81,7 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
||||
})
|
||||
.finally(() => {
|
||||
logger.info('Generation task finished (success or error)');
|
||||
setRunningState(false, null);
|
||||
setRunningState(projectPath, false, null);
|
||||
});
|
||||
|
||||
logger.info('Returning success response (generation running in background)');
|
||||
|
||||
@@ -6,10 +6,11 @@ import type { Request, Response } from 'express';
import { getSpecRegenerationStatus, getErrorMessage } from '../common.js';

export function createStatusHandler() {
  return async (_req: Request, res: Response): Promise<void> => {
  return async (req: Request, res: Response): Promise<void> => {
    try {
      const { isRunning } = getSpecRegenerationStatus();
      res.json({ success: true, isRunning });
      const projectPath = req.query.projectPath as string | undefined;
      const { isRunning } = getSpecRegenerationStatus(projectPath);
      res.json({ success: true, isRunning, projectPath });
    } catch (error) {
      res.status(500).json({ success: false, error: getErrorMessage(error) });
    }

@@ -6,13 +6,16 @@ import type { Request, Response } from 'express';
import { getSpecRegenerationStatus, setRunningState, getErrorMessage } from '../common.js';

export function createStopHandler() {
  return async (_req: Request, res: Response): Promise<void> => {
  return async (req: Request, res: Response): Promise<void> => {
    try {
      const { currentAbortController } = getSpecRegenerationStatus();
      const { projectPath } = req.body as { projectPath?: string };
      const { currentAbortController } = getSpecRegenerationStatus(projectPath);
      if (currentAbortController) {
        currentAbortController.abort();
      }
      setRunningState(false, null);
      if (projectPath) {
        setRunningState(projectPath, false, null);
      }
      res.json({ success: true });
    } catch (error) {
      res.status(500).json({ success: false, error: getErrorMessage(error) });

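On the client side, status checks and stop requests now need to carry the project path. The sketch below assumes the handlers are mounted under a spec-regeneration router at /status and /stop; neither the mount point nor the route names appear in this diff.

```typescript
// Assumed mount point and route names; only the handler factories appear in this diff.
const BASE = '/api/spec-regeneration';

async function checkStatus(projectPath: string): Promise<boolean> {
  const res = await fetch(`${BASE}/status?projectPath=${encodeURIComponent(projectPath)}`);
  const body = await res.json(); // { success: true, isRunning: boolean, projectPath?: string }
  return body.isRunning;
}

async function stopGeneration(projectPath: string): Promise<void> {
  await fetch(`${BASE}/stop`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ projectPath }),
  });
}
```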
@@ -17,6 +17,7 @@ import { createAnalyzeProjectHandler } from './routes/analyze-project.js';
|
||||
import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
|
||||
import { createCommitFeatureHandler } from './routes/commit-feature.js';
|
||||
import { createApprovePlanHandler } from './routes/approve-plan.js';
|
||||
import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
|
||||
|
||||
export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
||||
const router = Router();
|
||||
@@ -63,6 +64,11 @@ export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
||||
validatePathParams('projectPath'),
|
||||
createApprovePlanHandler(autoModeService)
|
||||
);
|
||||
router.post(
|
||||
'/resume-interrupted',
|
||||
validatePathParams('projectPath'),
|
||||
createResumeInterruptedHandler(autoModeService)
|
||||
);
|
||||
|
||||
return router;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
/**
|
||||
* Resume Interrupted Features Handler
|
||||
*
|
||||
* Checks for features that were interrupted (in pipeline steps or in_progress)
|
||||
* when the server was restarted and resumes them.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
||||
|
||||
const logger = createLogger('ResumeInterrupted');
|
||||
|
||||
interface ResumeInterruptedRequest {
|
||||
projectPath: string;
|
||||
}
|
||||
|
||||
export function createResumeInterruptedHandler(autoModeService: AutoModeService) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
const { projectPath } = req.body as ResumeInterruptedRequest;
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({ error: 'Project path is required' });
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info(`Checking for interrupted features in ${projectPath}`);
|
||||
|
||||
try {
|
||||
await autoModeService.resumeInterruptedFeatures(projectPath);
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Resume check completed',
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Error resuming interrupted features:', error);
|
||||
res.status(500).json({
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -12,11 +12,22 @@ const featureLoader = new FeatureLoader();
|
||||
export function createApplyHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, plan } = req.body as {
|
||||
const {
|
||||
projectPath,
|
||||
plan,
|
||||
branchName: rawBranchName,
|
||||
} = req.body as {
|
||||
projectPath: string;
|
||||
plan: BacklogPlanResult;
|
||||
branchName?: string;
|
||||
};
|
||||
|
||||
// Validate branchName: must be undefined or a non-empty trimmed string
|
||||
const branchName =
|
||||
typeof rawBranchName === 'string' && rawBranchName.trim().length > 0
|
||||
? rawBranchName.trim()
|
||||
: undefined;
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||
return;
|
||||
@@ -82,6 +93,7 @@ export function createApplyHandler() {
|
||||
dependencies: change.feature.dependencies,
|
||||
priority: change.feature.priority,
|
||||
status: 'backlog',
|
||||
branchName,
|
||||
});
|
||||
|
||||
appliedChanges.push(`added:${newFeature.id}`);
|
||||
|
||||
@@ -34,6 +34,13 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
|
||||
error: 'Authentication required',
|
||||
message: "Please run 'claude login' to authenticate",
|
||||
});
|
||||
} else if (message.includes('TRUST_PROMPT_PENDING')) {
|
||||
// Trust prompt appeared but couldn't be auto-approved
|
||||
res.status(200).json({
|
||||
error: 'Trust prompt pending',
|
||||
message:
|
||||
'Claude CLI needs folder permission. Please run "claude" in your terminal and approve access.',
|
||||
});
|
||||
} else if (message.includes('timed out')) {
|
||||
res.status(200).json({
|
||||
error: 'Command timed out',
|
||||
|
||||
@@ -1,17 +1,21 @@
import { Router, Request, Response } from 'express';
import { CodexUsageService } from '../../services/codex-usage-service.js';
import { CodexModelCacheService } from '../../services/codex-model-cache-service.js';
import { createLogger } from '@automaker/utils';

const logger = createLogger('Codex');

export function createCodexRoutes(service: CodexUsageService): Router {
export function createCodexRoutes(
  usageService: CodexUsageService,
  modelCacheService: CodexModelCacheService
): Router {
  const router = Router();

  // Get current usage (attempts to fetch from Codex CLI)
  router.get('/usage', async (req: Request, res: Response) => {
  router.get('/usage', async (_req: Request, res: Response) => {
    try {
      // Check if Codex CLI is available first
      const isAvailable = await service.isAvailable();
      const isAvailable = await usageService.isAvailable();
      if (!isAvailable) {
        // IMPORTANT: This endpoint is behind Automaker session auth already.
        // Use a 200 + error payload for Codex CLI issues so the UI doesn't
@@ -23,7 +27,7 @@ export function createCodexRoutes(service: CodexUsageService): Router {
        return;
      }

      const usage = await service.fetchUsageData();
      const usage = await usageService.fetchUsageData();
      res.json(usage);
    } catch (error) {
      const message = error instanceof Error ? error.message : 'Unknown error';
@@ -52,5 +56,35 @@ export function createCodexRoutes(service: CodexUsageService): Router {
    }
  });

  // Get available Codex models (cached)
  router.get('/models', async (req: Request, res: Response) => {
    try {
      const forceRefresh = req.query.refresh === 'true';
      const { models, cachedAt } = await modelCacheService.getModelsWithMetadata(forceRefresh);

      if (models.length === 0) {
        res.status(503).json({
          success: false,
          error: 'Codex CLI not available or not authenticated',
          message: "Please install Codex CLI and run 'codex login' to authenticate",
        });
        return;
      }

      res.json({
        success: true,
        models,
        cachedAt,
      });
    } catch (error) {
      logger.error('Error fetching models:', error);
      const message = error instanceof Error ? error.message : 'Unknown error';
      res.status(500).json({
        success: false,
        error: message,
      });
    }
  });

  return router;
}

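A consumer of the new models route can request the cached list and force a refresh with the refresh query flag. The /api/codex mount point below is an assumption; only the router itself is shown in this diff.

```typescript
// Assumed mount point for the router returned by createCodexRoutes.
async function loadCodexModels(forceRefresh = false) {
  const res = await fetch(`/api/codex/models${forceRefresh ? '?refresh=true' : ''}`);
  const body = await res.json();
  if (!res.ok) {
    // 503 when the CLI is missing or not authenticated; 500 on unexpected errors.
    throw new Error(body.message ?? body.error ?? 'Failed to fetch Codex models');
  }
  return body.models; // cached list, with body.cachedAt as the cache timestamp
}
```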
@@ -11,13 +11,11 @@
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||
import { PathNotAllowedError } from '@automaker/platform';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
||||
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import * as path from 'path';
|
||||
import type { SettingsService } from '../../../services/settings-service.js';
|
||||
@@ -49,31 +47,6 @@ interface DescribeFileErrorResponse {
|
||||
error: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract text content from Claude SDK response messages
|
||||
*/
|
||||
async function extractTextFromStream(
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
stream: AsyncIterable<any>
|
||||
): Promise<string> {
|
||||
let responseText = '';
|
||||
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
|
||||
for (const block of blocks) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
responseText = msg.result || responseText;
|
||||
}
|
||||
}
|
||||
|
||||
return responseText;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the describe-file request handler
|
||||
*
|
||||
@@ -159,16 +132,14 @@ export function createDescribeFileHandler(
|
||||
|
||||
// Build prompt with file content passed as structured data
|
||||
// The file content is included directly, not via tool invocation
|
||||
const instructionText = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
|
||||
const prompt = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
|
||||
|
||||
Respond with ONLY the description text, no additional formatting, preamble, or explanation.
|
||||
|
||||
File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
||||
File: ${fileName}${truncated ? ' (truncated)' : ''}
|
||||
|
||||
const promptContent = [
|
||||
{ type: 'text' as const, text: instructionText },
|
||||
{ type: 'text' as const, text: `\n\n--- FILE CONTENT ---\n${contentToAnalyze}` },
|
||||
];
|
||||
--- FILE CONTENT ---
|
||||
${contentToAnalyze}`;
|
||||
|
||||
// Use the file's directory as the working directory
|
||||
const cwd = path.dirname(resolvedPath);
|
||||
@@ -190,67 +161,19 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
||||
|
||||
logger.info(`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`);
|
||||
|
||||
let description: string;
|
||||
// Use simpleQuery - provider abstraction handles routing to correct provider
|
||||
const result = await simpleQuery({
|
||||
prompt,
|
||||
model,
|
||||
cwd,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
thinkingLevel,
|
||||
readOnly: true, // File description only reads, doesn't write
|
||||
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||
});
|
||||
|
||||
// Route to appropriate provider based on model type
|
||||
if (isCursorModel(model)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
logger.info(`Using Cursor provider for model: ${model}`);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// Build a simple text prompt for Cursor (no multi-part content blocks)
|
||||
const cursorPrompt = `${instructionText}\n\n--- FILE CONTENT ---\n${contentToAnalyze}`;
|
||||
|
||||
let responseText = '';
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model: bareModel,
|
||||
cwd,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
readOnly: true, // File description only reads, doesn't write
|
||||
})) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
description = responseText;
|
||||
} else {
|
||||
// Use Claude SDK for Claude models
|
||||
logger.info(`Using Claude SDK for model: ${model}`);
|
||||
|
||||
// Use centralized SDK options with proper cwd validation
|
||||
// No tools needed since we're passing file content directly
|
||||
const sdkOptions = createCustomOptions({
|
||||
cwd,
|
||||
model,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
autoLoadClaudeMd,
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
});
|
||||
|
||||
const promptGenerator = (async function* () {
|
||||
yield {
|
||||
type: 'user' as const,
|
||||
session_id: '',
|
||||
message: { role: 'user' as const, content: promptContent },
|
||||
parent_tool_use_id: null,
|
||||
};
|
||||
})();
|
||||
|
||||
const stream = query({ prompt: promptGenerator, options: sdkOptions });
|
||||
|
||||
// Extract the description from the response
|
||||
description = await extractTextFromStream(stream);
|
||||
}
|
||||
const description = result.text;
|
||||
|
||||
if (!description || description.trim().length === 0) {
|
||||
logger.warn('Received empty response from Claude');
|
||||
|
||||
@@ -12,12 +12,10 @@
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger, readImageAsBase64 } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
||||
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import * as path from 'path';
|
||||
import type { SettingsService } from '../../../services/settings-service.js';
|
||||
@@ -178,57 +176,10 @@ function mapDescribeImageError(rawMessage: string | undefined): {
|
||||
return baseResponse;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract text content from Claude SDK response messages and log high-signal stream events.
|
||||
*/
|
||||
async function extractTextFromStream(
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
stream: AsyncIterable<any>,
|
||||
requestId: string
|
||||
): Promise<string> {
|
||||
let responseText = '';
|
||||
let messageCount = 0;
|
||||
|
||||
logger.info(`[${requestId}] [Stream] Begin reading SDK stream...`);
|
||||
|
||||
for await (const msg of stream) {
|
||||
messageCount++;
|
||||
const msgType = msg?.type;
|
||||
const msgSubtype = msg?.subtype;
|
||||
|
||||
// Keep this concise but informative. Full error object is logged in catch blocks.
|
||||
logger.info(
|
||||
`[${requestId}] [Stream] #${messageCount} type=${String(msgType)} subtype=${String(msgSubtype ?? '')}`
|
||||
);
|
||||
|
||||
if (msgType === 'assistant' && msg.message?.content) {
|
||||
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
|
||||
logger.info(`[${requestId}] [Stream] assistant blocks=${blocks.length}`);
|
||||
for (const block of blocks) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (msgType === 'result' && msgSubtype === 'success') {
|
||||
if (typeof msg.result === 'string' && msg.result.length > 0) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`[${requestId}] [Stream] End of stream. messages=${messageCount} textLength=${responseText.length}`
|
||||
);
|
||||
|
||||
return responseText;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the describe-image request handler
|
||||
*
|
||||
* Uses Claude SDK query with multi-part content blocks to include the image (base64),
|
||||
* Uses the provider abstraction with multi-part content blocks to include the image (base64),
|
||||
* matching the agent runner behavior.
|
||||
*
|
||||
* @param settingsService - Optional settings service for loading autoLoadClaudeMd setting
|
||||
@@ -309,27 +260,6 @@ export function createDescribeImageHandler(
|
||||
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
|
||||
);
|
||||
|
||||
// Build multi-part prompt with image block (no Read tool required)
|
||||
const instructionText =
|
||||
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
|
||||
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
|
||||
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
|
||||
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
|
||||
|
||||
const promptContent = [
|
||||
{ type: 'text' as const, text: instructionText },
|
||||
{
|
||||
type: 'image' as const,
|
||||
source: {
|
||||
type: 'base64' as const,
|
||||
media_type: imageData.mimeType,
|
||||
data: imageData.base64,
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
logger.info(`[${requestId}] Built multi-part prompt blocks=${promptContent.length}`);
|
||||
|
||||
const cwd = path.dirname(actualPath);
|
||||
logger.info(`[${requestId}] Using cwd=${cwd}`);
|
||||
|
||||
@@ -348,85 +278,59 @@ export function createDescribeImageHandler(
|
||||
|
||||
logger.info(`[${requestId}] Using model: ${model}`);
|
||||
|
||||
let description: string;
|
||||
// Build the instruction text
|
||||
const instructionText =
|
||||
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
|
||||
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
|
||||
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
|
||||
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
|
||||
|
||||
// Build prompt based on provider capability
|
||||
// Some providers (like Cursor) may not support image content blocks
|
||||
let prompt: string | Array<{ type: string; text?: string; source?: object }>;
|
||||
|
||||
// Route to appropriate provider based on model type
|
||||
if (isCursorModel(model)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
// Note: Cursor may have limited support for image content blocks
|
||||
logger.info(`[${requestId}] Using Cursor provider for model: ${model}`);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// Build prompt with image reference for Cursor
|
||||
// Note: Cursor CLI may not support base64 image blocks directly,
|
||||
// so we include the image path as context
|
||||
const cursorPrompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
|
||||
|
||||
let responseText = '';
|
||||
const queryStart = Date.now();
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model: bareModel,
|
||||
cwd,
|
||||
maxTurns: 1,
|
||||
allowedTools: ['Read'], // Allow Read tool so Cursor can read the image if needed
|
||||
readOnly: true, // Image description only reads, doesn't write
|
||||
})) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
logger.info(`[${requestId}] Cursor query completed in ${Date.now() - queryStart}ms`);
|
||||
description = responseText;
|
||||
// Cursor may not support base64 image blocks directly
|
||||
// Use text prompt with image path reference
|
||||
logger.info(`[${requestId}] Using text prompt for Cursor model`);
|
||||
prompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
|
||||
} else {
|
||||
// Use Claude SDK for Claude models (supports image content blocks)
|
||||
logger.info(`[${requestId}] Using Claude SDK for model: ${model}`);
|
||||
|
||||
// Use the same centralized option builder used across the server (validates cwd)
|
||||
const sdkOptions = createCustomOptions({
|
||||
cwd,
|
||||
model,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
autoLoadClaudeMd,
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
});
|
||||
|
||||
logger.info(
|
||||
`[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
|
||||
sdkOptions.allowedTools
|
||||
)}`
|
||||
);
|
||||
|
||||
const promptGenerator = (async function* () {
|
||||
yield {
|
||||
type: 'user' as const,
|
||||
session_id: '',
|
||||
message: { role: 'user' as const, content: promptContent },
|
||||
parent_tool_use_id: null,
|
||||
};
|
||||
})();
|
||||
|
||||
logger.info(`[${requestId}] Calling query()...`);
|
||||
const queryStart = Date.now();
|
||||
const stream = query({ prompt: promptGenerator, options: sdkOptions });
|
||||
logger.info(`[${requestId}] query() returned stream in ${Date.now() - queryStart}ms`);
|
||||
|
||||
// Extract the description from the response
|
||||
const extractStart = Date.now();
|
||||
description = await extractTextFromStream(stream, requestId);
|
||||
logger.info(`[${requestId}] extractMs=${Date.now() - extractStart}`);
|
||||
// Claude and other vision-capable models support multi-part prompts with images
|
||||
logger.info(`[${requestId}] Using multi-part prompt with image block`);
|
||||
prompt = [
|
||||
{ type: 'text', text: instructionText },
|
||||
{
|
||||
type: 'image',
|
||||
source: {
|
||||
type: 'base64',
|
||||
media_type: imageData.mimeType,
|
||||
data: imageData.base64,
|
||||
},
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Calling simpleQuery...`);
|
||||
const queryStart = Date.now();
|
||||
|
||||
// Use simpleQuery - provider abstraction handles routing
|
||||
const result = await simpleQuery({
|
||||
prompt,
|
||||
model,
|
||||
cwd,
|
||||
maxTurns: 1,
|
||||
allowedTools: isCursorModel(model) ? ['Read'] : [], // Allow Read for Cursor to read image if needed
|
||||
thinkingLevel,
|
||||
readOnly: true, // Image description only reads, doesn't write
|
||||
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||
});
|
||||
|
||||
logger.info(`[${requestId}] simpleQuery completed in ${Date.now() - queryStart}ms`);
|
||||
|
||||
const description = result.text;
|
||||
|
||||
if (!description || description.trim().length === 0) {
|
||||
logger.warn(`[${requestId}] Received empty response from Claude`);
|
||||
logger.warn(`[${requestId}] Received empty response from AI`);
|
||||
const response: DescribeImageErrorResponse = {
|
||||
success: false,
|
||||
error: 'Failed to generate description - empty response',
|
||||
|
||||
@@ -1,22 +1,16 @@
|
||||
/**
|
||||
* POST /enhance-prompt endpoint - Enhance user input text
|
||||
*
|
||||
* Uses Claude AI or Cursor to enhance text based on the specified enhancement mode.
|
||||
* Supports modes: improve, technical, simplify, acceptance
|
||||
* Uses the provider abstraction to enhance text based on the specified
|
||||
* enhancement mode. Works with any configured provider (Claude, Cursor, etc.).
|
||||
* Supports modes: improve, technical, simplify, acceptance, ux-reviewer
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { resolveModelString } from '@automaker/model-resolver';
|
||||
import {
|
||||
CLAUDE_MODEL_MAP,
|
||||
isCursorModel,
|
||||
stripProviderPrefix,
|
||||
ThinkingLevel,
|
||||
getThinkingTokenBudget,
|
||||
} from '@automaker/types';
|
||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
||||
import { CLAUDE_MODEL_MAP, type ThinkingLevel } from '@automaker/types';
|
||||
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||
import type { SettingsService } from '../../../services/settings-service.js';
|
||||
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
|
||||
import {
|
||||
@@ -37,7 +31,7 @@ interface EnhanceRequestBody {
|
||||
enhancementMode: string;
|
||||
/** Optional model override */
|
||||
model?: string;
|
||||
/** Optional thinking level for Claude models (ignored for Cursor models) */
|
||||
/** Optional thinking level for Claude models */
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
}
|
||||
|
||||
@@ -57,76 +51,6 @@ interface EnhanceErrorResponse {
|
||||
error: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract text content from Claude SDK response messages
|
||||
*
|
||||
* @param stream - The async iterable from the query function
|
||||
* @returns The extracted text content
|
||||
*/
|
||||
async function extractTextFromStream(
|
||||
stream: AsyncIterable<{
|
||||
type: string;
|
||||
subtype?: string;
|
||||
result?: string;
|
||||
message?: {
|
||||
content?: Array<{ type: string; text?: string }>;
|
||||
};
|
||||
}>
|
||||
): Promise<string> {
|
||||
let responseText = '';
|
||||
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
responseText = msg.result || responseText;
|
||||
}
|
||||
}
|
||||
|
||||
return responseText;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute enhancement using Cursor provider
|
||||
*
|
||||
* @param prompt - The enhancement prompt
|
||||
* @param model - The Cursor model to use
|
||||
* @returns The enhanced text
|
||||
*/
|
||||
async function executeWithCursor(prompt: string, model: string): Promise<string> {
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
let responseText = '';
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt,
|
||||
model: bareModel,
|
||||
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
|
||||
readOnly: true, // Prompt enhancement only generates text, doesn't write files
|
||||
})) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
||||
// Use result if it's a final accumulated message
|
||||
if (msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return responseText;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the enhance request handler
|
||||
*
|
||||
@@ -188,13 +112,13 @@ export function createEnhanceHandler(
|
||||
technical: prompts.enhancement.technicalSystemPrompt,
|
||||
simplify: prompts.enhancement.simplifySystemPrompt,
|
||||
acceptance: prompts.enhancement.acceptanceSystemPrompt,
|
||||
'ux-reviewer': prompts.enhancement.uxReviewerSystemPrompt,
|
||||
};
|
||||
const systemPrompt = systemPromptMap[validMode];
|
||||
|
||||
logger.debug(`Using ${validMode} system prompt (length: ${systemPrompt.length} chars)`);
|
||||
|
||||
// Build the user prompt with few-shot examples
|
||||
// This helps the model understand this is text transformation, not a coding task
|
||||
const userPrompt = buildUserPrompt(validMode, trimmedText, true);
|
||||
|
||||
// Resolve the model - use the passed model, default to sonnet for quality
|
||||
@@ -202,40 +126,20 @@ export function createEnhanceHandler(
|
||||
|
||||
logger.debug(`Using model: ${resolvedModel}`);
|
||||
|
||||
let enhancedText: string;
|
||||
// Use simpleQuery - provider abstraction handles routing to correct provider
|
||||
// The system prompt is combined with user prompt since some providers
|
||||
// don't have a separate system prompt concept
|
||||
const result = await simpleQuery({
|
||||
prompt: `${systemPrompt}\n\n${userPrompt}`,
|
||||
model: resolvedModel,
|
||||
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
thinkingLevel,
|
||||
readOnly: true, // Prompt enhancement only generates text, doesn't write files
|
||||
});
|
||||
|
||||
// Route to appropriate provider based on model
|
||||
if (isCursorModel(resolvedModel)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
logger.info(`Using Cursor provider for model: ${resolvedModel}`);
|
||||
|
||||
// Cursor doesn't have a separate system prompt concept, so combine them
|
||||
const combinedPrompt = `${systemPrompt}\n\n${userPrompt}`;
|
||||
enhancedText = await executeWithCursor(combinedPrompt, resolvedModel);
|
||||
} else {
|
||||
// Use Claude SDK for Claude models
|
||||
logger.info(`Using Claude provider for model: ${resolvedModel}`);
|
||||
|
||||
// Convert thinkingLevel to maxThinkingTokens for SDK
|
||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
||||
const queryOptions: Parameters<typeof query>[0]['options'] = {
|
||||
model: resolvedModel,
|
||||
systemPrompt,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
permissionMode: 'acceptEdits',
|
||||
};
|
||||
if (maxThinkingTokens) {
|
||||
queryOptions.maxThinkingTokens = maxThinkingTokens;
|
||||
}
|
||||
|
||||
const stream = query({
|
||||
prompt: userPrompt,
|
||||
options: queryOptions,
|
||||
});
|
||||
|
||||
enhancedText = await extractTextFromStream(stream);
|
||||
}
|
||||
const enhancedText = result.text;
|
||||
|
||||
if (!enhancedText || enhancedText.trim().length === 0) {
|
||||
logger.warn('Received empty response from AI');
|
||||
|
||||
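Aside: the per-route extractTextFromStream helpers can be deleted because simpleQuery now returns the accumulated text itself. Below is a rough sketch of the result shape these handlers appear to rely on; the real interface lives in providers/simple-query-service.ts and may differ.

// Sketch only: field names inferred from how the refactored handlers use the result.
interface SimpleQueryResult {
  // Accumulated assistant text (what extractTextFromStream used to build by hand)
  text: string;
  // Present when the provider returned JSON-schema structured output
  structured_output?: Record<string, unknown>;
}

// Usage mirrors the enhance handler above:
//   const result = await simpleQuery({ prompt, model, cwd, maxTurns: 1, allowedTools: [], readOnly: true });
//   const enhancedText = result.text;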
@@ -10,6 +10,7 @@ import { createGetHandler } from './routes/get.js';
|
||||
import { createCreateHandler } from './routes/create.js';
|
||||
import { createUpdateHandler } from './routes/update.js';
|
||||
import { createBulkUpdateHandler } from './routes/bulk-update.js';
|
||||
import { createBulkDeleteHandler } from './routes/bulk-delete.js';
|
||||
import { createDeleteHandler } from './routes/delete.js';
|
||||
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
|
||||
import { createGenerateTitleHandler } from './routes/generate-title.js';
|
||||
@@ -26,6 +27,11 @@ export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
|
||||
validatePathParams('projectPath'),
|
||||
createBulkUpdateHandler(featureLoader)
|
||||
);
|
||||
router.post(
|
||||
'/bulk-delete',
|
||||
validatePathParams('projectPath'),
|
||||
createBulkDeleteHandler(featureLoader)
|
||||
);
|
||||
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
|
||||
router.post('/agent-output', createAgentOutputHandler(featureLoader));
|
||||
router.post('/raw-output', createRawOutputHandler(featureLoader));
|
||||
|
||||
apps/server/src/routes/features/routes/bulk-delete.ts (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
/**
|
||||
* POST /bulk-delete endpoint - Delete multiple features at once
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
interface BulkDeleteRequest {
|
||||
projectPath: string;
|
||||
featureIds: string[];
|
||||
}
|
||||
|
||||
interface BulkDeleteResult {
|
||||
featureId: string;
|
||||
success: boolean;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export function createBulkDeleteHandler(featureLoader: FeatureLoader) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureIds } = req.body as BulkDeleteRequest;
|
||||
|
||||
if (!projectPath || !featureIds || !Array.isArray(featureIds) || featureIds.length === 0) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath and featureIds (non-empty array) are required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const results = await Promise.all(
|
||||
featureIds.map(async (featureId) => {
|
||||
const success = await featureLoader.delete(projectPath, featureId);
|
||||
if (success) {
|
||||
return { featureId, success: true };
|
||||
}
|
||||
return {
|
||||
featureId,
|
||||
success: false,
|
||||
error: 'Deletion failed. Check server logs for details.',
|
||||
};
|
||||
})
|
||||
);
|
||||
|
||||
const successCount = results.reduce((count, r) => count + (r.success ? 1 : 0), 0);
|
||||
const failureCount = results.length - successCount;
|
||||
|
||||
res.json({
|
||||
success: failureCount === 0,
|
||||
deletedCount: successCount,
|
||||
failedCount: failureCount,
|
||||
results,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Bulk delete features failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
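Aside: a minimal client call against the new bulk-delete endpoint might look like the sketch below. The '/api/features' mount prefix is an assumption, since the router mounting is not shown in this diff; the response shape mirrors the handler above.

// Usage sketch (mount prefix assumed).
async function bulkDeleteFeatures(projectPath: string, featureIds: string[]) {
  const res = await fetch('/api/features/bulk-delete', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ projectPath, featureIds }),
  });
  return (await res.json()) as {
    success: boolean;
    deletedCount: number;
    failedCount: number;
    results: Array<{ featureId: string; success: boolean; error?: string }>;
  };
}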
@@ -1,13 +1,14 @@
|
||||
/**
|
||||
* POST /features/generate-title endpoint - Generate a concise title from description
|
||||
*
|
||||
* Uses Claude Haiku to generate a short, descriptive title from feature description.
|
||||
* Uses the provider abstraction to generate a short, descriptive title
|
||||
* from a feature description. Works with any configured provider (Claude, Cursor, etc.).
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver';
|
||||
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||
|
||||
const logger = createLogger('GenerateTitle');
|
||||
|
||||
@@ -34,33 +35,6 @@ Rules:
|
||||
- No quotes, periods, or extra formatting
|
||||
- Capture the essence of the feature in a scannable way`;
|
||||
|
||||
async function extractTextFromStream(
|
||||
stream: AsyncIterable<{
|
||||
type: string;
|
||||
subtype?: string;
|
||||
result?: string;
|
||||
message?: {
|
||||
content?: Array<{ type: string; text?: string }>;
|
||||
};
|
||||
}>
|
||||
): Promise<string> {
|
||||
let responseText = '';
|
||||
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
responseText = msg.result || responseText;
|
||||
}
|
||||
}
|
||||
|
||||
return responseText;
|
||||
}
|
||||
|
||||
export function createGenerateTitleHandler(): (req: Request, res: Response) => Promise<void> {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
@@ -89,21 +63,19 @@ export function createGenerateTitleHandler(): (req: Request, res: Response) => P
|
||||
|
||||
const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`;
|
||||
|
||||
const stream = query({
|
||||
prompt: userPrompt,
|
||||
options: {
|
||||
model: CLAUDE_MODEL_MAP.haiku,
|
||||
systemPrompt: SYSTEM_PROMPT,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
permissionMode: 'default',
|
||||
},
|
||||
// Use simpleQuery - provider abstraction handles all the streaming/extraction
|
||||
const result = await simpleQuery({
|
||||
prompt: `${SYSTEM_PROMPT}\n\n${userPrompt}`,
|
||||
model: CLAUDE_MODEL_MAP.haiku,
|
||||
cwd: process.cwd(),
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
});
|
||||
|
||||
const title = await extractTextFromStream(stream);
|
||||
const title = result.text;
|
||||
|
||||
if (!title || title.trim().length === 0) {
|
||||
logger.warn('Received empty response from Claude');
|
||||
logger.warn('Received empty response from AI');
|
||||
const response: GenerateTitleErrorResponse = {
|
||||
success: false,
|
||||
error: 'Failed to generate title - empty response',
|
||||
|
||||
@@ -10,14 +10,21 @@ import { getErrorMessage, logError } from '../common.js';
|
||||
export function createUpdateHandler(featureLoader: FeatureLoader) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureId, updates, descriptionHistorySource, enhancementMode } =
|
||||
req.body as {
|
||||
projectPath: string;
|
||||
featureId: string;
|
||||
updates: Partial<Feature>;
|
||||
descriptionHistorySource?: 'enhance' | 'edit';
|
||||
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance';
|
||||
};
|
||||
const {
|
||||
projectPath,
|
||||
featureId,
|
||||
updates,
|
||||
descriptionHistorySource,
|
||||
enhancementMode,
|
||||
preEnhancementDescription,
|
||||
} = req.body as {
|
||||
projectPath: string;
|
||||
featureId: string;
|
||||
updates: Partial<Feature>;
|
||||
descriptionHistorySource?: 'enhance' | 'edit';
|
||||
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance' | 'ux-reviewer';
|
||||
preEnhancementDescription?: string;
|
||||
};
|
||||
|
||||
if (!projectPath || !featureId || !updates) {
|
||||
res.status(400).json({
|
||||
@@ -32,7 +39,8 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
|
||||
featureId,
|
||||
updates,
|
||||
descriptionHistorySource,
|
||||
enhancementMode
|
||||
enhancementMode,
|
||||
preEnhancementDescription
|
||||
);
|
||||
res.json({ success: true, feature: updated });
|
||||
} catch (error) {
|
||||
|
||||
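Aside: with the new field, the body the update handler destructures might look like this; all values are illustrative and the field names come from the destructuring above.

// Illustrative request body only (values are made up).
const exampleUpdateBody = {
  projectPath: '/path/to/project',
  featureId: 'feature-123',
  updates: { description: 'Enhanced description text' },
  descriptionHistorySource: 'enhance' as const,
  enhancementMode: 'ux-reviewer' as const,
  preEnhancementDescription: 'Original description before enhancement',
};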
@@ -5,6 +5,43 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
|
||||
|
||||
const GIT_REMOTE_ORIGIN_COMMAND = 'git remote get-url origin';
|
||||
const GH_REPO_VIEW_COMMAND = 'gh repo view --json name,owner';
|
||||
const GITHUB_REPO_URL_PREFIX = 'https://github.com/';
|
||||
const GITHUB_HTTPS_REMOTE_REGEX = /https:\/\/github\.com\/([^/]+)\/([^/.]+)/;
|
||||
const GITHUB_SSH_REMOTE_REGEX = /git@github\.com:([^/]+)\/([^/.]+)/;
|
||||
|
||||
interface GhRepoViewResponse {
|
||||
name?: string;
|
||||
owner?: {
|
||||
login?: string;
|
||||
};
|
||||
}
|
||||
|
||||
async function resolveRepoFromGh(projectPath: string): Promise<{
|
||||
owner: string;
|
||||
repo: string;
|
||||
} | null> {
|
||||
try {
|
||||
const { stdout } = await execAsync(GH_REPO_VIEW_COMMAND, {
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
});
|
||||
|
||||
const data = JSON.parse(stdout) as GhRepoViewResponse;
|
||||
const owner = typeof data.owner?.login === 'string' ? data.owner.login : null;
|
||||
const repo = typeof data.name === 'string' ? data.name : null;
|
||||
|
||||
if (!owner || !repo) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return { owner, repo };
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export interface GitHubRemoteStatus {
|
||||
hasGitHubRemote: boolean;
|
||||
remoteUrl: string | null;
|
||||
@@ -21,19 +58,38 @@ export async function checkGitHubRemote(projectPath: string): Promise<GitHubRemo
|
||||
};
|
||||
|
||||
try {
|
||||
// Get the remote URL (origin by default)
|
||||
const { stdout } = await execAsync('git remote get-url origin', {
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
});
|
||||
let remoteUrl = '';
|
||||
try {
|
||||
// Get the remote URL (origin by default)
|
||||
const { stdout } = await execAsync(GIT_REMOTE_ORIGIN_COMMAND, {
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
});
|
||||
remoteUrl = stdout.trim();
|
||||
status.remoteUrl = remoteUrl || null;
|
||||
} catch {
|
||||
// Ignore missing origin remote
|
||||
}
|
||||
|
||||
const remoteUrl = stdout.trim();
|
||||
status.remoteUrl = remoteUrl;
|
||||
const ghRepo = await resolveRepoFromGh(projectPath);
|
||||
if (ghRepo) {
|
||||
status.hasGitHubRemote = true;
|
||||
status.owner = ghRepo.owner;
|
||||
status.repo = ghRepo.repo;
|
||||
if (!status.remoteUrl) {
|
||||
status.remoteUrl = `${GITHUB_REPO_URL_PREFIX}${ghRepo.owner}/${ghRepo.repo}`;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
// Check if it's a GitHub URL
|
||||
// Formats: https://github.com/owner/repo.git, git@github.com:owner/repo.git
|
||||
const httpsMatch = remoteUrl.match(/https:\/\/github\.com\/([^/]+)\/([^/.]+)/);
|
||||
const sshMatch = remoteUrl.match(/git@github\.com:([^/]+)\/([^/.]+)/);
|
||||
if (!remoteUrl) {
|
||||
return status;
|
||||
}
|
||||
|
||||
const httpsMatch = remoteUrl.match(GITHUB_HTTPS_REMOTE_REGEX);
|
||||
const sshMatch = remoteUrl.match(GITHUB_SSH_REMOTE_REGEX);
|
||||
|
||||
const match = httpsMatch || sshMatch;
|
||||
if (match) {
|
||||
|
||||
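Aside: a quick sketch of what the two remote-URL regexes capture; the example URLs are hypothetical.

// Owner lands in capture group 1, repo in group 2; '.git' is excluded by [^/.]+.
const HTTPS = /https:\/\/github\.com\/([^/]+)\/([^/.]+)/;
const SSH = /git@github\.com:([^/]+)\/([^/.]+)/;

const httpsMatch = 'https://github.com/octo-org/example-repo.git'.match(HTTPS);
const sshMatch = 'git@github.com:octo-org/example-repo.git'.match(SSH);
// httpsMatch?.[1] === 'octo-org', httpsMatch?.[2] === 'example-repo'
// sshMatch?.[1] === 'octo-org',   sshMatch?.[2] === 'example-repo'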
@@ -25,19 +25,24 @@ interface GraphQLComment {
|
||||
updatedAt: string;
|
||||
}
|
||||
|
||||
interface GraphQLCommentConnection {
|
||||
totalCount: number;
|
||||
pageInfo: {
|
||||
hasNextPage: boolean;
|
||||
endCursor: string | null;
|
||||
};
|
||||
nodes: GraphQLComment[];
|
||||
}
|
||||
|
||||
interface GraphQLIssueOrPullRequest {
|
||||
__typename: 'Issue' | 'PullRequest';
|
||||
comments: GraphQLCommentConnection;
|
||||
}
|
||||
|
||||
interface GraphQLResponse {
|
||||
data?: {
|
||||
repository?: {
|
||||
issue?: {
|
||||
comments: {
|
||||
totalCount: number;
|
||||
pageInfo: {
|
||||
hasNextPage: boolean;
|
||||
endCursor: string | null;
|
||||
};
|
||||
nodes: GraphQLComment[];
|
||||
};
|
||||
};
|
||||
issueOrPullRequest?: GraphQLIssueOrPullRequest | null;
|
||||
};
|
||||
};
|
||||
errors?: Array<{ message: string }>;
|
||||
@@ -45,6 +50,7 @@ interface GraphQLResponse {
|
||||
|
||||
/** Timeout for GitHub API requests in milliseconds */
|
||||
const GITHUB_API_TIMEOUT_MS = 30000;
|
||||
const COMMENTS_PAGE_SIZE = 50;
|
||||
|
||||
/**
|
||||
* Validate cursor format (GraphQL cursors are typically base64 strings)
|
||||
@@ -54,7 +60,7 @@ function isValidCursor(cursor: string): boolean {
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch comments for a specific issue using GitHub GraphQL API
|
||||
* Fetch comments for a specific issue or pull request using GitHub GraphQL API
|
||||
*/
|
||||
async function fetchIssueComments(
|
||||
projectPath: string,
|
||||
@@ -70,24 +76,52 @@ async function fetchIssueComments(
|
||||
|
||||
// Use GraphQL variables instead of string interpolation for safety
|
||||
const query = `
|
||||
query GetIssueComments($owner: String!, $repo: String!, $issueNumber: Int!, $cursor: String) {
|
||||
query GetIssueComments(
|
||||
$owner: String!
|
||||
$repo: String!
|
||||
$issueNumber: Int!
|
||||
$cursor: String
|
||||
$pageSize: Int!
|
||||
) {
|
||||
repository(owner: $owner, name: $repo) {
|
||||
issue(number: $issueNumber) {
|
||||
comments(first: 50, after: $cursor) {
|
||||
totalCount
|
||||
pageInfo {
|
||||
hasNextPage
|
||||
endCursor
|
||||
}
|
||||
nodes {
|
||||
id
|
||||
author {
|
||||
login
|
||||
avatarUrl
|
||||
issueOrPullRequest(number: $issueNumber) {
|
||||
__typename
|
||||
... on Issue {
|
||||
comments(first: $pageSize, after: $cursor) {
|
||||
totalCount
|
||||
pageInfo {
|
||||
hasNextPage
|
||||
endCursor
|
||||
}
|
||||
nodes {
|
||||
id
|
||||
author {
|
||||
login
|
||||
avatarUrl
|
||||
}
|
||||
body
|
||||
createdAt
|
||||
updatedAt
|
||||
}
|
||||
}
|
||||
}
|
||||
... on PullRequest {
|
||||
comments(first: $pageSize, after: $cursor) {
|
||||
totalCount
|
||||
pageInfo {
|
||||
hasNextPage
|
||||
endCursor
|
||||
}
|
||||
nodes {
|
||||
id
|
||||
author {
|
||||
login
|
||||
avatarUrl
|
||||
}
|
||||
body
|
||||
createdAt
|
||||
updatedAt
|
||||
}
|
||||
body
|
||||
createdAt
|
||||
updatedAt
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -99,6 +133,7 @@ async function fetchIssueComments(
|
||||
repo,
|
||||
issueNumber,
|
||||
cursor: cursor || null,
|
||||
pageSize: COMMENTS_PAGE_SIZE,
|
||||
};
|
||||
|
||||
const requestBody = JSON.stringify({ query, variables });
|
||||
@@ -140,10 +175,10 @@ async function fetchIssueComments(
|
||||
throw new Error(response.errors[0].message);
|
||||
}
|
||||
|
||||
const commentsData = response.data?.repository?.issue?.comments;
|
||||
const commentsData = response.data?.repository?.issueOrPullRequest?.comments;
|
||||
|
||||
if (!commentsData) {
|
||||
throw new Error('Issue not found or no comments data available');
|
||||
throw new Error('Issue or pull request not found or no comments data available');
|
||||
}
|
||||
|
||||
const comments: GitHubComment[] = commentsData.nodes.map((node) => ({
|
||||
|
||||
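Aside: a hedged sketch of the pagination loop a caller could run on top of fetchIssueComments, driven by the pageInfo fields declared above. The page-fetching function is abstracted here because its exact signature is not fully shown in this diff.

// Pagination sketch: keep requesting pages of COMMENTS_PAGE_SIZE (50) until hasNextPage is false.
async function fetchAllComments<T>(
  fetchPage: (cursor?: string) => Promise<{
    nodes: T[];
    pageInfo: { hasNextPage: boolean; endCursor: string | null };
  }>
): Promise<T[]> {
  const all: T[] = [];
  let cursor: string | undefined;
  for (;;) {
    const page = await fetchPage(cursor);
    all.push(...page.nodes);
    if (!page.pageInfo.hasNextPage || !page.pageInfo.endCursor) break;
    cursor = page.pageInfo.endCursor;
  }
  return all;
}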
@@ -9,6 +9,17 @@ import { checkGitHubRemote } from './check-github-remote.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('ListIssues');
|
||||
const OPEN_ISSUES_LIMIT = 100;
|
||||
const CLOSED_ISSUES_LIMIT = 50;
|
||||
const ISSUE_LIST_FIELDS = 'number,title,state,author,createdAt,labels,url,body,assignees';
|
||||
const ISSUE_STATE_OPEN = 'open';
|
||||
const ISSUE_STATE_CLOSED = 'closed';
|
||||
const GH_ISSUE_LIST_COMMAND = 'gh issue list';
|
||||
const GH_STATE_FLAG = '--state';
|
||||
const GH_JSON_FLAG = '--json';
|
||||
const GH_LIMIT_FLAG = '--limit';
|
||||
const LINKED_PRS_BATCH_SIZE = 20;
|
||||
const LINKED_PRS_TIMELINE_ITEMS = 10;
|
||||
|
||||
export interface GitHubLabel {
|
||||
name: string;
|
||||
@@ -69,34 +80,68 @@ async function fetchLinkedPRs(
|
||||
|
||||
// Build GraphQL query for batch fetching linked PRs
|
||||
// We fetch up to 20 issues at a time to avoid query limits
|
||||
const batchSize = 20;
|
||||
for (let i = 0; i < issueNumbers.length; i += batchSize) {
|
||||
const batch = issueNumbers.slice(i, i + batchSize);
|
||||
for (let i = 0; i < issueNumbers.length; i += LINKED_PRS_BATCH_SIZE) {
|
||||
const batch = issueNumbers.slice(i, i + LINKED_PRS_BATCH_SIZE);
|
||||
|
||||
const issueQueries = batch
|
||||
.map(
|
||||
(num, idx) => `
|
||||
issue${idx}: issue(number: ${num}) {
|
||||
number
|
||||
timelineItems(first: 10, itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]) {
|
||||
nodes {
|
||||
... on CrossReferencedEvent {
|
||||
source {
|
||||
... on PullRequest {
|
||||
number
|
||||
title
|
||||
state
|
||||
url
|
||||
issue${idx}: issueOrPullRequest(number: ${num}) {
|
||||
... on Issue {
|
||||
number
|
||||
timelineItems(
|
||||
first: ${LINKED_PRS_TIMELINE_ITEMS}
|
||||
itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]
|
||||
) {
|
||||
nodes {
|
||||
... on CrossReferencedEvent {
|
||||
source {
|
||||
... on PullRequest {
|
||||
number
|
||||
title
|
||||
state
|
||||
url
|
||||
}
|
||||
}
|
||||
}
|
||||
... on ConnectedEvent {
|
||||
subject {
|
||||
... on PullRequest {
|
||||
number
|
||||
title
|
||||
state
|
||||
url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
... on ConnectedEvent {
|
||||
subject {
|
||||
... on PullRequest {
|
||||
number
|
||||
title
|
||||
state
|
||||
url
|
||||
}
|
||||
}
|
||||
... on PullRequest {
|
||||
number
|
||||
timelineItems(
|
||||
first: ${LINKED_PRS_TIMELINE_ITEMS}
|
||||
itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]
|
||||
) {
|
||||
nodes {
|
||||
... on CrossReferencedEvent {
|
||||
source {
|
||||
... on PullRequest {
|
||||
number
|
||||
title
|
||||
state
|
||||
url
|
||||
}
|
||||
}
|
||||
}
|
||||
... on ConnectedEvent {
|
||||
subject {
|
||||
... on PullRequest {
|
||||
number
|
||||
title
|
||||
state
|
||||
url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -213,16 +258,35 @@ export function createListIssuesHandler() {
|
||||
}
|
||||
|
||||
// Fetch open and closed issues in parallel (now including assignees)
|
||||
const repoQualifier =
|
||||
remoteStatus.owner && remoteStatus.repo ? `${remoteStatus.owner}/${remoteStatus.repo}` : '';
|
||||
const repoFlag = repoQualifier ? `-R ${repoQualifier}` : '';
|
||||
const [openResult, closedResult] = await Promise.all([
|
||||
execAsync(
|
||||
'gh issue list --state open --json number,title,state,author,createdAt,labels,url,body,assignees --limit 100',
|
||||
[
|
||||
GH_ISSUE_LIST_COMMAND,
|
||||
repoFlag,
|
||||
`${GH_STATE_FLAG} ${ISSUE_STATE_OPEN}`,
|
||||
`${GH_JSON_FLAG} ${ISSUE_LIST_FIELDS}`,
|
||||
`${GH_LIMIT_FLAG} ${OPEN_ISSUES_LIMIT}`,
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(' '),
|
||||
{
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
}
|
||||
),
|
||||
execAsync(
|
||||
'gh issue list --state closed --json number,title,state,author,createdAt,labels,url,body,assignees --limit 50',
|
||||
[
|
||||
GH_ISSUE_LIST_COMMAND,
|
||||
repoFlag,
|
||||
`${GH_STATE_FLAG} ${ISSUE_STATE_CLOSED}`,
|
||||
`${GH_JSON_FLAG} ${ISSUE_LIST_FIELDS}`,
|
||||
`${GH_LIMIT_FLAG} ${CLOSED_ISSUES_LIMIT}`,
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(' '),
|
||||
{
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
|
||||
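Aside: for illustration, the open-issues branch composes a command like the one below. The owner/repo qualifier is hypothetical; when no GitHub remote is resolved, repoFlag is an empty string and filter(Boolean) drops it.

// Composed command illustration, mirroring the constants defined above.
const parts = [
  'gh issue list',
  '-R octo-org/example-repo', // repoFlag; '' when owner/repo could not be resolved
  '--state open',
  '--json number,title,state,author,createdAt,labels,url,body,assignees',
  '--limit 100',
];
const command = parts.filter(Boolean).join(' ');
// "gh issue list -R octo-org/example-repo --state open --json number,title,... --limit 100"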
@@ -6,6 +6,17 @@ import type { Request, Response } from 'express';
|
||||
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
|
||||
import { checkGitHubRemote } from './check-github-remote.js';
|
||||
|
||||
const OPEN_PRS_LIMIT = 100;
|
||||
const MERGED_PRS_LIMIT = 50;
|
||||
const PR_LIST_FIELDS =
|
||||
'number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body';
|
||||
const PR_STATE_OPEN = 'open';
|
||||
const PR_STATE_MERGED = 'merged';
|
||||
const GH_PR_LIST_COMMAND = 'gh pr list';
|
||||
const GH_STATE_FLAG = '--state';
|
||||
const GH_JSON_FLAG = '--json';
|
||||
const GH_LIMIT_FLAG = '--limit';
|
||||
|
||||
export interface GitHubLabel {
|
||||
name: string;
|
||||
color: string;
|
||||
@@ -57,16 +68,36 @@ export function createListPRsHandler() {
|
||||
return;
|
||||
}
|
||||
|
||||
const repoQualifier =
|
||||
remoteStatus.owner && remoteStatus.repo ? `${remoteStatus.owner}/${remoteStatus.repo}` : '';
|
||||
const repoFlag = repoQualifier ? `-R ${repoQualifier}` : '';
|
||||
|
||||
const [openResult, mergedResult] = await Promise.all([
|
||||
execAsync(
|
||||
'gh pr list --state open --json number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body --limit 100',
|
||||
[
|
||||
GH_PR_LIST_COMMAND,
|
||||
repoFlag,
|
||||
`${GH_STATE_FLAG} ${PR_STATE_OPEN}`,
|
||||
`${GH_JSON_FLAG} ${PR_LIST_FIELDS}`,
|
||||
`${GH_LIMIT_FLAG} ${OPEN_PRS_LIMIT}`,
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(' '),
|
||||
{
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
}
|
||||
),
|
||||
execAsync(
|
||||
'gh pr list --state merged --json number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body --limit 50',
|
||||
[
|
||||
GH_PR_LIST_COMMAND,
|
||||
repoFlag,
|
||||
`${GH_STATE_FLAG} ${PR_STATE_MERGED}`,
|
||||
`${GH_JSON_FLAG} ${PR_LIST_FIELDS}`,
|
||||
`${GH_LIMIT_FLAG} ${MERGED_PRS_LIMIT}`,
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(' '),
|
||||
{
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
|
||||
@@ -1,29 +1,33 @@
|
||||
/**
|
||||
* POST /validate-issue endpoint - Validate a GitHub issue using Claude SDK or Cursor (async)
|
||||
* POST /validate-issue endpoint - Validate a GitHub issue using provider abstraction (async)
|
||||
*
|
||||
* Scans the codebase to determine if an issue is valid, invalid, or needs clarification.
|
||||
* Runs asynchronously and emits events for progress and completion.
|
||||
* Supports both Claude models and Cursor models.
|
||||
* Supports Claude, Codex, Cursor, and OpenCode models.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import type { EventEmitter } from '../../../lib/events.js';
|
||||
import type {
|
||||
IssueValidationResult,
|
||||
IssueValidationEvent,
|
||||
ModelAlias,
|
||||
CursorModelId,
|
||||
ModelId,
|
||||
GitHubComment,
|
||||
LinkedPRInfo,
|
||||
ThinkingLevel,
|
||||
ReasoningEffort,
|
||||
} from '@automaker/types';
|
||||
import {
|
||||
DEFAULT_PHASE_MODELS,
|
||||
isClaudeModel,
|
||||
isCodexModel,
|
||||
isCursorModel,
|
||||
isOpencodeModel,
|
||||
} from '@automaker/types';
|
||||
import { isCursorModel, DEFAULT_PHASE_MODELS, stripProviderPrefix } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createSuggestionsOptions } from '../../../lib/sdk-options.js';
|
||||
import { extractJson } from '../../../lib/json-extractor.js';
|
||||
import { writeValidation } from '../../../lib/validation-storage.js';
|
||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
||||
import { streamingQuery } from '../../../providers/simple-query-service.js';
|
||||
import {
|
||||
issueValidationSchema,
|
||||
ISSUE_VALIDATION_SYSTEM_PROMPT,
|
||||
@@ -41,9 +45,6 @@ import {
|
||||
import type { SettingsService } from '../../../services/settings-service.js';
|
||||
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
|
||||
|
||||
/** Valid Claude model values for validation */
|
||||
const VALID_CLAUDE_MODELS: readonly ModelAlias[] = ['opus', 'sonnet', 'haiku'] as const;
|
||||
|
||||
/**
|
||||
* Request body for issue validation
|
||||
*/
|
||||
@@ -53,10 +54,12 @@ interface ValidateIssueRequestBody {
|
||||
issueTitle: string;
|
||||
issueBody: string;
|
||||
issueLabels?: string[];
|
||||
/** Model to use for validation (opus, sonnet, haiku, or cursor model IDs) */
|
||||
model?: ModelAlias | CursorModelId;
|
||||
/** Thinking level for Claude models (ignored for Cursor models) */
|
||||
/** Model to use for validation (Claude alias or provider model ID) */
|
||||
model?: ModelId;
|
||||
/** Thinking level for Claude models (ignored for non-Claude models) */
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
/** Reasoning effort for Codex models (ignored for non-Codex models) */
|
||||
reasoningEffort?: ReasoningEffort;
|
||||
/** Comments to include in validation analysis */
|
||||
comments?: GitHubComment[];
|
||||
/** Linked pull requests for this issue */
|
||||
@@ -68,7 +71,7 @@ interface ValidateIssueRequestBody {
|
||||
*
|
||||
* Emits events for start, progress, complete, and error.
|
||||
* Stores result on completion.
|
||||
* Supports both Claude models (with structured output) and Cursor models (with JSON parsing).
|
||||
* Supports Claude/Codex models (structured output) and Cursor/OpenCode models (JSON parsing).
|
||||
*/
|
||||
async function runValidation(
|
||||
projectPath: string,
|
||||
@@ -76,13 +79,14 @@ async function runValidation(
|
||||
issueTitle: string,
|
||||
issueBody: string,
|
||||
issueLabels: string[] | undefined,
|
||||
model: ModelAlias | CursorModelId,
|
||||
model: ModelId,
|
||||
events: EventEmitter,
|
||||
abortController: AbortController,
|
||||
settingsService?: SettingsService,
|
||||
comments?: ValidationComment[],
|
||||
linkedPRs?: ValidationLinkedPR[],
|
||||
thinkingLevel?: ThinkingLevel
|
||||
thinkingLevel?: ThinkingLevel,
|
||||
reasoningEffort?: ReasoningEffort
|
||||
): Promise<void> {
|
||||
// Emit start event
|
||||
const startEvent: IssueValidationEvent = {
|
||||
@@ -102,7 +106,7 @@ async function runValidation(
|
||||
|
||||
try {
|
||||
// Build the prompt (include comments and linked PRs if provided)
|
||||
const prompt = buildValidationPrompt(
|
||||
const basePrompt = buildValidationPrompt(
|
||||
issueNumber,
|
||||
issueTitle,
|
||||
issueBody,
|
||||
@@ -111,20 +115,15 @@ async function runValidation(
|
||||
linkedPRs
|
||||
);
|
||||
|
||||
let validationResult: IssueValidationResult | null = null;
|
||||
let responseText = '';
|
||||
|
||||
// Route to appropriate provider based on model
|
||||
if (isCursorModel(model)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
logger.info(`Using Cursor provider for validation with model: ${model}`);
|
||||
// Determine if we should use structured output (Claude/Codex support it, Cursor/OpenCode don't)
|
||||
const useStructuredOutput = isClaudeModel(model) || isCodexModel(model);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// For Cursor, include the system prompt and schema in the user prompt
|
||||
const cursorPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
|
||||
// Build the final prompt - for Cursor, include system prompt and JSON schema instructions
|
||||
let finalPrompt = basePrompt;
|
||||
if (!useStructuredOutput) {
|
||||
finalPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
|
||||
|
||||
CRITICAL INSTRUCTIONS:
|
||||
1. DO NOT write any files. Return the JSON in your response only.
|
||||
@@ -135,121 +134,78 @@ ${JSON.stringify(issueValidationSchema, null, 2)}
|
||||
|
||||
Your entire response should be valid JSON starting with { and ending with }. No text before or after.
|
||||
|
||||
${prompt}`;
|
||||
${basePrompt}`;
|
||||
}
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
readOnly: true, // Issue validation only reads code, doesn't write
|
||||
})) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
// Load autoLoadClaudeMd setting
|
||||
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
||||
projectPath,
|
||||
settingsService,
|
||||
'[ValidateIssue]'
|
||||
);
|
||||
|
||||
// Emit progress event
|
||||
const progressEvent: IssueValidationEvent = {
|
||||
type: 'issue_validation_progress',
|
||||
issueNumber,
|
||||
content: block.text,
|
||||
projectPath,
|
||||
};
|
||||
events.emit('issue-validation:event', progressEvent);
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
||||
// Use result if it's a final accumulated message
|
||||
if (msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse JSON from the response text using shared utility
|
||||
if (responseText) {
|
||||
validationResult = extractJson<IssueValidationResult>(responseText, { logger });
|
||||
}
|
||||
} else {
|
||||
// Use Claude SDK for Claude models
|
||||
logger.info(`Using Claude provider for validation with model: ${model}`);
|
||||
|
||||
// Load autoLoadClaudeMd setting
|
||||
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
||||
projectPath,
|
||||
settingsService,
|
||||
'[ValidateIssue]'
|
||||
);
|
||||
|
||||
// Use thinkingLevel from request if provided, otherwise fall back to settings
|
||||
let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
|
||||
// Use request overrides if provided, otherwise fall back to settings
|
||||
let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
|
||||
let effectiveReasoningEffort: ReasoningEffort | undefined = reasoningEffort;
|
||||
if (!effectiveThinkingLevel || !effectiveReasoningEffort) {
|
||||
const settings = await settingsService?.getGlobalSettings();
|
||||
const phaseModelEntry =
|
||||
settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
|
||||
const resolved = resolvePhaseModel(phaseModelEntry);
|
||||
if (!effectiveThinkingLevel) {
|
||||
const settings = await settingsService?.getGlobalSettings();
|
||||
const phaseModelEntry =
|
||||
settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
|
||||
const resolved = resolvePhaseModel(phaseModelEntry);
|
||||
effectiveThinkingLevel = resolved.thinkingLevel;
|
||||
}
|
||||
|
||||
// Create SDK options with structured output and abort controller
|
||||
const options = createSuggestionsOptions({
|
||||
cwd: projectPath,
|
||||
model: model as ModelAlias,
|
||||
systemPrompt: ISSUE_VALIDATION_SYSTEM_PROMPT,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
thinkingLevel: effectiveThinkingLevel,
|
||||
outputFormat: {
|
||||
type: 'json_schema',
|
||||
schema: issueValidationSchema as Record<string, unknown>,
|
||||
},
|
||||
});
|
||||
|
||||
// Execute the query
|
||||
const stream = query({ prompt, options });
|
||||
|
||||
for await (const msg of stream) {
|
||||
// Collect assistant text for debugging and emit progress
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text') {
|
||||
responseText += block.text;
|
||||
|
||||
// Emit progress event
|
||||
const progressEvent: IssueValidationEvent = {
|
||||
type: 'issue_validation_progress',
|
||||
issueNumber,
|
||||
content: block.text,
|
||||
projectPath,
|
||||
};
|
||||
events.emit('issue-validation:event', progressEvent);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract structured output on success
|
||||
if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
const resultMsg = msg as { structured_output?: IssueValidationResult };
|
||||
if (resultMsg.structured_output) {
|
||||
validationResult = resultMsg.structured_output;
|
||||
logger.debug('Received structured output:', validationResult);
|
||||
}
|
||||
}
|
||||
|
||||
// Handle errors
|
||||
if (msg.type === 'result') {
|
||||
const resultMsg = msg as { subtype?: string };
|
||||
if (resultMsg.subtype === 'error_max_structured_output_retries') {
|
||||
logger.error('Failed to produce valid structured output after retries');
|
||||
throw new Error('Could not produce valid validation output');
|
||||
}
|
||||
}
|
||||
if (!effectiveReasoningEffort && typeof phaseModelEntry !== 'string') {
|
||||
effectiveReasoningEffort = phaseModelEntry.reasoningEffort;
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(`Using model: ${model}`);
|
||||
|
||||
// Use streamingQuery with event callbacks
|
||||
const result = await streamingQuery({
|
||||
prompt: finalPrompt,
|
||||
model: model as string,
|
||||
cwd: projectPath,
|
||||
systemPrompt: useStructuredOutput ? ISSUE_VALIDATION_SYSTEM_PROMPT : undefined,
|
||||
abortController,
|
||||
thinkingLevel: effectiveThinkingLevel,
|
||||
reasoningEffort: effectiveReasoningEffort,
|
||||
readOnly: true, // Issue validation only reads code, doesn't write
|
||||
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||
outputFormat: useStructuredOutput
|
||||
? {
|
||||
type: 'json_schema',
|
||||
schema: issueValidationSchema as Record<string, unknown>,
|
||||
}
|
||||
: undefined,
|
||||
onText: (text) => {
|
||||
responseText += text;
|
||||
// Emit progress event
|
||||
const progressEvent: IssueValidationEvent = {
|
||||
type: 'issue_validation_progress',
|
||||
issueNumber,
|
||||
content: text,
|
||||
projectPath,
|
||||
};
|
||||
events.emit('issue-validation:event', progressEvent);
|
||||
},
|
||||
});
|
||||
|
||||
// Clear timeout
|
||||
clearTimeout(timeoutId);
|
||||
|
||||
// Get validation result from structured output or parse from text
|
||||
let validationResult: IssueValidationResult | null = null;
|
||||
|
||||
if (result.structured_output) {
|
||||
validationResult = result.structured_output as unknown as IssueValidationResult;
|
||||
logger.debug('Received structured output:', validationResult);
|
||||
} else if (responseText) {
|
||||
// Parse JSON from response text
|
||||
validationResult = extractJson<IssueValidationResult>(responseText, { logger });
|
||||
}
|
||||
|
||||
// Require validation result
|
||||
if (!validationResult) {
|
||||
logger.error('No validation result received from AI provider');
|
||||
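Aside: a condensed sketch of the branching the single streamingQuery call now encodes: native structured output for Claude/Codex, schema-in-prompt plus JSON extraction for Cursor/OpenCode. This is not the actual implementation; it reuses the same imports as the module above and simplifies types for illustration.

// Condensed flow sketch.
async function validateWithProvider(
  model: string,
  basePrompt: string,
  runQuery: (opts: {
    prompt: string;
    outputFormat?: { type: 'json_schema'; schema: Record<string, unknown> };
  }) => Promise<{ text: string; structured_output?: unknown }>
) {
  const useStructuredOutput = isClaudeModel(model) || isCodexModel(model);
  // Cursor/OpenCode get the system prompt (and, in the real handler, the JSON schema
  // instructions) inlined into the prompt itself.
  const prompt = useStructuredOutput
    ? basePrompt
    : `${ISSUE_VALIDATION_SYSTEM_PROMPT}\n\n${basePrompt}`;
  const result = await runQuery({
    prompt,
    outputFormat: useStructuredOutput
      ? { type: 'json_schema', schema: issueValidationSchema as Record<string, unknown> }
      : undefined,
  });
  return (
    (result.structured_output as IssueValidationResult | undefined) ??
    extractJson<IssueValidationResult>(result.text, { logger })
  );
}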
@@ -299,7 +255,7 @@ ${prompt}`;
|
||||
/**
|
||||
* Creates the handler for validating GitHub issues against the codebase.
|
||||
*
|
||||
* Uses Claude SDK with:
|
||||
* Uses the provider abstraction with:
|
||||
* - Read-only tools (Read, Glob, Grep) for codebase analysis
|
||||
* - JSON schema structured output for reliable parsing
|
||||
* - System prompt guiding the validation process
|
||||
@@ -319,6 +275,7 @@ export function createValidateIssueHandler(
|
||||
issueLabels,
|
||||
model = 'opus',
|
||||
thinkingLevel,
|
||||
reasoningEffort,
|
||||
comments: rawComments,
|
||||
linkedPRs: rawLinkedPRs,
|
||||
} = req.body as ValidateIssueRequestBody;
|
||||
@@ -366,14 +323,17 @@ export function createValidateIssueHandler(
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate model parameter at runtime - accept Claude models or Cursor models
|
||||
const isValidClaudeModel = VALID_CLAUDE_MODELS.includes(model as ModelAlias);
|
||||
const isValidCursorModel = isCursorModel(model);
|
||||
// Validate model parameter at runtime - accept any supported provider model
|
||||
const isValidModel =
|
||||
isClaudeModel(model) ||
|
||||
isCursorModel(model) ||
|
||||
isCodexModel(model) ||
|
||||
isOpencodeModel(model);
|
||||
|
||||
if (!isValidClaudeModel && !isValidCursorModel) {
|
||||
if (!isValidModel) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: `Invalid model. Must be one of: ${VALID_CLAUDE_MODELS.join(', ')}, or a Cursor model ID`,
|
||||
error: 'Invalid model. Must be a Claude, Cursor, Codex, or OpenCode model ID (or alias).',
|
||||
});
|
||||
return;
|
||||
}
|
||||
@@ -404,7 +364,8 @@ export function createValidateIssueHandler(
|
||||
settingsService,
|
||||
validationComments,
|
||||
validationLinkedPRs,
|
||||
thinkingLevel
|
||||
thinkingLevel,
|
||||
reasoningEffort
|
||||
)
|
||||
.catch(() => {
|
||||
// Error is already handled inside runValidation (event emitted)
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* Each provider shows: `{ configured: boolean, masked: string }`
|
||||
* Masked shows first 4 and last 4 characters for verification.
|
||||
*
|
||||
* Response: `{ "success": true, "credentials": { anthropic } }`
|
||||
* Response: `{ "success": true, "credentials": { anthropic, google, openai } }`
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/**
|
||||
* PUT /api/settings/credentials - Update API credentials
|
||||
*
|
||||
* Updates API keys for Anthropic. Partial updates supported.
|
||||
* Updates API keys for supported providers. Partial updates supported.
|
||||
* Returns masked credentials for verification without exposing full keys.
|
||||
*
|
||||
* Request body: `Partial<Credentials>` (usually just apiKeys)
|
||||
|
||||
@@ -24,6 +24,12 @@ import { createDeauthCursorHandler } from './routes/deauth-cursor.js';
|
||||
import { createAuthOpencodeHandler } from './routes/auth-opencode.js';
|
||||
import { createDeauthOpencodeHandler } from './routes/deauth-opencode.js';
|
||||
import { createOpencodeStatusHandler } from './routes/opencode-status.js';
|
||||
import {
|
||||
createGetOpencodeModelsHandler,
|
||||
createRefreshOpencodeModelsHandler,
|
||||
createGetOpencodeProvidersHandler,
|
||||
createClearOpencodeCacheHandler,
|
||||
} from './routes/opencode-models.js';
|
||||
import {
|
||||
createGetCursorConfigHandler,
|
||||
createSetCursorDefaultModelHandler,
|
||||
@@ -65,6 +71,12 @@ export function createSetupRoutes(): Router {
|
||||
router.get('/opencode-status', createOpencodeStatusHandler());
|
||||
router.post('/auth-opencode', createAuthOpencodeHandler());
|
||||
router.post('/deauth-opencode', createDeauthOpencodeHandler());
|
||||
|
||||
// OpenCode Dynamic Model Discovery routes
|
||||
router.get('/opencode/models', createGetOpencodeModelsHandler());
|
||||
router.post('/opencode/models/refresh', createRefreshOpencodeModelsHandler());
|
||||
router.get('/opencode/providers', createGetOpencodeProvidersHandler());
|
||||
router.post('/opencode/cache/clear', createClearOpencodeCacheHandler());
|
||||
router.get('/cursor-config', createGetCursorConfigHandler());
|
||||
router.post('/cursor-config/default-model', createSetCursorDefaultModelHandler());
|
||||
router.post('/cursor-config/models', createSetCursorModelsHandler());
|
||||
|
||||
@@ -11,6 +11,7 @@ export function createApiKeysHandler() {
|
||||
res.json({
|
||||
success: true,
|
||||
hasAnthropicKey: !!getApiKey('anthropic') || !!process.env.ANTHROPIC_API_KEY,
|
||||
hasGoogleKey: !!getApiKey('google'),
|
||||
hasOpenaiKey: !!getApiKey('openai') || !!process.env.OPENAI_API_KEY,
|
||||
});
|
||||
} catch (error) {
|
||||
|
||||
apps/server/src/routes/setup/routes/opencode-models.ts (new file, 189 lines)
@@ -0,0 +1,189 @@
|
||||
/**
|
||||
* OpenCode Dynamic Models API Routes
|
||||
*
|
||||
* Provides endpoints for:
|
||||
* - GET /api/setup/opencode/models - Get available models (cached or refreshed)
|
||||
* - POST /api/setup/opencode/models/refresh - Force refresh models from CLI
|
||||
* - GET /api/setup/opencode/providers - Get authenticated providers
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import {
|
||||
OpencodeProvider,
|
||||
type OpenCodeProviderInfo,
|
||||
} from '../../../providers/opencode-provider.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import type { ModelDefinition } from '@automaker/types';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('OpenCodeModelsRoute');
|
||||
|
||||
// Singleton provider instance for caching
|
||||
let providerInstance: OpencodeProvider | null = null;
|
||||
|
||||
function getProvider(): OpencodeProvider {
|
||||
if (!providerInstance) {
|
||||
providerInstance = new OpencodeProvider();
|
||||
}
|
||||
return providerInstance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Response type for models endpoint
|
||||
*/
|
||||
interface ModelsResponse {
|
||||
success: boolean;
|
||||
models?: ModelDefinition[];
|
||||
count?: number;
|
||||
cached?: boolean;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Response type for providers endpoint
|
||||
*/
|
||||
interface ProvidersResponse {
|
||||
success: boolean;
|
||||
providers?: OpenCodeProviderInfo[];
|
||||
authenticated?: OpenCodeProviderInfo[];
|
||||
error?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates handler for GET /api/setup/opencode/models
|
||||
*
|
||||
* Returns currently available models (from cache if available).
|
||||
* Query params:
|
||||
* - refresh=true: Force refresh from CLI before returning
|
||||
*
|
||||
* Note: If cache is empty, this will trigger a refresh to get dynamic models.
|
||||
*/
|
||||
export function createGetOpencodeModelsHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const provider = getProvider();
|
||||
const forceRefresh = req.query.refresh === 'true';
|
||||
|
||||
let models: ModelDefinition[];
|
||||
let cached = true;
|
||||
|
||||
if (forceRefresh) {
|
||||
models = await provider.refreshModels();
|
||||
cached = false;
|
||||
} else {
|
||||
// Check if we have cached models
|
||||
const cachedModels = provider.getAvailableModels();
|
||||
|
||||
// If cache only has default models (provider.hasCachedModels() would be false),
|
||||
// trigger a refresh to get dynamic models
|
||||
if (!provider.hasCachedModels()) {
|
||||
models = await provider.refreshModels();
|
||||
cached = false;
|
||||
} else {
|
||||
models = cachedModels;
|
||||
}
|
||||
}
|
||||
|
||||
const response: ModelsResponse = {
|
||||
success: true,
|
||||
models,
|
||||
count: models.length,
|
||||
cached,
|
||||
};
|
||||
|
||||
res.json(response);
|
||||
} catch (error) {
|
||||
logError(error, 'Get OpenCode models failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
} as ModelsResponse);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates handler for POST /api/setup/opencode/models/refresh
|
||||
*
|
||||
* Forces a refresh of models from the OpenCode CLI.
|
||||
*/
|
||||
export function createRefreshOpencodeModelsHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const provider = getProvider();
|
||||
const models = await provider.refreshModels();
|
||||
|
||||
const response: ModelsResponse = {
|
||||
success: true,
|
||||
models,
|
||||
count: models.length,
|
||||
cached: false,
|
||||
};
|
||||
|
||||
res.json(response);
|
||||
} catch (error) {
|
||||
logError(error, 'Refresh OpenCode models failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
} as ModelsResponse);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates handler for GET /api/setup/opencode/providers
|
||||
*
|
||||
* Returns authenticated providers from OpenCode CLI.
|
||||
* This calls `opencode auth list` to get provider status.
|
||||
*/
|
||||
export function createGetOpencodeProvidersHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const provider = getProvider();
|
||||
const providers = await provider.fetchAuthenticatedProviders();
|
||||
|
||||
// Filter to only authenticated providers
|
||||
const authenticated = providers.filter((p) => p.authenticated);
|
||||
|
||||
const response: ProvidersResponse = {
|
||||
success: true,
|
||||
providers,
|
||||
authenticated,
|
||||
};
|
||||
|
||||
res.json(response);
|
||||
} catch (error) {
|
||||
logError(error, 'Get OpenCode providers failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
} as ProvidersResponse);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates handler for POST /api/setup/opencode/cache/clear
|
||||
*
|
||||
* Clears the model cache, forcing a fresh fetch on next access.
|
||||
*/
|
||||
export function createClearOpencodeCacheHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const provider = getProvider();
|
||||
provider.clearModelCache();
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'OpenCode model cache cleared',
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Clear OpenCode cache failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
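
The factories above only construct handlers; the router wiring is not part of this excerpt. A minimal sketch of how they might be mounted, assuming an Express Router and the paths from the file header (the module name `opencode-models.js`, the function name, and the mount prefix are assumptions):

```typescript
import { Router } from 'express';
import {
  createGetOpencodeModelsHandler,
  createRefreshOpencodeModelsHandler,
  createGetOpencodeProvidersHandler,
  createClearOpencodeCacheHandler,
} from './opencode-models.js';

// Illustrative wiring only - route names mirror the endpoint list in the
// file header; the actual setup routes module is not shown in this diff.
export function createOpencodeSetupRoutes(): Router {
  const router = Router();
  router.get('/models', createGetOpencodeModelsHandler());
  router.post('/models/refresh', createRefreshOpencodeModelsHandler());
  router.get('/providers', createGetOpencodeProvidersHandler());
  router.post('/cache/clear', createClearOpencodeCacheHandler());
  return router;
}

// e.g. app.use('/api/setup/opencode', createOpencodeSetupRoutes());
```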
|
||||
@@ -21,22 +21,25 @@ export function createStoreApiKeyHandler() {
|
||||
return;
|
||||
}
|
||||
|
||||
setApiKey(provider, apiKey);
|
||||
|
||||
// Also set as environment variable and persist to .env
|
||||
if (provider === 'anthropic' || provider === 'anthropic_oauth_token') {
|
||||
// Both API key and OAuth token use ANTHROPIC_API_KEY
|
||||
process.env.ANTHROPIC_API_KEY = apiKey;
|
||||
await persistApiKeyToEnv('ANTHROPIC_API_KEY', apiKey);
|
||||
logger.info('[Setup] Stored API key as ANTHROPIC_API_KEY');
|
||||
} else {
|
||||
const providerEnvMap: Record<string, string> = {
|
||||
anthropic: 'ANTHROPIC_API_KEY',
|
||||
anthropic_oauth_token: 'ANTHROPIC_API_KEY',
|
||||
openai: 'OPENAI_API_KEY',
|
||||
};
|
||||
const envKey = providerEnvMap[provider];
|
||||
if (!envKey) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: `Unsupported provider: ${provider}. Only anthropic is supported.`,
|
||||
error: `Unsupported provider: ${provider}. Only anthropic and openai are supported.`,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
setApiKey(provider, apiKey);
|
||||
process.env[envKey] = apiKey;
|
||||
await persistApiKeyToEnv(envKey, apiKey);
|
||||
logger.info(`[Setup] Stored API key as ${envKey}`);
|
||||
|
||||
res.json({ success: true });
|
||||
} catch (error) {
|
||||
logError(error, 'Store API key failed');
|
||||
|
||||
@@ -5,19 +5,12 @@
|
||||
* (AI Suggestions in the UI). Supports both Claude and Cursor models.
|
||||
*/
|
||||
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import type { EventEmitter } from '../../lib/events.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import {
|
||||
DEFAULT_PHASE_MODELS,
|
||||
isCursorModel,
|
||||
stripProviderPrefix,
|
||||
type ThinkingLevel,
|
||||
} from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, type ThinkingLevel } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { createSuggestionsOptions } from '../../lib/sdk-options.js';
|
||||
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||
import { getAppSpecPath } from '@automaker/platform';
|
||||
import * as secureFs from '../../lib/secure-fs.js';
|
||||
@@ -204,19 +197,14 @@ The response will be automatically formatted as structured JSON.`;
|
||||
logger.info('[Suggestions] Using model:', model);
|
||||
|
||||
let responseText = '';
|
||||
let structuredOutput: { suggestions: Array<Record<string, unknown>> } | null = null;
|
||||
|
||||
// Route to appropriate provider based on model type
|
||||
if (isCursorModel(model)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
logger.info('[Suggestions] Using Cursor provider');
|
||||
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
|
||||
const useStructuredOutput = !isCursorModel(model);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
// Strip provider prefix - providers expect bare model IDs
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
// For Cursor, include the JSON schema in the prompt with clear instructions
|
||||
const cursorPrompt = `${prompt}
|
||||
// Build the final prompt - for Cursor, include JSON schema instructions
|
||||
let finalPrompt = prompt;
|
||||
if (!useStructuredOutput) {
|
||||
finalPrompt = `${prompt}
|
||||
|
||||
CRITICAL INSTRUCTIONS:
|
||||
1. DO NOT write any files. Return the JSON in your response only.
|
||||
@@ -226,104 +214,60 @@ CRITICAL INSTRUCTIONS:
|
||||
${JSON.stringify(suggestionsSchema, null, 2)}
|
||||
|
||||
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
||||
|
||||
for await (const msg of provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model: bareModel,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
abortController,
|
||||
readOnly: true, // Suggestions only reads code, doesn't write
|
||||
})) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
events.emit('suggestions:event', {
|
||||
type: 'suggestions_progress',
|
||||
content: block.text,
|
||||
});
|
||||
} else if (block.type === 'tool_use') {
|
||||
events.emit('suggestions:event', {
|
||||
type: 'suggestions_tool',
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
||||
// Use result if it's a final accumulated message (from Cursor provider)
|
||||
logger.info('[Suggestions] Received result from Cursor, length:', msg.result.length);
|
||||
logger.info('[Suggestions] Previous responseText length:', responseText.length);
|
||||
if (msg.result.length > responseText.length) {
|
||||
logger.info('[Suggestions] Using Cursor result (longer than accumulated text)');
|
||||
responseText = msg.result;
|
||||
} else {
|
||||
logger.info('[Suggestions] Keeping accumulated text (longer than Cursor result)');
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Use Claude SDK for Claude models
|
||||
logger.info('[Suggestions] Using Claude SDK');
|
||||
|
||||
const options = createSuggestionsOptions({
|
||||
cwd: projectPath,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
model, // Pass the model from settings
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
outputFormat: {
|
||||
type: 'json_schema',
|
||||
schema: suggestionsSchema,
|
||||
},
|
||||
});
|
||||
|
||||
const stream = query({ prompt, options });
|
||||
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'assistant' && msg.message.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text') {
|
||||
responseText += block.text;
|
||||
events.emit('suggestions:event', {
|
||||
type: 'suggestions_progress',
|
||||
content: block.text,
|
||||
});
|
||||
} else if (block.type === 'tool_use') {
|
||||
events.emit('suggestions:event', {
|
||||
type: 'suggestions_tool',
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
// Check for structured output
|
||||
const resultMsg = msg as any;
|
||||
if (resultMsg.structured_output) {
|
||||
structuredOutput = resultMsg.structured_output as {
|
||||
suggestions: Array<Record<string, unknown>>;
|
||||
};
|
||||
logger.debug('Received structured output:', structuredOutput);
|
||||
}
|
||||
} else if (msg.type === 'result') {
|
||||
const resultMsg = msg as any;
|
||||
if (resultMsg.subtype === 'error_max_structured_output_retries') {
|
||||
logger.error('Failed to produce valid structured output after retries');
|
||||
throw new Error('Could not produce valid suggestions output');
|
||||
} else if (resultMsg.subtype === 'error_max_turns') {
|
||||
logger.error('Hit max turns limit before completing suggestions generation');
|
||||
logger.warn(`Response text length: ${responseText.length} chars`);
|
||||
// Still try to parse what we have
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Use streamingQuery with event callbacks
|
||||
const result = await streamingQuery({
|
||||
prompt: finalPrompt,
|
||||
model,
|
||||
cwd: projectPath,
|
||||
maxTurns: 250,
|
||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||
abortController,
|
||||
thinkingLevel,
|
||||
readOnly: true, // Suggestions only reads code, doesn't write
|
||||
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||
outputFormat: useStructuredOutput
|
||||
? {
|
||||
type: 'json_schema',
|
||||
schema: suggestionsSchema,
|
||||
}
|
||||
: undefined,
|
||||
onText: (text) => {
|
||||
responseText += text;
|
||||
events.emit('suggestions:event', {
|
||||
type: 'suggestions_progress',
|
||||
content: text,
|
||||
});
|
||||
},
|
||||
onToolUse: (tool, input) => {
|
||||
events.emit('suggestions:event', {
|
||||
type: 'suggestions_tool',
|
||||
tool,
|
||||
input,
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
// Use structured output if available, otherwise fall back to parsing text
|
||||
try {
|
||||
let structuredOutput: { suggestions: Array<Record<string, unknown>> } | null = null;
|
||||
|
||||
if (result.structured_output) {
|
||||
structuredOutput = result.structured_output as {
|
||||
suggestions: Array<Record<string, unknown>>;
|
||||
};
|
||||
logger.debug('Received structured output:', structuredOutput);
|
||||
} else if (responseText) {
|
||||
// Fallback: try to parse from text using shared extraction utility
|
||||
logger.warn('No structured output received, attempting to parse from text');
|
||||
structuredOutput = extractJsonWithArray<{ suggestions: Array<Record<string, unknown>> }>(
|
||||
responseText,
|
||||
'suggestions',
|
||||
{ logger }
|
||||
);
|
||||
}
|
||||
|
||||
if (structuredOutput && structuredOutput.suggestions) {
|
||||
// Use structured output directly
|
||||
events.emit('suggestions:event', {
|
||||
@@ -334,24 +278,7 @@ Your entire response should be valid JSON starting with { and ending with }. No
|
||||
})),
|
||||
});
|
||||
} else {
|
||||
// Fallback: try to parse from text using shared extraction utility
|
||||
logger.warn('No structured output received, attempting to parse from text');
|
||||
const parsed = extractJsonWithArray<{ suggestions: Array<Record<string, unknown>> }>(
|
||||
responseText,
|
||||
'suggestions',
|
||||
{ logger }
|
||||
);
|
||||
if (parsed && parsed.suggestions) {
|
||||
events.emit('suggestions:event', {
|
||||
type: 'suggestions_complete',
|
||||
suggestions: parsed.suggestions.map((s: Record<string, unknown>, i: number) => ({
|
||||
...s,
|
||||
id: s.id || `suggestion-${Date.now()}-${i}`,
|
||||
})),
|
||||
});
|
||||
} else {
|
||||
throw new Error('No valid JSON found in response');
|
||||
}
|
||||
throw new Error('No valid JSON found in response');
|
||||
}
|
||||
} catch (error) {
|
||||
// Log the parsing error for debugging
|
||||
|
||||
@@ -3,15 +3,51 @@
|
||||
*/
|
||||
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { spawnProcess } from '@automaker/platform';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import path from 'path';
|
||||
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
|
||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||
|
||||
const logger = createLogger('Worktree');
|
||||
export const execAsync = promisify(exec);
|
||||
const featureLoader = new FeatureLoader();
|
||||
|
||||
// ============================================================================
|
||||
// Secure Command Execution
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Execute git command with array arguments to prevent command injection.
|
||||
* Uses spawnProcess from @automaker/platform for secure, cross-platform execution.
|
||||
*
|
||||
* @param args - Array of git command arguments (e.g., ['worktree', 'add', path])
|
||||
* @param cwd - Working directory to execute the command in
|
||||
* @returns Promise resolving to stdout output
|
||||
* @throws Error with stderr message if command fails
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* // Safe: no injection possible
|
||||
* await execGitCommand(['branch', '-D', branchName], projectPath);
|
||||
*
|
||||
* // Instead of unsafe:
|
||||
* // await execAsync(`git branch -D ${branchName}`, { cwd });
|
||||
* ```
|
||||
*/
|
||||
export async function execGitCommand(args: string[], cwd: string): Promise<string> {
|
||||
const result = await spawnProcess({
|
||||
command: 'git',
|
||||
args,
|
||||
cwd,
|
||||
});
|
||||
|
||||
// spawnProcess returns { stdout, stderr, exitCode }
|
||||
if (result.exitCode === 0) {
|
||||
return result.stdout;
|
||||
} else {
|
||||
const errorMessage = result.stderr || `Git command failed with code ${result.exitCode}`;
|
||||
throw new Error(errorMessage);
|
||||
}
|
||||
}
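
Several handlers later in this diff import `isValidBranchName` from this module, but its body is not included in the excerpt. A minimal sketch consistent with the error messages those handlers return (letters, numbers, dots, hyphens, underscores, and forward slashes only); the leading-character and `..` checks are extra assumptions, not taken from the diff:

```typescript
/**
 * Illustrative sketch only - the real isValidBranchName in common.ts is not
 * shown in this excerpt. Character set inferred from the handlers' error
 * messages; length cap, leading character, and '..' rejection are assumptions.
 */
export function isValidBranchNameSketch(name: string): boolean {
  if (!name || name.length > 255) return false;
  // Require an alphanumeric first character so the name cannot read as a git flag.
  if (!/^[A-Za-z0-9][A-Za-z0-9._/-]*$/.test(name)) return false;
  // Git also rejects '..' inside ref names.
  return !name.includes('..');
}
```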
|
||||
|
||||
// ============================================================================
|
||||
// Constants
|
||||
@@ -99,18 +135,6 @@ export function normalizePath(p: string): string {
|
||||
return p.replace(/\\/g, '/');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a path is a git repo
|
||||
*/
|
||||
export async function isGitRepo(repoPath: string): Promise<boolean> {
|
||||
try {
|
||||
await execAsync('git rev-parse --is-inside-work-tree', { cwd: repoPath });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a git repository has at least one commit (i.e., HEAD exists)
|
||||
* Returns false for freshly initialized repos with no commits
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
*/
|
||||
|
||||
import { Router } from 'express';
|
||||
import type { EventEmitter } from '../../lib/events.js';
|
||||
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||
import { requireValidWorktree, requireValidProject, requireGitRepoOnly } from './middleware.js';
|
||||
import { createInfoHandler } from './routes/info.js';
|
||||
@@ -16,6 +17,7 @@ import { createDeleteHandler } from './routes/delete.js';
|
||||
import { createCreatePRHandler } from './routes/create-pr.js';
|
||||
import { createPRInfoHandler } from './routes/pr-info.js';
|
||||
import { createCommitHandler } from './routes/commit.js';
|
||||
import { createGenerateCommitMessageHandler } from './routes/generate-commit-message.js';
|
||||
import { createPushHandler } from './routes/push.js';
|
||||
import { createPullHandler } from './routes/pull.js';
|
||||
import { createCheckoutBranchHandler } from './routes/checkout-branch.js';
|
||||
@@ -24,14 +26,27 @@ import { createSwitchBranchHandler } from './routes/switch-branch.js';
|
||||
import {
|
||||
createOpenInEditorHandler,
|
||||
createGetDefaultEditorHandler,
|
||||
createGetAvailableEditorsHandler,
|
||||
createRefreshEditorsHandler,
|
||||
} from './routes/open-in-editor.js';
|
||||
import { createInitGitHandler } from './routes/init-git.js';
|
||||
import { createMigrateHandler } from './routes/migrate.js';
|
||||
import { createStartDevHandler } from './routes/start-dev.js';
|
||||
import { createStopDevHandler } from './routes/stop-dev.js';
|
||||
import { createListDevServersHandler } from './routes/list-dev-servers.js';
|
||||
import { createGetDevServerLogsHandler } from './routes/dev-server-logs.js';
|
||||
import {
|
||||
createGetInitScriptHandler,
|
||||
createPutInitScriptHandler,
|
||||
createDeleteInitScriptHandler,
|
||||
createRunInitScriptHandler,
|
||||
} from './routes/init-script.js';
|
||||
import type { SettingsService } from '../../services/settings-service.js';
|
||||
|
||||
export function createWorktreeRoutes(): Router {
|
||||
export function createWorktreeRoutes(
|
||||
events: EventEmitter,
|
||||
settingsService?: SettingsService
|
||||
): Router {
|
||||
const router = Router();
|
||||
|
||||
router.post('/info', validatePathParams('projectPath'), createInfoHandler());
|
||||
@@ -45,7 +60,7 @@ export function createWorktreeRoutes(): Router {
|
||||
requireValidProject,
|
||||
createMergeHandler()
|
||||
);
|
||||
router.post('/create', validatePathParams('projectPath'), createCreateHandler());
|
||||
router.post('/create', validatePathParams('projectPath'), createCreateHandler(events));
|
||||
router.post('/delete', validatePathParams('projectPath', 'worktreePath'), createDeleteHandler());
|
||||
router.post('/create-pr', createCreatePRHandler());
|
||||
router.post('/pr-info', createPRInfoHandler());
|
||||
@@ -55,6 +70,12 @@ export function createWorktreeRoutes(): Router {
|
||||
requireGitRepoOnly,
|
||||
createCommitHandler()
|
||||
);
|
||||
router.post(
|
||||
'/generate-commit-message',
|
||||
validatePathParams('worktreePath'),
|
||||
requireGitRepoOnly,
|
||||
createGenerateCommitMessageHandler(settingsService)
|
||||
);
|
||||
router.post(
|
||||
'/push',
|
||||
validatePathParams('worktreePath'),
|
||||
@@ -77,6 +98,8 @@ export function createWorktreeRoutes(): Router {
|
||||
router.post('/switch-branch', requireValidWorktree, createSwitchBranchHandler());
|
||||
router.post('/open-in-editor', validatePathParams('worktreePath'), createOpenInEditorHandler());
|
||||
router.get('/default-editor', createGetDefaultEditorHandler());
|
||||
router.get('/available-editors', createGetAvailableEditorsHandler());
|
||||
router.post('/refresh-editors', createRefreshEditorsHandler());
|
||||
router.post('/init-git', validatePathParams('projectPath'), createInitGitHandler());
|
||||
router.post('/migrate', createMigrateHandler());
|
||||
router.post(
|
||||
@@ -86,6 +109,21 @@ export function createWorktreeRoutes(): Router {
|
||||
);
|
||||
router.post('/stop-dev', createStopDevHandler());
|
||||
router.post('/list-dev-servers', createListDevServersHandler());
|
||||
router.get(
|
||||
'/dev-server-logs',
|
||||
validatePathParams('worktreePath'),
|
||||
createGetDevServerLogsHandler()
|
||||
);
|
||||
|
||||
// Init script routes
|
||||
router.get('/init-script', createGetInitScriptHandler());
|
||||
router.put('/init-script', validatePathParams('projectPath'), createPutInitScriptHandler());
|
||||
router.delete('/init-script', validatePathParams('projectPath'), createDeleteInitScriptHandler());
|
||||
router.post(
|
||||
'/run-init-script',
|
||||
validatePathParams('projectPath', 'worktreePath'),
|
||||
createRunInitScriptHandler(events)
|
||||
);
|
||||
|
||||
return router;
|
||||
}
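
Since `createWorktreeRoutes` now takes an `EventEmitter` and an optional `SettingsService`, existing call sites need to pass them through. The call site is not shown in this diff; a sketch under the assumption of an `/api/worktree` mount point and an `index.js` module path:

```typescript
import type { Express } from 'express';
import type { EventEmitter } from '../../lib/events.js';
import type { SettingsService } from '../../services/settings-service.js';
import { createWorktreeRoutes } from './index.js';

// Mount point and module path are assumptions; only the new
// (events, settingsService) signature comes from the diff above.
export function mountWorktreeRoutes(
  app: Express,
  events: EventEmitter,
  settingsService?: SettingsService
): void {
  app.use('/api/worktree', createWorktreeRoutes(events, settingsService));
}
```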
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
*/
|
||||
|
||||
import type { Request, Response, NextFunction } from 'express';
|
||||
import { isGitRepo, hasCommits } from './common.js';
|
||||
import { isGitRepo } from '@automaker/git-utils';
|
||||
import { hasCommits } from './common.js';
|
||||
|
||||
interface ValidationOptions {
|
||||
/** Check if the path is a git repository (default: true) */
|
||||
|
||||
@@ -70,9 +70,8 @@ export function createCreatePRHandler() {
|
||||
logger.debug(`Changed files:\n${status}`);
|
||||
}
|
||||
|
||||
// If there are changes, commit them
|
||||
// If there are changes, commit them before creating the PR
|
||||
let commitHash: string | null = null;
|
||||
let commitError: string | null = null;
|
||||
if (hasChanges) {
|
||||
const message = commitMessage || `Changes from ${branchName}`;
|
||||
logger.debug(`Committing changes with message: ${message}`);
|
||||
@@ -98,14 +97,13 @@ export function createCreatePRHandler() {
|
||||
logger.info(`Commit successful: ${commitHash}`);
|
||||
} catch (commitErr: unknown) {
|
||||
const err = commitErr as { stderr?: string; message?: string };
|
||||
commitError = err.stderr || err.message || 'Commit failed';
|
||||
const commitError = err.stderr || err.message || 'Commit failed';
|
||||
logger.error(`Commit failed: ${commitError}`);
|
||||
|
||||
// Return error immediately - don't proceed with push/PR if commit fails
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: `Failed to commit changes: ${commitError}`,
|
||||
commitError,
|
||||
});
|
||||
return;
|
||||
}
|
||||
@@ -381,9 +379,8 @@ export function createCreatePRHandler() {
|
||||
success: true,
|
||||
result: {
|
||||
branch: branchName,
|
||||
committed: hasChanges && !commitError,
|
||||
committed: hasChanges,
|
||||
commitHash,
|
||||
commitError: commitError || undefined,
|
||||
pushed: true,
|
||||
prUrl,
|
||||
prNumber,
|
||||
|
||||
@@ -12,15 +12,19 @@ import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import path from 'path';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import type { EventEmitter } from '../../../lib/events.js';
|
||||
import { isGitRepo } from '@automaker/git-utils';
|
||||
import {
|
||||
isGitRepo,
|
||||
getErrorMessage,
|
||||
logError,
|
||||
normalizePath,
|
||||
ensureInitialCommit,
|
||||
isValidBranchName,
|
||||
execGitCommand,
|
||||
} from '../common.js';
|
||||
import { trackBranch } from './branch-tracking.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { runInitScript } from '../../../services/init-script-service.js';
|
||||
|
||||
const logger = createLogger('Worktree');
|
||||
|
||||
@@ -77,7 +81,7 @@ async function findExistingWorktreeForBranch(
|
||||
}
|
||||
}
|
||||
|
||||
export function createCreateHandler() {
|
||||
export function createCreateHandler(events: EventEmitter) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, branchName, baseBranch } = req.body as {
|
||||
@@ -94,6 +98,26 @@ export function createCreateHandler() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate branch name to prevent command injection
|
||||
if (!isValidBranchName(branchName)) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error:
|
||||
'Invalid branch name. Branch names must contain only letters, numbers, dots, hyphens, underscores, and forward slashes.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate base branch if provided
|
||||
if (baseBranch && !isValidBranchName(baseBranch) && baseBranch !== 'HEAD') {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error:
|
||||
'Invalid base branch name. Branch names must contain only letters, numbers, dots, hyphens, underscores, and forward slashes.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(await isGitRepo(projectPath))) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
@@ -143,30 +167,28 @@ export function createCreateHandler() {
|
||||
// Create worktrees directory if it doesn't exist
|
||||
await secureFs.mkdir(worktreesDir, { recursive: true });
|
||||
|
||||
// Check if branch exists
|
||||
// Check if branch exists (using array arguments to prevent injection)
|
||||
let branchExists = false;
|
||||
try {
|
||||
await execAsync(`git rev-parse --verify ${branchName}`, {
|
||||
cwd: projectPath,
|
||||
});
|
||||
await execGitCommand(['rev-parse', '--verify', branchName], projectPath);
|
||||
branchExists = true;
|
||||
} catch {
|
||||
// Branch doesn't exist
|
||||
}
|
||||
|
||||
// Create worktree
|
||||
let createCmd: string;
|
||||
// Create worktree (using array arguments to prevent injection)
|
||||
if (branchExists) {
|
||||
// Use existing branch
|
||||
createCmd = `git worktree add "${worktreePath}" ${branchName}`;
|
||||
await execGitCommand(['worktree', 'add', worktreePath, branchName], projectPath);
|
||||
} else {
|
||||
// Create new branch from base or HEAD
|
||||
const base = baseBranch || 'HEAD';
|
||||
createCmd = `git worktree add -b ${branchName} "${worktreePath}" ${base}`;
|
||||
await execGitCommand(
|
||||
['worktree', 'add', '-b', branchName, worktreePath, base],
|
||||
projectPath
|
||||
);
|
||||
}
|
||||
|
||||
await execAsync(createCmd, { cwd: projectPath });
|
||||
|
||||
// Note: We intentionally do NOT symlink .automaker to worktrees
|
||||
// Features and config are always accessed from the main project path
|
||||
// This avoids symlink loop issues when activating worktrees
|
||||
@@ -177,6 +199,8 @@ export function createCreateHandler() {
|
||||
// Resolve to absolute path for cross-platform compatibility
|
||||
// normalizePath converts to forward slashes for API consistency
|
||||
const absoluteWorktreePath = path.resolve(worktreePath);
|
||||
|
||||
// Respond immediately (non-blocking)
|
||||
res.json({
|
||||
success: true,
|
||||
worktree: {
|
||||
@@ -185,6 +209,17 @@ export function createCreateHandler() {
|
||||
isNew: !branchExists,
|
||||
},
|
||||
});
|
||||
|
||||
// Trigger init script asynchronously after response
|
||||
// runInitScript internally checks if script exists and hasn't already run
|
||||
runInitScript({
|
||||
projectPath,
|
||||
worktreePath: absoluteWorktreePath,
|
||||
branch: branchName,
|
||||
emitter: events,
|
||||
}).catch((err) => {
|
||||
logger.error(`Init script failed for ${branchName}:`, err);
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Create worktree failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
|
||||
@@ -6,9 +6,11 @@ import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { isGitRepo } from '@automaker/git-utils';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import { getErrorMessage, logError, isValidBranchName, execGitCommand } from '../common.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
const logger = createLogger('Worktree');
|
||||
|
||||
export function createDeleteHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
@@ -46,22 +48,28 @@ export function createDeleteHandler() {
|
||||
// Could not get branch name
|
||||
}
|
||||
|
||||
// Remove the worktree
|
||||
// Remove the worktree (using array arguments to prevent injection)
|
||||
try {
|
||||
await execAsync(`git worktree remove "${worktreePath}" --force`, {
|
||||
cwd: projectPath,
|
||||
});
|
||||
await execGitCommand(['worktree', 'remove', worktreePath, '--force'], projectPath);
|
||||
} catch (error) {
|
||||
// Try with prune if remove fails
|
||||
await execAsync('git worktree prune', { cwd: projectPath });
|
||||
await execGitCommand(['worktree', 'prune'], projectPath);
|
||||
}
|
||||
|
||||
// Optionally delete the branch
|
||||
let branchDeleted = false;
|
||||
if (deleteBranch && branchName && branchName !== 'main' && branchName !== 'master') {
|
||||
try {
|
||||
await execAsync(`git branch -D ${branchName}`, { cwd: projectPath });
|
||||
} catch {
|
||||
// Branch deletion failed, not critical
|
||||
// Validate branch name to prevent command injection
|
||||
if (!isValidBranchName(branchName)) {
|
||||
logger.warn(`Invalid branch name detected, skipping deletion: ${branchName}`);
|
||||
} else {
|
||||
try {
|
||||
await execGitCommand(['branch', '-D', branchName], projectPath);
|
||||
branchDeleted = true;
|
||||
} catch {
|
||||
// Branch deletion failed, not critical
|
||||
logger.warn(`Failed to delete branch: ${branchName}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -69,7 +77,8 @@ export function createDeleteHandler() {
|
||||
success: true,
|
||||
deleted: {
|
||||
worktreePath,
|
||||
branch: deleteBranch ? branchName : null,
|
||||
branch: branchDeleted ? branchName : null,
|
||||
branchDeleted,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
|
||||
apps/server/src/routes/worktree/routes/dev-server-logs.ts (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
/**
|
||||
* GET /dev-server-logs endpoint - Get buffered logs for a worktree's dev server
|
||||
*
|
||||
* Returns the scrollback buffer containing historical log output for a running
|
||||
* dev server. Used by clients to populate the log panel on initial connection
|
||||
* before subscribing to real-time updates via WebSocket.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { getDevServerService } from '../../../services/dev-server-service.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
export function createGetDevServerLogsHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { worktreePath } = req.query as {
|
||||
worktreePath?: string;
|
||||
};
|
||||
|
||||
if (!worktreePath) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'worktreePath query parameter is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const devServerService = getDevServerService();
|
||||
const result = devServerService.getServerLogs(worktreePath);
|
||||
|
||||
if (result.success && result.result) {
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
worktreePath: result.result.worktreePath,
|
||||
port: result.result.port,
|
||||
logs: result.result.logs,
|
||||
startedAt: result.result.startedAt,
|
||||
},
|
||||
});
|
||||
} else {
|
||||
res.status(404).json({
|
||||
success: false,
|
||||
error: result.error || 'Failed to get dev server logs',
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Get dev server logs failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
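
A client-side sketch of how this endpoint could be consumed to seed the log panel before subscribing to the WebSocket stream. The `/api/worktree` prefix and the `string[]` type for `logs` are assumptions; the query parameter name comes from the handler above:

```typescript
// Client-side sketch (not part of the diff).
async function fetchDevServerLogs(worktreePath: string): Promise<string[]> {
  const url = `/api/worktree/dev-server-logs?worktreePath=${encodeURIComponent(worktreePath)}`;
  const res = await fetch(url);
  const body = (await res.json()) as {
    success: boolean;
    // 'logs' assumed to be string[]; its element type is not visible in this excerpt.
    result?: { worktreePath: string; port?: number; logs: string[]; startedAt?: string };
    error?: string;
  };
  if (!body.success || !body.result) {
    throw new Error(body.error ?? 'Failed to get dev server logs');
  }
  return body.result.logs;
}
```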
|
||||
@@ -0,0 +1,275 @@
|
||||
/**
|
||||
* POST /worktree/generate-commit-message endpoint - Generate an AI commit message from git diff
|
||||
*
|
||||
* Uses the configured model (via phaseModels.commitMessageModel) to generate a concise,
|
||||
* conventional commit message from git changes. Defaults to Claude Haiku for speed.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { existsSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { mergeCommitMessagePrompts } from '@automaker/prompts';
|
||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
||||
import type { SettingsService } from '../../../services/settings-service.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
const logger = createLogger('GenerateCommitMessage');
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
/** Timeout for AI provider calls in milliseconds (30 seconds) */
|
||||
const AI_TIMEOUT_MS = 30_000;
|
||||
|
||||
/**
|
||||
* Wraps an async generator with a timeout.
|
||||
* If the generator takes longer than the timeout, it throws an error.
|
||||
*/
|
||||
async function* withTimeout<T>(
|
||||
generator: AsyncIterable<T>,
|
||||
timeoutMs: number
|
||||
): AsyncGenerator<T, void, unknown> {
|
||||
const timeoutPromise = new Promise<never>((_, reject) => {
|
||||
setTimeout(() => reject(new Error(`AI provider timed out after ${timeoutMs}ms`)), timeoutMs);
|
||||
});
|
||||
|
||||
const iterator = generator[Symbol.asyncIterator]();
|
||||
let done = false;
|
||||
|
||||
while (!done) {
|
||||
const result = await Promise.race([iterator.next(), timeoutPromise]);
|
||||
if (result.done) {
|
||||
done = true;
|
||||
} else {
|
||||
yield result.value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the effective system prompt for commit message generation.
|
||||
* Uses custom prompt from settings if enabled, otherwise falls back to default.
|
||||
*/
|
||||
async function getSystemPrompt(settingsService?: SettingsService): Promise<string> {
|
||||
const settings = await settingsService?.getGlobalSettings();
|
||||
const prompts = mergeCommitMessagePrompts(settings?.promptCustomization?.commitMessage);
|
||||
return prompts.systemPrompt;
|
||||
}
|
||||
|
||||
interface GenerateCommitMessageRequestBody {
|
||||
worktreePath: string;
|
||||
}
|
||||
|
||||
interface GenerateCommitMessageSuccessResponse {
|
||||
success: true;
|
||||
message: string;
|
||||
}
|
||||
|
||||
interface GenerateCommitMessageErrorResponse {
|
||||
success: false;
|
||||
error: string;
|
||||
}
|
||||
|
||||
async function extractTextFromStream(
|
||||
stream: AsyncIterable<{
|
||||
type: string;
|
||||
subtype?: string;
|
||||
result?: string;
|
||||
message?: {
|
||||
content?: Array<{ type: string; text?: string }>;
|
||||
};
|
||||
}>
|
||||
): Promise<string> {
|
||||
let responseText = '';
|
||||
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
responseText = msg.result || responseText;
|
||||
}
|
||||
}
|
||||
|
||||
return responseText;
|
||||
}
|
||||
|
||||
export function createGenerateCommitMessageHandler(
|
||||
settingsService?: SettingsService
|
||||
): (req: Request, res: Response) => Promise<void> {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { worktreePath } = req.body as GenerateCommitMessageRequestBody;
|
||||
|
||||
if (!worktreePath || typeof worktreePath !== 'string') {
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
error: 'worktreePath is required and must be a string',
|
||||
};
|
||||
res.status(400).json(response);
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate that the directory exists
|
||||
if (!existsSync(worktreePath)) {
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
error: 'worktreePath does not exist',
|
||||
};
|
||||
res.status(400).json(response);
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate that it's a git repository (check for .git folder or file for worktrees)
|
||||
const gitPath = join(worktreePath, '.git');
|
||||
if (!existsSync(gitPath)) {
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
error: 'worktreePath is not a git repository',
|
||||
};
|
||||
res.status(400).json(response);
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info(`Generating commit message for worktree: ${worktreePath}`);
|
||||
|
||||
// Get git diff of staged and unstaged changes
|
||||
let diff = '';
|
||||
try {
|
||||
// First try to get staged changes
|
||||
const { stdout: stagedDiff } = await execAsync('git diff --cached', {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5, // 5MB buffer
|
||||
});
|
||||
|
||||
// If no staged changes, get unstaged changes
|
||||
if (!stagedDiff.trim()) {
|
||||
const { stdout: unstagedDiff } = await execAsync('git diff', {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5, // 5MB buffer
|
||||
});
|
||||
diff = unstagedDiff;
|
||||
} else {
|
||||
diff = stagedDiff;
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to get git diff:', error);
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
error: 'Failed to get git changes',
|
||||
};
|
||||
res.status(500).json(response);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!diff.trim()) {
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
error: 'No changes to commit',
|
||||
};
|
||||
res.status(400).json(response);
|
||||
return;
|
||||
}
|
||||
|
||||
// Truncate diff if too long (keep first 10000 characters to avoid token limits)
|
||||
const truncatedDiff =
|
||||
diff.length > 10000 ? diff.substring(0, 10000) + '\n\n[... diff truncated ...]' : diff;
|
||||
|
||||
const userPrompt = `Generate a commit message for these changes:\n\n\`\`\`diff\n${truncatedDiff}\n\`\`\``;
|
||||
|
||||
// Get model from phase settings
|
||||
const settings = await settingsService?.getGlobalSettings();
|
||||
const phaseModelEntry =
|
||||
settings?.phaseModels?.commitMessageModel || DEFAULT_PHASE_MODELS.commitMessageModel;
|
||||
const { model } = resolvePhaseModel(phaseModelEntry);
|
||||
|
||||
logger.info(`Using model for commit message: ${model}`);
|
||||
|
||||
// Get the effective system prompt (custom or default)
|
||||
const systemPrompt = await getSystemPrompt(settingsService);
|
||||
|
||||
let message: string;
|
||||
|
||||
// Route to appropriate provider based on model type
|
||||
if (isCursorModel(model)) {
|
||||
// Use Cursor provider for Cursor models
|
||||
logger.info(`Using Cursor provider for model: ${model}`);
|
||||
|
||||
const provider = ProviderFactory.getProviderForModel(model);
|
||||
const bareModel = stripProviderPrefix(model);
|
||||
|
||||
const cursorPrompt = `${systemPrompt}\n\n${userPrompt}`;
|
||||
|
||||
let responseText = '';
|
||||
const cursorStream = provider.executeQuery({
|
||||
prompt: cursorPrompt,
|
||||
model: bareModel,
|
||||
cwd: worktreePath,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
readOnly: true,
|
||||
});
|
||||
|
||||
// Wrap with timeout to prevent indefinite hangs
|
||||
for await (const msg of withTimeout(cursorStream, AI_TIMEOUT_MS)) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
message = responseText.trim();
|
||||
} else {
|
||||
// Use Claude SDK for Claude models
|
||||
const stream = query({
|
||||
prompt: userPrompt,
|
||||
options: {
|
||||
model,
|
||||
systemPrompt,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
permissionMode: 'default',
|
||||
},
|
||||
});
|
||||
|
||||
// Wrap with timeout to prevent indefinite hangs
|
||||
message = await extractTextFromStream(withTimeout(stream, AI_TIMEOUT_MS));
|
||||
}
|
||||
|
||||
if (!message || message.trim().length === 0) {
|
||||
logger.warn('Received empty response from model');
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
error: 'Failed to generate commit message - empty response',
|
||||
};
|
||||
res.status(500).json(response);
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info(`Generated commit message: ${message.trim().substring(0, 100)}...`);
|
||||
|
||||
const response: GenerateCommitMessageSuccessResponse = {
|
||||
success: true,
|
||||
message: message.trim(),
|
||||
};
|
||||
res.json(response);
|
||||
} catch (error) {
|
||||
logError(error, 'Generate commit message failed');
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
};
|
||||
res.status(500).json(response);
|
||||
}
|
||||
};
|
||||
}
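
A client-side sketch of calling this endpoint; the request and response shapes come from the interfaces defined above, while the `/api/worktree` prefix is an assumption:

```typescript
// Client-side sketch (not part of the diff).
async function generateCommitMessage(worktreePath: string): Promise<string> {
  const res = await fetch('/api/worktree/generate-commit-message', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ worktreePath }),
  });
  const body = (await res.json()) as
    | { success: true; message: string }
    | { success: false; error: string };
  if (!body.success) throw new Error(body.error);
  return body.message;
}
```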
|
||||
apps/server/src/routes/worktree/routes/init-script.ts (new file, 280 lines)
@@ -0,0 +1,280 @@
|
||||
/**
|
||||
* Init Script routes - Read/write/run the worktree-init.sh file
|
||||
*
|
||||
* POST /init-script - Read the init script content
|
||||
* PUT /init-script - Write content to the init script file
|
||||
* DELETE /init-script - Delete the init script file
|
||||
* POST /run-init-script - Run the init script for a worktree
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import path from 'path';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import { getErrorMessage, logError, isValidBranchName } from '../common.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import type { EventEmitter } from '../../../lib/events.js';
|
||||
import { forceRunInitScript } from '../../../services/init-script-service.js';
|
||||
|
||||
const logger = createLogger('InitScript');
|
||||
|
||||
/** Fixed path for init script within .automaker directory */
|
||||
const INIT_SCRIPT_FILENAME = 'worktree-init.sh';
|
||||
|
||||
/** Maximum allowed size for init scripts (1MB) */
|
||||
const MAX_SCRIPT_SIZE_BYTES = 1024 * 1024;
|
||||
|
||||
/**
|
||||
* Get the full path to the init script for a project
|
||||
*/
|
||||
function getInitScriptPath(projectPath: string): string {
|
||||
return path.join(projectPath, '.automaker', INIT_SCRIPT_FILENAME);
|
||||
}
|
||||
|
||||
/**
|
||||
* GET /init-script - Read the init script content
|
||||
*/
|
||||
export function createGetInitScriptHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const rawProjectPath = req.query.projectPath;
|
||||
|
||||
// Validate projectPath is a non-empty string (not an array or undefined)
|
||||
if (!rawProjectPath || typeof rawProjectPath !== 'string') {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath query parameter is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const projectPath = rawProjectPath.trim();
|
||||
if (!projectPath) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath cannot be empty',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const scriptPath = getInitScriptPath(projectPath);
|
||||
|
||||
try {
|
||||
const content = await secureFs.readFile(scriptPath, 'utf-8');
|
||||
res.json({
|
||||
success: true,
|
||||
exists: true,
|
||||
content: content as string,
|
||||
path: scriptPath,
|
||||
});
|
||||
} catch {
|
||||
// File doesn't exist
|
||||
res.json({
|
||||
success: true,
|
||||
exists: false,
|
||||
content: '',
|
||||
path: scriptPath,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Read init script failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* PUT /init-script - Write content to the init script file
|
||||
*/
|
||||
export function createPutInitScriptHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, content } = req.body as {
|
||||
projectPath: string;
|
||||
content: string;
|
||||
};
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof content !== 'string') {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'content must be a string',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate script size to prevent disk exhaustion
|
||||
const sizeBytes = Buffer.byteLength(content, 'utf-8');
|
||||
if (sizeBytes > MAX_SCRIPT_SIZE_BYTES) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: `Script size (${Math.round(sizeBytes / 1024)}KB) exceeds maximum allowed size (${Math.round(MAX_SCRIPT_SIZE_BYTES / 1024)}KB)`,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Log warning if potentially dangerous patterns are detected (non-blocking)
|
||||
const dangerousPatterns = [
|
||||
/rm\s+-rf\s+\/(?!\s*\$)/i, // rm -rf / (not followed by variable)
|
||||
/curl\s+.*\|\s*(?:bash|sh)/i, // curl | bash
|
||||
/wget\s+.*\|\s*(?:bash|sh)/i, // wget | sh
|
||||
];
|
||||
|
||||
for (const pattern of dangerousPatterns) {
|
||||
if (pattern.test(content)) {
|
||||
logger.warn(
|
||||
`Init script contains potentially dangerous pattern: ${pattern.source}. User responsibility to verify script safety.`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const scriptPath = getInitScriptPath(projectPath);
|
||||
const automakerDir = path.dirname(scriptPath);
|
||||
|
||||
// Ensure .automaker directory exists
|
||||
await secureFs.mkdir(automakerDir, { recursive: true });
|
||||
|
||||
// Write the script content
|
||||
await secureFs.writeFile(scriptPath, content, 'utf-8');
|
||||
|
||||
logger.info(`Wrote init script to ${scriptPath}`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
path: scriptPath,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Write init script failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* DELETE /init-script - Delete the init script file
|
||||
*/
|
||||
export function createDeleteInitScriptHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath } = req.body as { projectPath: string };
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const scriptPath = getInitScriptPath(projectPath);
|
||||
|
||||
await secureFs.rm(scriptPath, { force: true });
|
||||
logger.info(`Deleted init script at ${scriptPath}`);
|
||||
res.json({
|
||||
success: true,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Delete init script failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /run-init-script - Run (or re-run) the init script for a worktree
|
||||
*/
|
||||
export function createRunInitScriptHandler(events: EventEmitter) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, worktreePath, branch } = req.body as {
|
||||
projectPath: string;
|
||||
worktreePath: string;
|
||||
branch: string;
|
||||
};
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!worktreePath) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'worktreePath is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!branch) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'branch is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate branch name to prevent injection via environment variables
|
||||
if (!isValidBranchName(branch)) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error:
|
||||
'Invalid branch name. Branch names must contain only letters, numbers, dots, hyphens, underscores, and forward slashes.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const scriptPath = getInitScriptPath(projectPath);
|
||||
|
||||
// Check if script exists
|
||||
try {
|
||||
await secureFs.access(scriptPath);
|
||||
} catch {
|
||||
res.status(404).json({
|
||||
success: false,
|
||||
error: 'No init script found. Create one in Settings > Worktrees.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info(`Running init script for branch "${branch}" (forced)`);
|
||||
|
||||
// Run the script asynchronously (non-blocking)
|
||||
forceRunInitScript({
|
||||
projectPath,
|
||||
worktreePath,
|
||||
branch,
|
||||
emitter: events,
|
||||
});
|
||||
|
||||
// Return immediately - progress will be streamed via WebSocket events
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Init script started',
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Run init script failed');
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
};
|
||||
}
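
A sketch of a client saving the script and then re-running it for a worktree, using the request bodies expected by the handlers above; the `/api/worktree` prefix is an assumption:

```typescript
// Client-side sketch (not part of the diff).
async function saveAndRunInitScript(
  projectPath: string,
  worktreePath: string,
  branch: string,
  content: string
): Promise<void> {
  // PUT writes .automaker/worktree-init.sh under the project.
  await fetch('/api/worktree/init-script', {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ projectPath, content }),
  });
  // POST only kicks the run off; progress is streamed via WebSocket events.
  await fetch('/api/worktree/run-init-script', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ projectPath, worktreePath, branch }),
  });
}
```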
|
||||
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* POST /list-branches endpoint - List all local branches
|
||||
* POST /list-branches endpoint - List all local branches and optionally remote branches
|
||||
*
|
||||
* Note: Git repository validation (isGitRepo, hasCommits) is handled by
|
||||
* the requireValidWorktree middleware in index.ts
|
||||
@@ -21,8 +21,9 @@ interface BranchInfo {
|
||||
export function createListBranchesHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { worktreePath } = req.body as {
|
||||
const { worktreePath, includeRemote = false } = req.body as {
|
||||
worktreePath: string;
|
||||
includeRemote?: boolean;
|
||||
};
|
||||
|
||||
if (!worktreePath) {
|
||||
@@ -60,6 +61,55 @@ export function createListBranchesHandler() {
|
||||
};
|
||||
});
|
||||
|
||||
// Fetch remote branches if requested
|
||||
if (includeRemote) {
|
||||
try {
|
||||
// Fetch latest remote refs (silently, don't fail if offline)
|
||||
try {
|
||||
await execAsync('git fetch --all --quiet', {
|
||||
cwd: worktreePath,
|
||||
timeout: 10000, // 10 second timeout
|
||||
});
|
||||
} catch {
|
||||
// Ignore fetch errors - we'll use cached remote refs
|
||||
}
|
||||
|
||||
// List remote branches
|
||||
const { stdout: remoteBranchesOutput } = await execAsync(
|
||||
'git branch -r --format="%(refname:short)"',
|
||||
{ cwd: worktreePath }
|
||||
);
|
||||
|
||||
const localBranchNames = new Set(branches.map((b) => b.name));
|
||||
|
||||
remoteBranchesOutput
|
||||
.trim()
|
||||
.split('\n')
|
||||
.filter((b) => b.trim())
|
||||
.forEach((name) => {
|
||||
// Remove any surrounding quotes
|
||||
const cleanName = name.trim().replace(/^['"]|['"]$/g, '');
|
||||
// Skip HEAD pointers like "origin/HEAD"
|
||||
if (cleanName.includes('/HEAD')) return;
|
||||
|
||||
// Only add remote branches if a branch with the exact same name isn't already
|
||||
// in the list. This avoids duplicates if a local branch is named like a remote one.
|
||||
// Note: We intentionally include remote branches even when a local branch with the
|
||||
// same base name exists (e.g., show "origin/main" even if local "main" exists),
|
||||
// since users need to select remote branches as PR base targets.
|
||||
if (!localBranchNames.has(cleanName)) {
|
||||
branches.push({
|
||||
name: cleanName, // Keep full name like "origin/main"
|
||||
isCurrent: false,
|
||||
isRemote: true,
|
||||
});
|
||||
}
|
||||
});
|
||||
} catch {
|
||||
// Ignore errors fetching remote branches - return local branches only
|
||||
}
|
||||
}
|
||||
|
||||
// Get ahead/behind count for current branch
|
||||
let aheadCount = 0;
|
||||
let behindCount = 0;
|
||||
|
||||
@@ -2,18 +2,23 @@
|
||||
* POST /list endpoint - List all git worktrees
|
||||
*
|
||||
* Returns actual git worktrees from `git worktree list`.
|
||||
* Also scans .worktrees/ directory to discover worktrees that may have been
|
||||
* created externally or whose git state was corrupted.
|
||||
* Does NOT include tracked branches - only real worktrees with separate directories.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import path from 'path';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import { isGitRepo } from '@automaker/git-utils';
|
||||
import { getErrorMessage, logError, normalizePath } from '../common.js';
|
||||
import { getErrorMessage, logError, normalizePath, execEnv, isGhCliAvailable } from '../common.js';
|
||||
import { readAllWorktreeMetadata, type WorktreePRInfo } from '../../../lib/worktree-metadata.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
const logger = createLogger('Worktree');
|
||||
|
||||
interface WorktreeInfo {
|
||||
path: string;
|
||||
@@ -35,6 +40,133 @@ async function getCurrentBranch(cwd: string): Promise<string> {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Scan the .worktrees directory to discover worktrees that may exist on disk
|
||||
* but are not registered with git (e.g., created externally or corrupted state).
|
||||
*/
|
||||
async function scanWorktreesDirectory(
|
||||
projectPath: string,
|
||||
knownWorktreePaths: Set<string>
|
||||
): Promise<Array<{ path: string; branch: string }>> {
|
||||
const discovered: Array<{ path: string; branch: string }> = [];
|
||||
const worktreesDir = path.join(projectPath, '.worktrees');
|
||||
|
||||
try {
|
||||
// Check if .worktrees directory exists
|
||||
await secureFs.access(worktreesDir);
|
||||
} catch {
|
||||
// .worktrees directory doesn't exist
|
||||
return discovered;
|
||||
}
|
||||
|
||||
try {
|
||||
const entries = await secureFs.readdir(worktreesDir, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isDirectory()) continue;
|
||||
|
||||
const worktreePath = path.join(worktreesDir, entry.name);
|
||||
const normalizedPath = normalizePath(worktreePath);
|
||||
|
||||
// Skip if already known from git worktree list
|
||||
if (knownWorktreePaths.has(normalizedPath)) continue;
|
||||
|
||||
// Check if this is a valid git repository
|
||||
const gitPath = path.join(worktreePath, '.git');
|
||||
try {
|
||||
const gitStat = await secureFs.stat(gitPath);
|
||||
|
||||
// Git worktrees have a .git FILE (not directory) that points to the parent repo
|
||||
// Regular repos have a .git DIRECTORY
|
||||
if (gitStat.isFile() || gitStat.isDirectory()) {
|
||||
// Try to get the branch name
|
||||
const branch = await getCurrentBranch(worktreePath);
|
||||
if (branch) {
|
||||
logger.info(
|
||||
`Discovered worktree in .worktrees/ not in git worktree list: ${entry.name} (branch: ${branch})`
|
||||
);
|
||||
discovered.push({
|
||||
path: normalizedPath,
|
||||
branch,
|
||||
});
|
||||
} else {
|
||||
// Try to get branch from HEAD if branch --show-current fails (detached HEAD)
|
||||
try {
|
||||
const { stdout: headRef } = await execAsync('git rev-parse --abbrev-ref HEAD', {
|
||||
cwd: worktreePath,
|
||||
});
|
||||
const headBranch = headRef.trim();
|
||||
if (headBranch && headBranch !== 'HEAD') {
|
||||
logger.info(
|
||||
`Discovered worktree in .worktrees/ not in git worktree list: ${entry.name} (branch: ${headBranch})`
|
||||
);
|
||||
discovered.push({
|
||||
path: normalizedPath,
|
||||
branch: headBranch,
|
||||
});
|
||||
}
|
||||
} catch {
|
||||
// Can't determine branch, skip this directory
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Not a git repo, skip
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn(`Failed to scan .worktrees directory: ${getErrorMessage(error)}`);
|
||||
}
|
||||
|
||||
return discovered;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch open PRs from GitHub and create a map of branch name to PR info.
|
||||
* This allows detecting PRs that were created outside the app.
|
||||
*/
|
||||
async function fetchGitHubPRs(projectPath: string): Promise<Map<string, WorktreePRInfo>> {
|
||||
const prMap = new Map<string, WorktreePRInfo>();
|
||||
|
||||
try {
|
||||
// Check if gh CLI is available
|
||||
const ghAvailable = await isGhCliAvailable();
|
||||
if (!ghAvailable) {
|
||||
return prMap;
|
||||
}
|
||||
|
||||
// Fetch open PRs from GitHub
|
||||
const { stdout } = await execAsync(
|
||||
'gh pr list --state open --json number,title,url,state,headRefName,createdAt --limit 1000',
|
||||
{ cwd: projectPath, env: execEnv, timeout: 15000 }
|
||||
);
|
||||
|
||||
const prs = JSON.parse(stdout || '[]') as Array<{
|
||||
number: number;
|
||||
title: string;
|
||||
url: string;
|
||||
state: string;
|
||||
headRefName: string;
|
||||
createdAt: string;
|
||||
}>;
|
||||
|
||||
for (const pr of prs) {
|
||||
prMap.set(pr.headRefName, {
|
||||
number: pr.number,
|
||||
url: pr.url,
|
||||
title: pr.title,
|
||||
state: pr.state,
|
||||
createdAt: pr.createdAt,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
// Silently fail - PR detection is optional
|
||||
logger.warn(`Failed to fetch GitHub PRs: ${getErrorMessage(error)}`);
|
||||
}
|
||||
|
||||
return prMap;
|
||||
}
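// Example of the resulting mapping (values are illustrative only): one parsed entry from
// `gh pr list --json ...` such as
//   { "number": 42, "title": "Scan .worktrees", "url": "https://github.com/...",
//     "state": "OPEN", "headRefName": "feature/scan-worktrees", "createdAt": "2025-01-01T00:00:00Z" }
// becomes retrievable as prMap.get('feature/scan-worktrees'), which is how the list
// handler below attaches PR info to a worktree via its branch name.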
|
||||
|
||||
export function createListHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
@@ -116,6 +248,22 @@ export function createListHandler() {
|
||||
}
|
||||
}
|
||||
|
||||
// Scan .worktrees directory to discover worktrees that exist on disk
|
||||
// but are not registered with git (e.g., created externally)
|
||||
const knownPaths = new Set(worktrees.map((w) => w.path));
|
||||
const discoveredWorktrees = await scanWorktreesDirectory(projectPath, knownPaths);
|
||||
|
||||
// Add discovered worktrees to the list
|
||||
for (const discovered of discoveredWorktrees) {
|
||||
worktrees.push({
|
||||
path: discovered.path,
|
||||
branch: discovered.branch,
|
||||
isMain: false,
|
||||
isCurrent: discovered.branch === currentBranch,
|
||||
hasWorktree: true,
|
||||
});
|
||||
}
|
||||
|
||||
// Read all worktree metadata to get PR info
|
||||
const allMetadata = await readAllWorktreeMetadata(projectPath);
|
||||
|
||||
@@ -139,11 +287,23 @@ export function createListHandler() {
|
||||
}
|
||||
}
|
||||
|
||||
// Add PR info from metadata for each worktree
|
||||
// Add PR info from metadata or GitHub for each worktree
|
||||
// Only fetch GitHub PRs if includeDetails is requested (performance optimization)
|
||||
const githubPRs = includeDetails
|
||||
? await fetchGitHubPRs(projectPath)
|
||||
: new Map<string, WorktreePRInfo>();
|
||||
|
||||
for (const worktree of worktrees) {
|
||||
const metadata = allMetadata.get(worktree.branch);
|
||||
if (metadata?.pr) {
|
||||
// Use stored metadata (more complete info)
|
||||
worktree.pr = metadata.pr;
|
||||
} else if (includeDetails) {
|
||||
// Fall back to GitHub PR detection only when includeDetails is requested
|
||||
const githubPR = githubPRs.get(worktree.branch);
|
||||
if (githubPR) {
|
||||
worktree.pr = githubPR;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import path from 'path';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
@@ -16,28 +15,31 @@ const execAsync = promisify(exec);
|
||||
export function createMergeHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureId, options } = req.body as {
|
||||
const { projectPath, branchName, worktreePath, options } = req.body as {
|
||||
projectPath: string;
|
||||
featureId: string;
|
||||
branchName: string;
|
||||
worktreePath: string;
|
||||
options?: { squash?: boolean; message?: string };
|
||||
};
|
||||
|
||||
if (!projectPath || !featureId) {
|
||||
if (!projectPath || !branchName || !worktreePath) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath and featureId required',
|
||||
error: 'projectPath, branchName, and worktreePath are required',
|
||||
});
|
||||
return;
|
||||
}
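// Example request body the updated handler expects (hypothetical values):
// {
//   "projectPath": "/abs/path/to/project",
//   "branchName": "feature/scan-worktrees",
//   "worktreePath": "/abs/path/to/project/.worktrees/scan-worktrees",
//   "options": { "squash": true, "message": "Merge scan-worktrees" }
// }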
|
||||
|
||||
const branchName = `feature/${featureId}`;
|
||||
// Git worktrees are stored in project directory
|
||||
const worktreePath = path.join(projectPath, '.worktrees', featureId);
|
||||
|
||||
// Get current branch
|
||||
const { stdout: currentBranch } = await execAsync('git rev-parse --abbrev-ref HEAD', {
|
||||
cwd: projectPath,
|
||||
});
|
||||
// Validate branch exists
|
||||
try {
|
||||
await execAsync(`git rev-parse --verify ${branchName}`, { cwd: projectPath });
|
||||
} catch {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: `Branch "${branchName}" does not exist`,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Merge the feature branch
|
||||
const mergeCmd = options?.squash
|
||||
|
||||
@@ -1,78 +1,40 @@
|
||||
/**
|
||||
* POST /open-in-editor endpoint - Open a worktree directory in the default code editor
|
||||
* GET /default-editor endpoint - Get the name of the default code editor
|
||||
* POST /refresh-editors endpoint - Clear editor cache and re-detect available editors
|
||||
*
|
||||
* This module uses @automaker/platform for cross-platform editor detection and launching.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { isAbsolute } from 'path';
|
||||
import {
|
||||
clearEditorCache,
|
||||
detectAllEditors,
|
||||
detectDefaultEditor,
|
||||
openInEditor,
|
||||
openInFileManager,
|
||||
} from '@automaker/platform';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
const logger = createLogger('open-in-editor');
|
||||
|
||||
// Editor detection with caching
|
||||
interface EditorInfo {
|
||||
name: string;
|
||||
command: string;
|
||||
}
|
||||
|
||||
let cachedEditor: EditorInfo | null = null;
|
||||
|
||||
/**
|
||||
* Detect which code editor is available on the system
|
||||
*/
|
||||
async function detectDefaultEditor(): Promise<EditorInfo> {
|
||||
// Return cached result if available
|
||||
if (cachedEditor) {
|
||||
return cachedEditor;
|
||||
}
|
||||
|
||||
// Try Cursor first (if user has Cursor, they probably prefer it)
|
||||
try {
|
||||
await execAsync('which cursor || where cursor');
|
||||
cachedEditor = { name: 'Cursor', command: 'cursor' };
|
||||
return cachedEditor;
|
||||
} catch {
|
||||
// Cursor not found
|
||||
}
|
||||
|
||||
// Try VS Code
|
||||
try {
|
||||
await execAsync('which code || where code');
|
||||
cachedEditor = { name: 'VS Code', command: 'code' };
|
||||
return cachedEditor;
|
||||
} catch {
|
||||
// VS Code not found
|
||||
}
|
||||
|
||||
// Try Zed
|
||||
try {
|
||||
await execAsync('which zed || where zed');
|
||||
cachedEditor = { name: 'Zed', command: 'zed' };
|
||||
return cachedEditor;
|
||||
} catch {
|
||||
// Zed not found
|
||||
}
|
||||
|
||||
// Try Sublime Text
|
||||
try {
|
||||
await execAsync('which subl || where subl');
|
||||
cachedEditor = { name: 'Sublime Text', command: 'subl' };
|
||||
return cachedEditor;
|
||||
} catch {
|
||||
// Sublime not found
|
||||
}
|
||||
|
||||
// Fallback to file manager
|
||||
const platform = process.platform;
|
||||
if (platform === 'darwin') {
|
||||
cachedEditor = { name: 'Finder', command: 'open' };
|
||||
} else if (platform === 'win32') {
|
||||
cachedEditor = { name: 'Explorer', command: 'explorer' };
|
||||
} else {
|
||||
cachedEditor = { name: 'File Manager', command: 'xdg-open' };
|
||||
}
|
||||
return cachedEditor;
|
||||
export function createGetAvailableEditorsHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const editors = await detectAllEditors();
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
editors,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Get available editors failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
export function createGetDefaultEditorHandler() {
|
||||
@@ -93,11 +55,41 @@ export function createGetDefaultEditorHandler() {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Handler to refresh the editor cache and re-detect available editors
|
||||
 * Useful when the user has installed or uninstalled editors since the last detection
|
||||
*/
|
||||
export function createRefreshEditorsHandler() {
|
||||
return async (_req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
// Clear the cache
|
||||
clearEditorCache();
|
||||
|
||||
// Re-detect editors (this will repopulate the cache)
|
||||
const editors = await detectAllEditors();
|
||||
|
||||
logger.info(`Editor cache refreshed, found ${editors.length} editors`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
editors,
|
||||
message: `Found ${editors.length} available editors`,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Refresh editors failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
export function createOpenInEditorHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { worktreePath } = req.body as {
|
||||
const { worktreePath, editorCommand } = req.body as {
|
||||
worktreePath: string;
|
||||
editorCommand?: string;
|
||||
};
|
||||
|
||||
if (!worktreePath) {
|
||||
@@ -108,42 +100,44 @@ export function createOpenInEditorHandler() {
|
||||
return;
|
||||
}
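// Example request body (hypothetical values); editorCommand is optional, and the
// platform utility presumably falls back to the detected default editor when it is omitted.
// { "worktreePath": "/abs/path/to/project/.worktrees/scan-worktrees", "editorCommand": "code" }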
|
||||
|
||||
const editor = await detectDefaultEditor();
|
||||
// Security: Validate that worktreePath is an absolute path
|
||||
if (!isAbsolute(worktreePath)) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'worktreePath must be an absolute path',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
await execAsync(`${editor.command} "${worktreePath}"`);
|
||||
// Use the platform utility to open in editor
|
||||
const result = await openInEditor(worktreePath, editorCommand);
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
message: `Opened ${worktreePath} in ${editor.name}`,
|
||||
editorName: editor.name,
|
||||
message: `Opened ${worktreePath} in ${result.editorName}`,
|
||||
editorName: result.editorName,
|
||||
},
|
||||
});
|
||||
} catch (editorError) {
|
||||
// If the detected editor fails, try opening in default file manager as fallback
|
||||
const platform = process.platform;
|
||||
let openCommand: string;
|
||||
let fallbackName: string;
|
||||
// If the specified editor fails, try opening in default file manager as fallback
|
||||
logger.warn(
|
||||
`Failed to open in editor, falling back to file manager: ${getErrorMessage(editorError)}`
|
||||
);
|
||||
|
||||
if (platform === 'darwin') {
|
||||
openCommand = `open "${worktreePath}"`;
|
||||
fallbackName = 'Finder';
|
||||
} else if (platform === 'win32') {
|
||||
openCommand = `explorer "${worktreePath}"`;
|
||||
fallbackName = 'Explorer';
|
||||
} else {
|
||||
openCommand = `xdg-open "${worktreePath}"`;
|
||||
fallbackName = 'File Manager';
|
||||
try {
|
||||
const result = await openInFileManager(worktreePath);
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
message: `Opened ${worktreePath} in ${result.editorName}`,
|
||||
editorName: result.editorName,
|
||||
},
|
||||
});
|
||||
} catch (fallbackError) {
|
||||
// Both editor and file manager failed
|
||||
throw fallbackError;
|
||||
}
|
||||
|
||||
await execAsync(openCommand);
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
message: `Opened ${worktreePath} in ${fallbackName}`,
|
||||
editorName: fallbackName,
|
||||
},
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Open in editor failed');
|
||||
|
||||
@@ -10,15 +10,18 @@
|
||||
*/
|
||||
|
||||
import { ProviderFactory } from '../providers/provider-factory.js';
|
||||
import { simpleQuery } from '../providers/simple-query-service.js';
|
||||
import type {
|
||||
ExecuteOptions,
|
||||
Feature,
|
||||
ModelProvider,
|
||||
PipelineStep,
|
||||
FeatureStatusWithPipeline,
|
||||
PipelineConfig,
|
||||
ThinkingLevel,
|
||||
PlanningMode,
|
||||
} from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, stripProviderPrefix } from '@automaker/types';
|
||||
import { DEFAULT_PHASE_MODELS, isClaudeModel, stripProviderPrefix } from '@automaker/types';
|
||||
import {
|
||||
buildPromptWithImages,
|
||||
classifyError,
|
||||
@@ -31,7 +34,13 @@ import {
|
||||
const logger = createLogger('AutoMode');
|
||||
import { resolveModelString, resolvePhaseModel, DEFAULT_MODELS } from '@automaker/model-resolver';
|
||||
import { resolveDependencies, areDependenciesSatisfied } from '@automaker/dependency-resolver';
|
||||
import { getFeatureDir, getAutomakerDir, getFeaturesDir } from '@automaker/platform';
|
||||
import {
|
||||
getFeatureDir,
|
||||
getAutomakerDir,
|
||||
getFeaturesDir,
|
||||
getExecutionStatePath,
|
||||
ensureAutomakerDir,
|
||||
} from '@automaker/platform';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import path from 'path';
|
||||
@@ -77,6 +86,26 @@ interface PlanSpec {
|
||||
tasks?: ParsedTask[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Information about pipeline status when resuming a feature.
|
||||
* Used to determine how to handle features stuck in pipeline execution.
|
||||
*
|
||||
* @property {boolean} isPipeline - Whether the feature is in a pipeline step
|
||||
* @property {string | null} stepId - ID of the current pipeline step (e.g., 'step_123')
|
||||
* @property {number} stepIndex - Index of the step in the sorted pipeline steps (-1 if not found)
|
||||
* @property {number} totalSteps - Total number of steps in the pipeline
|
||||
* @property {PipelineStep | null} step - The pipeline step configuration, or null if step not found
|
||||
* @property {PipelineConfig | null} config - The full pipeline configuration, or null if no pipeline
|
||||
*/
|
||||
interface PipelineStatusInfo {
|
||||
isPipeline: boolean;
|
||||
stepId: string | null;
|
||||
stepIndex: number;
|
||||
totalSteps: number;
|
||||
step: PipelineStep | null;
|
||||
config: PipelineConfig | null;
|
||||
}
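// Example value (illustrative) for a feature stuck on the second of three steps:
//   { isPipeline: true, stepId: 'step_123', stepIndex: 1, totalSteps: 3, step: <PipelineStep>, config: <PipelineConfig> }
// A step that was deleted from the config is signalled by stepIndex === -1 and step === null.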
|
||||
|
||||
/**
|
||||
* Parse tasks from generated spec content
|
||||
* Looks for the ```tasks code block and extracts task lines
|
||||
@@ -201,6 +230,29 @@ interface AutoModeConfig {
|
||||
projectPath: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execution state for recovery after server restart
|
||||
* Tracks which features were running and auto-loop configuration
|
||||
*/
|
||||
interface ExecutionState {
|
||||
version: 1;
|
||||
autoLoopWasRunning: boolean;
|
||||
maxConcurrency: number;
|
||||
projectPath: string;
|
||||
runningFeatureIds: string[];
|
||||
savedAt: string;
|
||||
}
|
||||
|
||||
// Default empty execution state
|
||||
const DEFAULT_EXECUTION_STATE: ExecutionState = {
|
||||
version: 1,
|
||||
autoLoopWasRunning: false,
|
||||
maxConcurrency: 3,
|
||||
projectPath: '',
|
||||
runningFeatureIds: [],
|
||||
savedAt: '',
|
||||
};
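// Example of the JSON persisted at getExecutionStatePath(projectPath) (illustrative values):
// {
//   "version": 1,
//   "autoLoopWasRunning": true,
//   "maxConcurrency": 3,
//   "projectPath": "/abs/path/to/project",
//   "runningFeatureIds": ["feature-1712345678", "feature-1712349999"],
//   "savedAt": "2025-01-01T12:00:00.000Z"
// }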
|
||||
|
||||
// Constants for consecutive failure tracking
|
||||
const CONSECUTIVE_FAILURE_THRESHOLD = 3; // Pause after 3 consecutive failures
|
||||
const FAILURE_WINDOW_MS = 60000; // Failures within 1 minute count as consecutive
|
||||
@@ -322,6 +374,9 @@ export class AutoModeService {
|
||||
projectPath,
|
||||
});
|
||||
|
||||
// Save execution state for recovery after restart
|
||||
await this.saveExecutionState(projectPath);
|
||||
|
||||
// Note: Memory folder initialization is now handled by loadContextFiles
|
||||
|
||||
// Run the loop in the background
|
||||
@@ -390,17 +445,23 @@ export class AutoModeService {
|
||||
*/
|
||||
async stopAutoLoop(): Promise<number> {
|
||||
const wasRunning = this.autoLoopRunning;
|
||||
const projectPath = this.config?.projectPath;
|
||||
this.autoLoopRunning = false;
|
||||
if (this.autoLoopAbortController) {
|
||||
this.autoLoopAbortController.abort();
|
||||
this.autoLoopAbortController = null;
|
||||
}
|
||||
|
||||
// Clear execution state when auto-loop is explicitly stopped
|
||||
if (projectPath) {
|
||||
await this.clearExecutionState(projectPath);
|
||||
}
|
||||
|
||||
// Emit stop event immediately when user explicitly stops
|
||||
if (wasRunning) {
|
||||
this.emitAutoModeEvent('auto_mode_stopped', {
|
||||
message: 'Auto mode stopped',
|
||||
projectPath: this.config?.projectPath,
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -441,6 +502,11 @@ export class AutoModeService {
|
||||
};
|
||||
this.runningFeatures.set(featureId, tempRunningFeature);
|
||||
|
||||
// Save execution state when feature starts
|
||||
if (isAutoMode) {
|
||||
await this.saveExecutionState(projectPath);
|
||||
}
|
||||
|
||||
try {
|
||||
// Validate that project path is allowed using centralized validation
|
||||
validateWorkingDirectory(projectPath);
|
||||
@@ -695,6 +761,11 @@ export class AutoModeService {
|
||||
`Pending approvals at cleanup: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
|
||||
);
|
||||
this.runningFeatures.delete(featureId);
|
||||
|
||||
// Update execution state after feature completes
|
||||
if (this.autoLoopRunning && projectPath) {
|
||||
await this.saveExecutionState(projectPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -869,6 +940,25 @@ Complete the pipeline step instructions above. Review the previous work and appl
|
||||
throw new Error('already running');
|
||||
}
|
||||
|
||||
// Load feature to check status
|
||||
const feature = await this.loadFeature(projectPath, featureId);
|
||||
if (!feature) {
|
||||
throw new Error(`Feature ${featureId} not found`);
|
||||
}
|
||||
|
||||
// Check if feature is stuck in a pipeline step
|
||||
const pipelineInfo = await this.detectPipelineStatus(
|
||||
projectPath,
|
||||
featureId,
|
||||
(feature.status || '') as FeatureStatusWithPipeline
|
||||
);
|
||||
|
||||
if (pipelineInfo.isPipeline) {
|
||||
// Feature stuck in pipeline - use pipeline resume
|
||||
return this.resumePipelineFeature(projectPath, feature, useWorktrees, pipelineInfo);
|
||||
}
|
||||
|
||||
// Normal resume flow for non-pipeline features
|
||||
// Check if context exists in .automaker directory
|
||||
const featureDir = getFeatureDir(projectPath, featureId);
|
||||
const contextPath = path.join(featureDir, 'agent-output.md');
|
||||
@@ -888,11 +978,252 @@ Complete the pipeline step instructions above. Review the previous work and appl
|
||||
}
|
||||
|
||||
// No context, start fresh - executeFeature will handle adding to runningFeatures
|
||||
// Remove the temporary entry we added
|
||||
this.runningFeatures.delete(featureId);
|
||||
return this.executeFeature(projectPath, featureId, useWorktrees, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume a feature that crashed during pipeline execution.
|
||||
* Handles multiple edge cases to ensure robust recovery:
|
||||
* - No context file: Restart entire pipeline from beginning
|
||||
* - Step deleted from config: Complete feature without remaining pipeline steps
|
||||
* - Valid step exists: Resume from the crashed step and continue
|
||||
*
|
||||
* @param {string} projectPath - Absolute path to the project directory
|
||||
* @param {Feature} feature - The feature object (already loaded to avoid redundant reads)
|
||||
* @param {boolean} useWorktrees - Whether to use git worktrees for isolation
|
||||
* @param {PipelineStatusInfo} pipelineInfo - Information about the pipeline status from detectPipelineStatus()
|
||||
* @returns {Promise<void>} Resolves when resume operation completes or throws on error
|
||||
* @throws {Error} If pipeline config is null but stepIndex is valid (should never happen)
|
||||
* @private
|
||||
*/
|
||||
private async resumePipelineFeature(
|
||||
projectPath: string,
|
||||
feature: Feature,
|
||||
useWorktrees: boolean,
|
||||
pipelineInfo: PipelineStatusInfo
|
||||
): Promise<void> {
|
||||
const featureId = feature.id;
|
||||
console.log(
|
||||
`[AutoMode] Resuming feature ${featureId} from pipeline step ${pipelineInfo.stepId}`
|
||||
);
|
||||
|
||||
// Check for context file
|
||||
const featureDir = getFeatureDir(projectPath, featureId);
|
||||
const contextPath = path.join(featureDir, 'agent-output.md');
|
||||
|
||||
let hasContext = false;
|
||||
try {
|
||||
await secureFs.access(contextPath);
|
||||
hasContext = true;
|
||||
} catch {
|
||||
// No context
|
||||
}
|
||||
|
||||
// Edge Case 1: No context file - restart entire pipeline from beginning
|
||||
if (!hasContext) {
|
||||
console.warn(
|
||||
`[AutoMode] No context found for pipeline feature ${featureId}, restarting from beginning`
|
||||
);
|
||||
|
||||
// Reset status to in_progress and start fresh
|
||||
await this.updateFeatureStatus(projectPath, featureId, 'in_progress');
|
||||
|
||||
return this.executeFeature(projectPath, featureId, useWorktrees, false);
|
||||
}
|
||||
|
||||
// Edge Case 2: Step no longer exists in pipeline config
|
||||
if (pipelineInfo.stepIndex === -1) {
|
||||
console.warn(
|
||||
`[AutoMode] Step ${pipelineInfo.stepId} no longer exists in pipeline, completing feature without pipeline`
|
||||
);
|
||||
|
||||
const finalStatus = feature.skipTests ? 'waiting_approval' : 'verified';
|
||||
|
||||
await this.updateFeatureStatus(projectPath, featureId, finalStatus);
|
||||
|
||||
this.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
passes: true,
|
||||
message:
|
||||
'Pipeline step no longer exists - feature completed without remaining pipeline steps',
|
||||
projectPath,
|
||||
});
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// Normal case: Valid pipeline step exists, has context
|
||||
// Resume from the stuck step (re-execute the step that crashed)
|
||||
if (!pipelineInfo.config) {
|
||||
throw new Error('Pipeline config is null but stepIndex is valid - this should not happen');
|
||||
}
|
||||
|
||||
return this.resumeFromPipelineStep(
|
||||
projectPath,
|
||||
feature,
|
||||
useWorktrees,
|
||||
pipelineInfo.stepIndex,
|
||||
pipelineInfo.config
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume pipeline execution from a specific step index.
|
||||
* Re-executes the step that crashed (to handle partial completion),
|
||||
* then continues executing all remaining pipeline steps in order.
|
||||
*
|
||||
* This method handles the complete pipeline resume workflow:
|
||||
* - Validates feature and step index
|
||||
* - Locates or creates git worktree if needed
|
||||
* - Executes remaining steps starting from the crashed step
|
||||
* - Updates feature status to verified/waiting_approval when complete
|
||||
* - Emits progress events throughout execution
|
||||
*
|
||||
* @param {string} projectPath - Absolute path to the project directory
|
||||
* @param {Feature} feature - The feature object (already loaded to avoid redundant reads)
|
||||
* @param {boolean} useWorktrees - Whether to use git worktrees for isolation
|
||||
* @param {number} startFromStepIndex - Zero-based index of the step to resume from
|
||||
* @param {PipelineConfig} pipelineConfig - Pipeline config passed from detectPipelineStatus to avoid re-reading
|
||||
* @returns {Promise<void>} Resolves when pipeline execution completes successfully
|
||||
* @throws {Error} If feature not found, step index invalid, or pipeline execution fails
|
||||
* @private
|
||||
*/
|
||||
private async resumeFromPipelineStep(
|
||||
projectPath: string,
|
||||
feature: Feature,
|
||||
useWorktrees: boolean,
|
||||
startFromStepIndex: number,
|
||||
pipelineConfig: PipelineConfig
|
||||
): Promise<void> {
|
||||
const featureId = feature.id;
|
||||
|
||||
const sortedSteps = [...pipelineConfig.steps].sort((a, b) => a.order - b.order);
|
||||
|
||||
// Validate step index
|
||||
if (startFromStepIndex < 0 || startFromStepIndex >= sortedSteps.length) {
|
||||
throw new Error(`Invalid step index: ${startFromStepIndex}`);
|
||||
}
|
||||
|
||||
// Get steps to execute (from startFromStepIndex onwards)
|
||||
const stepsToExecute = sortedSteps.slice(startFromStepIndex);
|
||||
|
||||
console.log(
|
||||
`[AutoMode] Resuming pipeline for feature ${featureId} from step ${startFromStepIndex + 1}/${sortedSteps.length}`
|
||||
);
|
||||
|
||||
// Add to running features immediately
|
||||
const abortController = new AbortController();
|
||||
this.runningFeatures.set(featureId, {
|
||||
featureId,
|
||||
projectPath,
|
||||
worktreePath: null, // Will be set below
|
||||
branchName: feature.branchName ?? null,
|
||||
abortController,
|
||||
isAutoMode: false,
|
||||
startTime: Date.now(),
|
||||
});
|
||||
|
||||
try {
|
||||
// Validate project path
|
||||
validateWorkingDirectory(projectPath);
|
||||
|
||||
// Derive workDir from feature.branchName
|
||||
let worktreePath: string | null = null;
|
||||
const branchName = feature.branchName;
|
||||
|
||||
if (useWorktrees && branchName) {
|
||||
worktreePath = await this.findExistingWorktreeForBranch(projectPath, branchName);
|
||||
if (worktreePath) {
|
||||
console.log(`[AutoMode] Using worktree for branch "${branchName}": ${worktreePath}`);
|
||||
} else {
|
||||
console.warn(
|
||||
`[AutoMode] Worktree for branch "${branchName}" not found, using project path`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const workDir = worktreePath ? path.resolve(worktreePath) : path.resolve(projectPath);
|
||||
validateWorkingDirectory(workDir);
|
||||
|
||||
// Update running feature with worktree info
|
||||
const runningFeature = this.runningFeatures.get(featureId);
|
||||
if (runningFeature) {
|
||||
runningFeature.worktreePath = worktreePath;
|
||||
runningFeature.branchName = branchName ?? null;
|
||||
}
|
||||
|
||||
// Emit resume event
|
||||
this.emitAutoModeEvent('auto_mode_feature_start', {
|
||||
featureId,
|
||||
projectPath,
|
||||
feature: {
|
||||
id: featureId,
|
||||
title: feature.title || 'Resuming Pipeline',
|
||||
description: feature.description,
|
||||
},
|
||||
});
|
||||
|
||||
this.emitAutoModeEvent('auto_mode_progress', {
|
||||
featureId,
|
||||
content: `Resuming from pipeline step ${startFromStepIndex + 1}/${sortedSteps.length}`,
|
||||
projectPath,
|
||||
});
|
||||
|
||||
// Load autoLoadClaudeMd setting
|
||||
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
||||
projectPath,
|
||||
this.settingsService,
|
||||
'[AutoMode]'
|
||||
);
|
||||
|
||||
// Execute remaining pipeline steps (starting from crashed step)
|
||||
await this.executePipelineSteps(
|
||||
projectPath,
|
||||
featureId,
|
||||
feature,
|
||||
stepsToExecute,
|
||||
workDir,
|
||||
abortController,
|
||||
autoLoadClaudeMd
|
||||
);
|
||||
|
||||
// Determine final status
|
||||
const finalStatus = feature.skipTests ? 'waiting_approval' : 'verified';
|
||||
await this.updateFeatureStatus(projectPath, featureId, finalStatus);
|
||||
|
||||
console.log('[AutoMode] Pipeline resume completed successfully');
|
||||
|
||||
this.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
passes: true,
|
||||
message: 'Pipeline resumed and completed successfully',
|
||||
projectPath,
|
||||
});
|
||||
} catch (error) {
|
||||
const errorInfo = classifyError(error);
|
||||
|
||||
if (errorInfo.isAbort) {
|
||||
this.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
passes: false,
|
||||
message: 'Pipeline resume stopped by user',
|
||||
projectPath,
|
||||
});
|
||||
} else {
|
||||
console.error(`[AutoMode] Pipeline resume failed for feature ${featureId}:`, error);
|
||||
await this.updateFeatureStatus(projectPath, featureId, 'backlog');
|
||||
this.emitAutoModeEvent('auto_mode_error', {
|
||||
featureId,
|
||||
error: errorInfo.message,
|
||||
errorType: errorInfo.type,
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
} finally {
|
||||
this.runningFeatures.delete(featureId);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Follow up on a feature with additional instructions
|
||||
*/
|
||||
@@ -2837,6 +3168,111 @@ Review the previous work and continue the implementation. If the feature appears
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect if a feature is stuck in a pipeline step and extract step information.
|
||||
* Parses the feature status to determine if it's a pipeline status (e.g., 'pipeline_step_xyz'),
|
||||
* loads the pipeline configuration, and validates that the step still exists.
|
||||
*
|
||||
* This method handles several scenarios:
|
||||
* - Non-pipeline status: Returns default PipelineStatusInfo with isPipeline=false
|
||||
* - Invalid pipeline status format: Returns isPipeline=true but null step info
|
||||
* - Step deleted from config: Returns stepIndex=-1 to signal missing step
|
||||
* - Valid pipeline step: Returns full step information and config
|
||||
*
|
||||
* @param {string} projectPath - Absolute path to the project directory
|
||||
* @param {string} featureId - Unique identifier of the feature
|
||||
* @param {FeatureStatusWithPipeline} currentStatus - Current feature status (may include pipeline step info)
|
||||
* @returns {Promise<PipelineStatusInfo>} Information about the pipeline status and step
|
||||
* @private
|
||||
*/
|
||||
private async detectPipelineStatus(
|
||||
projectPath: string,
|
||||
featureId: string,
|
||||
currentStatus: FeatureStatusWithPipeline
|
||||
): Promise<PipelineStatusInfo> {
|
||||
// Check if status is pipeline format using PipelineService
|
||||
const isPipeline = pipelineService.isPipelineStatus(currentStatus);
|
||||
|
||||
if (!isPipeline) {
|
||||
return {
|
||||
isPipeline: false,
|
||||
stepId: null,
|
||||
stepIndex: -1,
|
||||
totalSteps: 0,
|
||||
step: null,
|
||||
config: null,
|
||||
};
|
||||
}
|
||||
|
||||
// Extract step ID using PipelineService
|
||||
const stepId = pipelineService.getStepIdFromStatus(currentStatus);
|
||||
|
||||
if (!stepId) {
|
||||
console.warn(
|
||||
`[AutoMode] Feature ${featureId} has invalid pipeline status format: ${currentStatus}`
|
||||
);
|
||||
return {
|
||||
isPipeline: true,
|
||||
stepId: null,
|
||||
stepIndex: -1,
|
||||
totalSteps: 0,
|
||||
step: null,
|
||||
config: null,
|
||||
};
|
||||
}
|
||||
|
||||
// Load pipeline config
|
||||
const config = await pipelineService.getPipelineConfig(projectPath);
|
||||
|
||||
if (!config || config.steps.length === 0) {
|
||||
// Pipeline config doesn't exist or is empty - the feature is stuck with an invalid pipeline status
|
||||
console.warn(
|
||||
`[AutoMode] Feature ${featureId} has pipeline status but no pipeline config exists`
|
||||
);
|
||||
return {
|
||||
isPipeline: true,
|
||||
stepId,
|
||||
stepIndex: -1,
|
||||
totalSteps: 0,
|
||||
step: null,
|
||||
config: null,
|
||||
};
|
||||
}
|
||||
|
||||
// Find the step directly from config (already loaded, avoid redundant file read)
|
||||
const sortedSteps = [...config.steps].sort((a, b) => a.order - b.order);
|
||||
const stepIndex = sortedSteps.findIndex((s) => s.id === stepId);
|
||||
const step = stepIndex === -1 ? null : sortedSteps[stepIndex];
|
||||
|
||||
if (!step) {
|
||||
// Step not found in current config - step was deleted/changed
|
||||
console.warn(
|
||||
`[AutoMode] Feature ${featureId} stuck in step ${stepId} which no longer exists in pipeline config`
|
||||
);
|
||||
return {
|
||||
isPipeline: true,
|
||||
stepId,
|
||||
stepIndex: -1,
|
||||
totalSteps: sortedSteps.length,
|
||||
step: null,
|
||||
config,
|
||||
};
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[AutoMode] Detected pipeline status for feature ${featureId}: step ${stepIndex + 1}/${sortedSteps.length} (${step.name})`
|
||||
);
|
||||
|
||||
return {
|
||||
isPipeline: true,
|
||||
stepId,
|
||||
stepIndex,
|
||||
totalSteps: sortedSteps.length,
|
||||
step,
|
||||
config,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a focused prompt for executing a single task.
|
||||
* Each task gets minimal context to keep the agent focused.
|
||||
@@ -2950,6 +3386,149 @@ Begin implementing task ${task.id} now.`;
|
||||
});
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Execution State Persistence - For recovery after server restart
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Save execution state to disk for recovery after server restart
|
||||
*/
|
||||
private async saveExecutionState(projectPath: string): Promise<void> {
|
||||
try {
|
||||
await ensureAutomakerDir(projectPath);
|
||||
const statePath = getExecutionStatePath(projectPath);
|
||||
const state: ExecutionState = {
|
||||
version: 1,
|
||||
autoLoopWasRunning: this.autoLoopRunning,
|
||||
maxConcurrency: this.config?.maxConcurrency ?? 3,
|
||||
projectPath,
|
||||
runningFeatureIds: Array.from(this.runningFeatures.keys()),
|
||||
savedAt: new Date().toISOString(),
|
||||
};
|
||||
await secureFs.writeFile(statePath, JSON.stringify(state, null, 2), 'utf-8');
|
||||
logger.info(`Saved execution state: ${state.runningFeatureIds.length} running features`);
|
||||
} catch (error) {
|
||||
logger.error('Failed to save execution state:', error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load execution state from disk
|
||||
*/
|
||||
private async loadExecutionState(projectPath: string): Promise<ExecutionState> {
|
||||
try {
|
||||
const statePath = getExecutionStatePath(projectPath);
|
||||
const content = (await secureFs.readFile(statePath, 'utf-8')) as string;
|
||||
const state = JSON.parse(content) as ExecutionState;
|
||||
return state;
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
|
||||
logger.error('Failed to load execution state:', error);
|
||||
}
|
||||
return DEFAULT_EXECUTION_STATE;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear execution state (called on successful shutdown or when auto-loop stops)
|
||||
*/
|
||||
private async clearExecutionState(projectPath: string): Promise<void> {
|
||||
try {
|
||||
const statePath = getExecutionStatePath(projectPath);
|
||||
await secureFs.unlink(statePath);
|
||||
logger.info('Cleared execution state');
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
|
||||
logger.error('Failed to clear execution state:', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for and resume interrupted features after server restart
|
||||
* This should be called during server initialization
|
||||
*/
|
||||
async resumeInterruptedFeatures(projectPath: string): Promise<void> {
|
||||
logger.info('Checking for interrupted features to resume...');
|
||||
|
||||
// Load all features and find those that were interrupted
|
||||
const featuresDir = getFeaturesDir(projectPath);
|
||||
|
||||
try {
|
||||
const entries = await secureFs.readdir(featuresDir, { withFileTypes: true });
|
||||
const interruptedFeatures: Feature[] = [];
|
||||
|
||||
for (const entry of entries) {
|
||||
if (entry.isDirectory()) {
|
||||
const featurePath = path.join(featuresDir, entry.name, 'feature.json');
|
||||
try {
|
||||
const data = (await secureFs.readFile(featurePath, 'utf-8')) as string;
|
||||
const feature = JSON.parse(data) as Feature;
|
||||
|
||||
// Check if feature was interrupted (in_progress or pipeline_*)
|
||||
if (
|
||||
feature.status === 'in_progress' ||
|
||||
(feature.status && feature.status.startsWith('pipeline_'))
|
||||
) {
|
||||
// Verify it has existing context (agent-output.md)
|
||||
const featureDir = getFeatureDir(projectPath, feature.id);
|
||||
const contextPath = path.join(featureDir, 'agent-output.md');
|
||||
try {
|
||||
await secureFs.access(contextPath);
|
||||
interruptedFeatures.push(feature);
|
||||
logger.info(
|
||||
`Found interrupted feature: ${feature.id} (${feature.title}) - status: ${feature.status}`
|
||||
);
|
||||
} catch {
|
||||
// No context file, skip this feature - it will be restarted fresh
|
||||
logger.info(`Interrupted feature ${feature.id} has no context, will restart fresh`);
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Skip invalid features
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (interruptedFeatures.length === 0) {
|
||||
logger.info('No interrupted features found');
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info(`Found ${interruptedFeatures.length} interrupted feature(s) to resume`);
|
||||
|
||||
// Emit event to notify UI
|
||||
this.emitAutoModeEvent('auto_mode_resuming_features', {
|
||||
message: `Resuming ${interruptedFeatures.length} interrupted feature(s) after server restart`,
|
||||
projectPath,
|
||||
featureIds: interruptedFeatures.map((f) => f.id),
|
||||
features: interruptedFeatures.map((f) => ({
|
||||
id: f.id,
|
||||
title: f.title,
|
||||
status: f.status,
|
||||
})),
|
||||
});
|
||||
|
||||
// Resume each interrupted feature
|
||||
for (const feature of interruptedFeatures) {
|
||||
try {
|
||||
logger.info(`Resuming feature: ${feature.id} (${feature.title})`);
|
||||
// Use resumeFeature which will detect the existing context and continue
|
||||
await this.resumeFeature(projectPath, feature.id, true);
|
||||
} catch (error) {
|
||||
logger.error(`Failed to resume feature ${feature.id}:`, error);
|
||||
// Continue with other features
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
|
||||
logger.info('No features directory found, nothing to resume');
|
||||
} else {
|
||||
logger.error('Error checking for interrupted features:', error);
|
||||
}
|
||||
}
|
||||
}
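// Hypothetical startup wiring (not shown in this diff): call resumeInterruptedFeatures()
// once during server initialization so features interrupted by a restart are picked back up.
async function exampleRestoreAfterRestart(
  autoMode: AutoModeService,
  projectPath: string
): Promise<void> {
  await autoMode.resumeInterruptedFeatures(projectPath);
}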
|
||||
|
||||
/**
|
||||
* Extract and record learnings from a completed feature
|
||||
* Uses a quick Claude call to identify important decisions and patterns
|
||||
@@ -3002,41 +3581,43 @@ IMPORTANT: Only include NON-OBVIOUS learnings with real reasoning. Skip trivial
|
||||
If nothing notable: {"learnings": []}`;
|
||||
|
||||
try {
|
||||
// Import query dynamically to avoid circular dependencies
|
||||
const { query } = await import('@anthropic-ai/claude-agent-sdk');
|
||||
|
||||
// Get model from phase settings
|
||||
const settings = await this.settingsService?.getGlobalSettings();
|
||||
const phaseModelEntry =
|
||||
settings?.phaseModels?.memoryExtractionModel || DEFAULT_PHASE_MODELS.memoryExtractionModel;
|
||||
const { model } = resolvePhaseModel(phaseModelEntry);
|
||||
const hasClaudeKey = Boolean(process.env.ANTHROPIC_API_KEY);
|
||||
let resolvedModel = model;
|
||||
|
||||
const stream = query({
|
||||
prompt: userPrompt,
|
||||
options: {
|
||||
model,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
permissionMode: 'acceptEdits',
|
||||
systemPrompt:
|
||||
'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
|
||||
},
|
||||
});
|
||||
|
||||
// Extract text from stream
|
||||
let responseText = '';
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
responseText += block.text;
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
responseText = msg.result || responseText;
|
||||
if (isClaudeModel(model) && !hasClaudeKey) {
|
||||
const fallbackModel = feature.model
|
||||
? resolveModelString(feature.model, DEFAULT_MODELS.claude)
|
||||
: null;
|
||||
if (fallbackModel && !isClaudeModel(fallbackModel)) {
|
||||
console.log(
|
||||
`[AutoMode] Claude not configured for memory extraction; using feature model "${fallbackModel}".`
|
||||
);
|
||||
resolvedModel = fallbackModel;
|
||||
} else {
|
||||
console.log(
|
||||
'[AutoMode] Claude not configured for memory extraction; skipping learning extraction.'
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const result = await simpleQuery({
|
||||
prompt: userPrompt,
|
||||
model: resolvedModel,
|
||||
cwd: projectPath,
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
systemPrompt:
|
||||
'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
|
||||
});
|
||||
|
||||
const responseText = result.text;
|
||||
|
||||
console.log(`[AutoMode] Learning extraction response: ${responseText.length} chars`);
|
||||
console.log(`[AutoMode] Response preview: ${responseText.substring(0, 300)}`);
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ import { spawn } from 'child_process';
|
||||
import * as os from 'os';
|
||||
import * as pty from 'node-pty';
|
||||
import { ClaudeUsage } from '../routes/claude/types.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
/**
|
||||
* Claude Usage Service
|
||||
@@ -14,6 +15,8 @@ import { ClaudeUsage } from '../routes/claude/types.js';
|
||||
* - macOS: Uses 'expect' command for PTY
|
||||
* - Windows/Linux: Uses node-pty for PTY
|
||||
*/
|
||||
const logger = createLogger('ClaudeUsage');
|
||||
|
||||
export class ClaudeUsageService {
|
||||
private claudeBinary = 'claude';
|
||||
private timeout = 30000; // 30 second timeout
|
||||
@@ -46,13 +49,11 @@ export class ClaudeUsageService {
|
||||
|
||||
/**
|
||||
* Execute the claude /usage command and return the output
|
||||
* Uses platform-specific PTY implementation
|
||||
* Uses node-pty on all platforms for consistency
|
||||
*/
|
||||
private executeClaudeUsageCommand(): Promise<string> {
|
||||
if (this.isWindows || this.isLinux) {
|
||||
return this.executeClaudeUsageCommandPty();
|
||||
}
|
||||
return this.executeClaudeUsageCommandMac();
|
||||
// Use node-pty on all platforms - it's more reliable than expect on macOS
|
||||
return this.executeClaudeUsageCommandPty();
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -64,24 +65,36 @@ export class ClaudeUsageService {
|
||||
let stderr = '';
|
||||
let settled = false;
|
||||
|
||||
// Use a simple working directory (home or tmp)
|
||||
const workingDirectory = process.env.HOME || '/tmp';
|
||||
// Use current working directory - likely already trusted by Claude CLI
|
||||
const workingDirectory = process.cwd();
|
||||
|
||||
// Use 'expect' with an inline script to run claude /usage with a PTY
|
||||
// Wait for "Current session" header, then wait for full output before exiting
|
||||
// Running from cwd which should already be trusted
|
||||
const expectScript = `
|
||||
set timeout 20
|
||||
set timeout 30
|
||||
spawn claude /usage
|
||||
|
||||
# Wait for usage data or handle trust prompt if needed
|
||||
expect {
|
||||
"Current session" {
|
||||
sleep 2
|
||||
send "\\x1b"
|
||||
-re "Ready to code|permission to work|Do you want to work" {
|
||||
# Trust prompt appeared - send Enter to approve
|
||||
sleep 1
|
||||
send "\\r"
|
||||
exp_continue
|
||||
}
|
||||
"Esc to cancel" {
|
||||
"Current session" {
|
||||
# Usage data appeared - wait for full output, then exit
|
||||
sleep 3
|
||||
send "\\x1b"
|
||||
}
|
||||
timeout {}
|
||||
"% left" {
|
||||
# Usage percentage appeared
|
||||
sleep 3
|
||||
send "\\x1b"
|
||||
}
|
||||
timeout {
|
||||
send "\\x1b"
|
||||
}
|
||||
eof {}
|
||||
}
|
||||
expect eof
|
||||
@@ -155,77 +168,190 @@ export class ClaudeUsageService {
|
||||
let output = '';
|
||||
let settled = false;
|
||||
let hasSeenUsageData = false;
|
||||
let hasSeenTrustPrompt = false;
|
||||
|
||||
const workingDirectory = this.isWindows
|
||||
? process.env.USERPROFILE || os.homedir() || 'C:\\'
|
||||
: process.env.HOME || os.homedir() || '/tmp';
|
||||
// Use current working directory (project dir) - most likely already trusted by Claude CLI
|
||||
const workingDirectory = process.cwd();
|
||||
|
||||
// Use platform-appropriate shell and command
|
||||
const shell = this.isWindows ? 'cmd.exe' : '/bin/sh';
|
||||
const args = this.isWindows ? ['/c', 'claude', '/usage'] : ['-c', 'claude /usage'];
|
||||
// Use --add-dir to whitelist the current directory and bypass the trust prompt
|
||||
// We don't pass /usage here; we'll type it into the REPL once it is ready
|
||||
const args = this.isWindows
|
||||
? ['/c', 'claude', '--add-dir', workingDirectory]
|
||||
: ['-c', `claude --add-dir "${workingDirectory}"`];
|
||||
|
||||
const ptyProcess = pty.spawn(shell, args, {
|
||||
name: 'xterm-256color',
|
||||
cols: 120,
|
||||
rows: 30,
|
||||
cwd: workingDirectory,
|
||||
env: {
|
||||
...process.env,
|
||||
TERM: 'xterm-256color',
|
||||
} as Record<string, string>,
|
||||
});
|
||||
let ptyProcess: any = null;
|
||||
|
||||
try {
|
||||
ptyProcess = pty.spawn(shell, args, {
|
||||
name: 'xterm-256color',
|
||||
cols: 120,
|
||||
rows: 30,
|
||||
cwd: workingDirectory,
|
||||
env: {
|
||||
...process.env,
|
||||
TERM: 'xterm-256color',
|
||||
} as Record<string, string>,
|
||||
});
|
||||
} catch (spawnError) {
|
||||
const errorMessage = spawnError instanceof Error ? spawnError.message : String(spawnError);
|
||||
logger.error('[executeClaudeUsageCommandPty] Failed to spawn PTY:', errorMessage);
|
||||
|
||||
// Return a user-friendly error instead of crashing
|
||||
reject(
|
||||
new Error(
|
||||
`Unable to access terminal: ${errorMessage}. Claude CLI may not be available or PTY support is limited in this environment.`
|
||||
)
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
if (!settled) {
|
||||
settled = true;
|
||||
ptyProcess.kill();
|
||||
if (ptyProcess && !ptyProcess.killed) {
|
||||
ptyProcess.kill();
|
||||
}
|
||||
// Don't fail if we have data - return it instead
|
||||
if (output.includes('Current session')) {
|
||||
resolve(output);
|
||||
} else if (hasSeenTrustPrompt) {
|
||||
// Trust prompt was shown but we couldn't auto-approve it
|
||||
reject(
|
||||
new Error(
|
||||
'TRUST_PROMPT_PENDING: Claude CLI is waiting for folder permission. Please run "claude" in your terminal and approve access to continue.'
|
||||
)
|
||||
);
|
||||
} else {
|
||||
reject(new Error('Command timed out'));
|
||||
reject(
|
||||
new Error(
|
||||
'The Claude CLI took too long to respond. This can happen if the CLI is waiting for a trust prompt or is otherwise busy.'
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
}, this.timeout);
|
||||
}, 45000); // 45 second timeout
|
||||
|
||||
ptyProcess.onData((data) => {
|
||||
let hasSentCommand = false;
|
||||
let hasApprovedTrust = false;
|
||||
|
||||
ptyProcess.onData((data: string) => {
|
||||
output += data;
|
||||
|
||||
// Check if we've seen the usage data (look for "Current session")
|
||||
if (!hasSeenUsageData && output.includes('Current session')) {
|
||||
// Strip ANSI codes for easier matching
|
||||
// eslint-disable-next-line no-control-regex
|
||||
const cleanOutput = output.replace(/\x1B\[[0-9;]*[A-Za-z]/g, '');
|
||||
|
||||
// Check for specific authentication/permission errors
|
||||
if (
|
||||
cleanOutput.includes('OAuth token does not meet scope requirement') ||
|
||||
cleanOutput.includes('permission_error') ||
|
||||
cleanOutput.includes('token_expired') ||
|
||||
cleanOutput.includes('authentication_error')
|
||||
) {
|
||||
if (!settled) {
|
||||
settled = true;
|
||||
if (ptyProcess && !ptyProcess.killed) {
|
||||
ptyProcess.kill();
|
||||
}
|
||||
reject(
|
||||
new Error(
|
||||
"Claude CLI authentication issue. Please run 'claude logout' and then 'claude login' in your terminal to refresh permissions."
|
||||
)
|
||||
);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if we've seen the usage data (look for "Current session" or the TUI Usage header)
|
||||
if (
|
||||
!hasSeenUsageData &&
|
||||
(cleanOutput.includes('Current session') ||
|
||||
(cleanOutput.includes('Usage') && cleanOutput.includes('% left')))
|
||||
) {
|
||||
hasSeenUsageData = true;
|
||||
// Wait for full output, then send escape to exit
|
||||
setTimeout(() => {
|
||||
if (!settled) {
|
||||
if (!settled && ptyProcess && !ptyProcess.killed) {
|
||||
ptyProcess.write('\x1b'); // Send escape key
|
||||
|
||||
// Fallback: if ESC doesn't exit (Linux), use SIGTERM after 2s
|
||||
setTimeout(() => {
|
||||
if (!settled) {
|
||||
if (!settled && ptyProcess && !ptyProcess.killed) {
|
||||
ptyProcess.kill('SIGTERM');
|
||||
}
|
||||
}, 2000);
|
||||
}
|
||||
}, 2000);
|
||||
}, 3000);
|
||||
}
|
||||
|
||||
// Handle Trust Dialog - multiple variants:
|
||||
// - "Do you want to work in this folder?"
|
||||
// - "Ready to code here?" / "I'll need permission to work with your files"
|
||||
// Since we are running in cwd (project dir), it is safe to approve.
|
||||
if (
|
||||
!hasApprovedTrust &&
|
||||
(cleanOutput.includes('Do you want to work in this folder?') ||
|
||||
cleanOutput.includes('Ready to code here') ||
|
||||
cleanOutput.includes('permission to work with your files'))
|
||||
) {
|
||||
hasApprovedTrust = true;
|
||||
hasSeenTrustPrompt = true;
|
||||
// Wait a tiny bit to ensure prompt is ready, then send Enter
|
||||
setTimeout(() => {
|
||||
if (!settled && ptyProcess && !ptyProcess.killed) {
|
||||
ptyProcess.write('\r');
|
||||
}
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
// Detect REPL prompt and send /usage command
|
||||
if (
|
||||
!hasSentCommand &&
|
||||
(cleanOutput.includes('❯') || cleanOutput.includes('? for shortcuts'))
|
||||
) {
|
||||
hasSentCommand = true;
|
||||
// Wait for REPL to fully settle
|
||||
setTimeout(() => {
|
||||
if (!settled && ptyProcess && !ptyProcess.killed) {
|
||||
// Send command with carriage return
|
||||
ptyProcess.write('/usage\r');
|
||||
|
||||
// Send another enter after 1 second to confirm selection if autocomplete menu appeared
|
||||
setTimeout(() => {
|
||||
if (!settled && ptyProcess && !ptyProcess.killed) {
|
||||
ptyProcess.write('\r');
|
||||
}
|
||||
}, 1200);
|
||||
}
|
||||
}, 1500);
|
||||
}
|
||||
|
||||
// Fallback: if we see "Esc to cancel" but haven't seen usage data yet
|
||||
if (!hasSeenUsageData && output.includes('Esc to cancel')) {
|
||||
if (
|
||||
!hasSeenUsageData &&
|
||||
cleanOutput.includes('Esc to cancel') &&
|
||||
!cleanOutput.includes('Do you want to work in this folder?')
|
||||
) {
|
||||
setTimeout(() => {
|
||||
if (!settled) {
|
||||
if (!settled && ptyProcess && !ptyProcess.killed) {
|
||||
ptyProcess.write('\x1b'); // Send escape key
|
||||
}
|
||||
}, 3000);
|
||||
}, 5000);
|
||||
}
|
||||
});
|
||||
|
||||
ptyProcess.onExit(({ exitCode }) => {
|
||||
ptyProcess.onExit(({ exitCode }: { exitCode: number }) => {
|
||||
clearTimeout(timeoutId);
|
||||
if (settled) return;
|
||||
settled = true;
|
||||
|
||||
// Check for authentication errors in output
|
||||
if (output.includes('token_expired') || output.includes('authentication_error')) {
|
||||
if (
|
||||
output.includes('token_expired') ||
|
||||
output.includes('authentication_error') ||
|
||||
output.includes('permission_error')
|
||||
) {
|
||||
reject(new Error("Authentication required - please run 'claude login'"));
|
||||
return;
|
||||
}
|
||||
|
||||
apps/server/src/services/codex-app-server-service.ts (new file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
import { spawn, type ChildProcess } from 'child_process';
|
||||
import readline from 'readline';
|
||||
import { findCodexCliPath } from '@automaker/platform';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import type {
|
||||
AppServerModelResponse,
|
||||
AppServerAccountResponse,
|
||||
AppServerRateLimitsResponse,
|
||||
JsonRpcRequest,
|
||||
} from '@automaker/types';
|
||||
|
||||
const logger = createLogger('CodexAppServer');
|
||||
|
||||
/**
|
||||
* CodexAppServerService
|
||||
*
|
||||
* Centralized service for communicating with Codex CLI's app-server via JSON-RPC protocol.
|
||||
* Handles process spawning, JSON-RPC messaging, and cleanup.
|
||||
*
|
||||
* Connection strategy: Spawn on-demand (new process for each method call)
|
||||
*/
|
||||
export class CodexAppServerService {
|
||||
private cachedCliPath: string | null = null;
|
||||
|
||||
/**
|
||||
* Check if Codex CLI is available on the system
|
||||
*/
|
||||
async isAvailable(): Promise<boolean> {
|
||||
this.cachedCliPath = await findCodexCliPath();
|
||||
return Boolean(this.cachedCliPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch available models from app-server
|
||||
*/
|
||||
async getModels(): Promise<AppServerModelResponse | null> {
|
||||
const result = await this.executeJsonRpc<AppServerModelResponse>((sendRequest) => {
|
||||
return sendRequest('model/list', {});
|
||||
});
|
||||
|
||||
if (result) {
|
||||
logger.info(`[getModels] ✓ Fetched ${result.data.length} models`);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch account information from app-server
|
||||
*/
|
||||
async getAccount(): Promise<AppServerAccountResponse | null> {
|
||||
return this.executeJsonRpc<AppServerAccountResponse>((sendRequest) => {
|
||||
return sendRequest('account/read', { refreshToken: false });
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch rate limits from app-server
|
||||
*/
|
||||
async getRateLimits(): Promise<AppServerRateLimitsResponse | null> {
|
||||
return this.executeJsonRpc<AppServerRateLimitsResponse>((sendRequest) => {
|
||||
return sendRequest('account/rateLimits/read', {});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute JSON-RPC requests via Codex app-server
|
||||
*
|
||||
* This method:
|
||||
* 1. Spawns a new `codex app-server` process
|
||||
* 2. Handles JSON-RPC initialization handshake
|
||||
* 3. Executes user-provided requests
|
||||
* 4. Cleans up the process
|
||||
*
|
||||
* @param requestFn - Function that receives sendRequest helper and returns a promise
|
||||
* @returns Result of the JSON-RPC request or null on failure
|
||||
*/
|
||||
private async executeJsonRpc<T>(
|
||||
requestFn: (sendRequest: <R>(method: string, params?: unknown) => Promise<R>) => Promise<T>
|
||||
): Promise<T | null> {
|
||||
let childProcess: ChildProcess | null = null;
|
||||
|
||||
try {
|
||||
const cliPath = this.cachedCliPath || (await findCodexCliPath());
|
||||
|
||||
if (!cliPath) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// On Windows, .cmd files must be run through shell
|
||||
const needsShell = process.platform === 'win32' && cliPath.toLowerCase().endsWith('.cmd');
|
||||
|
||||
childProcess = spawn(cliPath, ['app-server'], {
|
||||
cwd: process.cwd(),
|
||||
env: {
|
||||
...process.env,
|
||||
TERM: 'dumb',
|
||||
},
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
shell: needsShell,
|
||||
});
|
||||
|
||||
if (!childProcess.stdin || !childProcess.stdout) {
|
||||
throw new Error('Failed to create stdio pipes');
|
||||
}
|
||||
|
||||
// Setup readline for reading JSONL responses
|
||||
const rl = readline.createInterface({
|
||||
input: childProcess.stdout,
|
||||
crlfDelay: Infinity,
|
||||
});
|
||||
|
||||
// Message ID counter for JSON-RPC
|
||||
let messageId = 0;
|
||||
const pendingRequests = new Map<
|
||||
number,
|
||||
{
|
||||
resolve: (value: unknown) => void;
|
||||
reject: (error: Error) => void;
|
||||
timeout: NodeJS.Timeout;
|
||||
}
|
||||
>();
|
||||
|
||||
// Process incoming messages
|
||||
rl.on('line', (line) => {
|
||||
if (!line.trim()) return;
|
||||
|
||||
try {
|
||||
const message = JSON.parse(line);
|
||||
|
||||
// Handle response to our request
|
||||
if ('id' in message && message.id !== undefined) {
|
||||
const pending = pendingRequests.get(message.id);
|
||||
if (pending) {
|
||||
clearTimeout(pending.timeout);
|
||||
pendingRequests.delete(message.id);
|
||||
if (message.error) {
|
||||
pending.reject(new Error(message.error.message || 'Unknown error'));
|
||||
} else {
|
||||
pending.resolve(message.result);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ignore notifications (no id field)
|
||||
} catch {
|
||||
// Ignore parse errors for non-JSON lines
|
||||
}
|
||||
});
|
||||
|
||||
// Helper to send JSON-RPC request and wait for response
|
||||
const sendRequest = <R>(method: string, params?: unknown): Promise<R> => {
|
||||
return new Promise((resolve, reject) => {
|
||||
const id = ++messageId;
|
||||
const request: JsonRpcRequest = {
|
||||
method,
|
||||
id,
|
||||
params: params ?? {},
|
||||
};
|
||||
|
||||
// Set timeout for request (10 seconds)
|
||||
const timeout = setTimeout(() => {
|
||||
pendingRequests.delete(id);
|
||||
reject(new Error(`Request timeout: ${method}`));
|
||||
}, 10000);
|
||||
|
||||
pendingRequests.set(id, {
|
||||
resolve: resolve as (value: unknown) => void,
|
||||
reject,
|
||||
timeout,
|
||||
});
|
||||
|
||||
childProcess!.stdin!.write(JSON.stringify(request) + '\n');
|
||||
});
|
||||
};
|
||||
|
||||
// Helper to send notification (no response expected)
|
||||
const sendNotification = (method: string, params?: unknown): void => {
|
||||
const notification = params ? { method, params } : { method };
|
||||
childProcess!.stdin!.write(JSON.stringify(notification) + '\n');
|
||||
};
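// Wire format example (illustrative): each line on stdin/stdout is a single JSON-RPC message.
//   -> {"method":"initialize","id":1,"params":{"clientInfo":{"name":"automaker",...}}}
//   <- {"id":1,"result":{...}}
//   -> {"method":"initialized"}                      (notification: no id, no response expected)
//   -> {"method":"model/list","id":2,"params":{}}
//   <- {"id":2,"result":{"data":[...]}}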
|
||||
|
||||
// 1. Initialize the app-server
|
||||
await sendRequest('initialize', {
|
||||
clientInfo: {
|
||||
name: 'automaker',
|
||||
title: 'AutoMaker',
|
||||
version: '1.0.0',
|
||||
},
|
||||
});
|
||||
|
||||
// 2. Send initialized notification
|
||||
sendNotification('initialized');
|
||||
|
||||
// 3. Execute user-provided requests
|
||||
const result = await requestFn(sendRequest);
|
||||
|
||||
// Clean up
|
||||
rl.close();
|
||||
childProcess.kill('SIGTERM');
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error('[executeJsonRpc] Failed:', error);
|
||||
return null;
|
||||
} finally {
|
||||
// Ensure process is killed
|
||||
if (childProcess && !childProcess.killed) {
|
||||
childProcess.kill('SIGTERM');
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
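For orientation, a hedged sketch of how a method on this service could drive the helper above; the 'account/get' method name is a placeholder, not an endpoint taken from this diff:

// Illustrative only: placeholder method and JSON-RPC name.
private async fetchAccountExample(): Promise<unknown | null> {
  return this.executeJsonRpc(async (sendRequest) => {
    // Several requests may be issued here; executeJsonRpc tears down the
    // child process and readline interface once this callback resolves.
    return sendRequest<unknown>('account/get', {});
  });
}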
258  apps/server/src/services/codex-model-cache-service.ts  Normal file
@@ -0,0 +1,258 @@
import path from 'path';
import { secureFs } from '@automaker/platform';
import { createLogger } from '@automaker/utils';
import type { AppServerModel } from '@automaker/types';
import type { CodexAppServerService } from './codex-app-server-service.js';

const logger = createLogger('CodexModelCache');

/**
 * Codex model with UI-compatible format
 */
export interface CodexModel {
  id: string;
  label: string;
  description: string;
  hasThinking: boolean;
  supportsVision: boolean;
  tier: 'premium' | 'standard' | 'basic';
  isDefault: boolean;
}

/**
 * Cache structure stored on disk
 */
interface CodexModelCache {
  models: CodexModel[];
  cachedAt: number;
  ttl: number;
}

/**
 * CodexModelCacheService
 *
 * Caches Codex models fetched from app-server with TTL-based invalidation and disk persistence.
 *
 * Features:
 * - 1-hour TTL (configurable)
 * - Atomic file writes (temp file + rename)
 * - Thread-safe (deduplicates concurrent refresh requests)
 * - Auto-bootstrap on service creation
 * - Graceful fallback (returns empty array on errors)
 */
export class CodexModelCacheService {
  private cacheFilePath: string;
  private ttl: number;
  private appServerService: CodexAppServerService;
  private inFlightRefresh: Promise<CodexModel[]> | null = null;

  constructor(
    dataDir: string,
    appServerService: CodexAppServerService,
    ttl: number = 3600000 // 1 hour default
  ) {
    this.cacheFilePath = path.join(dataDir, 'codex-models-cache.json');
    this.ttl = ttl;
    this.appServerService = appServerService;
  }

  /**
   * Get models from cache or fetch if stale
   *
   * @param forceRefresh - If true, bypass cache and fetch fresh data
   * @returns Array of Codex models (empty array if unavailable)
   */
  async getModels(forceRefresh = false): Promise<CodexModel[]> {
    // If force refresh, skip cache
    if (forceRefresh) {
      return this.refreshModels();
    }

    // Try to load from cache
    const cached = await this.loadFromCache();
    if (cached) {
      const age = Date.now() - cached.cachedAt;
      const isStale = age > cached.ttl;

      if (!isStale) {
        logger.info(
          `[getModels] ✓ Using cached models (${cached.models.length} models, age: ${Math.round(age / 60000)}min)`
        );
        return cached.models;
      }
    }

    // Cache is stale or missing, refresh
    return this.refreshModels();
  }

  /**
   * Get models with cache metadata
   *
   * @param forceRefresh - If true, bypass cache and fetch fresh data
   * @returns Object containing models and cache timestamp
   */
  async getModelsWithMetadata(
    forceRefresh = false
  ): Promise<{ models: CodexModel[]; cachedAt: number }> {
    const models = await this.getModels(forceRefresh);

    // Try to get the actual cache timestamp
    const cached = await this.loadFromCache();
    const cachedAt = cached?.cachedAt ?? Date.now();

    return { models, cachedAt };
  }

  /**
   * Refresh models from app-server and update cache
   *
   * Thread-safe: deduplicates concurrent refresh requests
   */
  async refreshModels(): Promise<CodexModel[]> {
    // Deduplicate concurrent refresh requests
    if (this.inFlightRefresh) {
      return this.inFlightRefresh;
    }

    // Start new refresh
    this.inFlightRefresh = this.doRefresh();

    try {
      const models = await this.inFlightRefresh;
      return models;
    } finally {
      this.inFlightRefresh = null;
    }
  }

  /**
   * Clear the cache file
   */
  async clearCache(): Promise<void> {
    logger.info('[clearCache] Clearing cache...');

    try {
      await secureFs.unlink(this.cacheFilePath);
      logger.info('[clearCache] Cache cleared');
    } catch (error) {
      if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
        logger.error('[clearCache] Failed to clear cache:', error);
      }
    }
  }

  /**
   * Internal method to perform the actual refresh
   */
  private async doRefresh(): Promise<CodexModel[]> {
    try {
      // Check if app-server is available
      const isAvailable = await this.appServerService.isAvailable();
      if (!isAvailable) {
        return [];
      }

      // Fetch models from app-server
      const response = await this.appServerService.getModels();
      if (!response || !response.data) {
        return [];
      }

      // Transform models to UI format
      const models = response.data.map((model) => this.transformModel(model));

      // Save to cache
      await this.saveToCache(models);

      logger.info(`[refreshModels] ✓ Fetched fresh models (${models.length} models)`);

      return models;
    } catch (error) {
      logger.error('[doRefresh] Refresh failed:', error);
      return [];
    }
  }

  /**
   * Transform app-server model to UI-compatible format
   */
  private transformModel(appServerModel: AppServerModel): CodexModel {
    return {
      id: `codex-${appServerModel.id}`, // Add 'codex-' prefix for compatibility
      label: appServerModel.displayName,
      description: appServerModel.description,
      hasThinking: appServerModel.supportedReasoningEfforts.length > 0,
      supportsVision: true, // All Codex models support vision
      tier: this.inferTier(appServerModel.id),
      isDefault: appServerModel.isDefault,
    };
  }

  /**
   * Infer tier from model ID
   */
  private inferTier(modelId: string): 'premium' | 'standard' | 'basic' {
    if (modelId.includes('max') || modelId.includes('gpt-5.2-codex')) {
      return 'premium';
    }
    if (modelId.includes('mini')) {
      return 'basic';
    }
    return 'standard';
  }

  /**
   * Load cache from disk
   */
  private async loadFromCache(): Promise<CodexModelCache | null> {
    try {
      const content = await secureFs.readFile(this.cacheFilePath, 'utf-8');
      const cache = JSON.parse(content.toString()) as CodexModelCache;

      // Validate cache structure
      if (!Array.isArray(cache.models) || typeof cache.cachedAt !== 'number') {
        logger.warn('[loadFromCache] Invalid cache structure, ignoring');
        return null;
      }

      return cache;
    } catch (error) {
      if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
        logger.warn('[loadFromCache] Failed to read cache:', error);
      }
      return null;
    }
  }

  /**
   * Save cache to disk (atomic write)
   */
  private async saveToCache(models: CodexModel[]): Promise<void> {
    const cache: CodexModelCache = {
      models,
      cachedAt: Date.now(),
      ttl: this.ttl,
    };

    const tempPath = `${this.cacheFilePath}.tmp.${Date.now()}`;

    try {
      // Write to temp file
      const content = JSON.stringify(cache, null, 2);
      await secureFs.writeFile(tempPath, content, 'utf-8');

      // Atomic rename
      await secureFs.rename(tempPath, this.cacheFilePath);
    } catch (error) {
      logger.error('[saveToCache] Failed to save cache:', error);

      // Clean up temp file
      try {
        await secureFs.unlink(tempPath);
      } catch {
        // Ignore cleanup errors
      }
    }
  }
}
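A minimal usage sketch of the service above, assuming a writable data directory and an already-constructed CodexAppServerService; the variable names are illustrative wiring, not part of this change:

import { CodexModelCacheService } from './codex-model-cache-service.js';

// Assumed: appServerService is an existing CodexAppServerService instance
// and dataDir points at the server's writable data directory.
const modelCache = new CodexModelCacheService(dataDir, appServerService);

// First call fetches from the app-server and persists codex-models-cache.json;
// later calls within the 1-hour TTL are served from the disk cache.
const models = await modelCache.getModels();

// Bypass the TTL when the UI explicitly requests fresh data.
const fresh = await modelCache.getModels(true);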
@@ -1,11 +1,11 @@
|
||||
import {
|
||||
findCodexCliPath,
|
||||
spawnProcess,
|
||||
getCodexAuthPath,
|
||||
systemPathExists,
|
||||
systemPathReadFile,
|
||||
} from '@automaker/platform';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import type { CodexAppServerService } from './codex-app-server-service.js';
|
||||
|
||||
const logger = createLogger('CodexUsage');
|
||||
|
||||
@@ -18,19 +18,12 @@ export interface CodexRateLimitWindow {
|
||||
resetsAt: number;
|
||||
}
|
||||
|
||||
export interface CodexCreditsSnapshot {
|
||||
balance?: string;
|
||||
unlimited?: boolean;
|
||||
hasCredits?: boolean;
|
||||
}
|
||||
|
||||
export type CodexPlanType = 'free' | 'plus' | 'pro' | 'team' | 'enterprise' | 'edu' | 'unknown';
|
||||
|
||||
export interface CodexUsageData {
|
||||
rateLimits: {
|
||||
primary?: CodexRateLimitWindow;
|
||||
secondary?: CodexRateLimitWindow;
|
||||
credits?: CodexCreditsSnapshot;
|
||||
planType?: CodexPlanType;
|
||||
} | null;
|
||||
lastUpdated: string;
|
||||
@@ -39,13 +32,24 @@ export interface CodexUsageData {
|
||||
/**
|
||||
* Codex Usage Service
|
||||
*
|
||||
* Attempts to fetch usage data from Codex CLI and OpenAI API.
|
||||
* Codex CLI doesn't provide a direct usage command, but we can:
|
||||
* 1. Parse usage info from error responses (rate limit errors contain plan info)
|
||||
* 2. Check for OpenAI API usage if API key is available
|
||||
* Fetches usage data from Codex CLI using the app-server JSON-RPC API.
|
||||
* Falls back to auth file parsing if app-server is unavailable.
|
||||
*/
|
||||
export class CodexUsageService {
|
||||
private cachedCliPath: string | null = null;
|
||||
private appServerService: CodexAppServerService | null = null;
|
||||
private accountPlanTypeArray: CodexPlanType[] = [
|
||||
'free',
|
||||
'plus',
|
||||
'pro',
|
||||
'team',
|
||||
'enterprise',
|
||||
'edu',
|
||||
];
|
||||
|
||||
constructor(appServerService?: CodexAppServerService) {
|
||||
this.appServerService = appServerService || null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Codex CLI is available on the system
|
||||
@@ -58,60 +62,131 @@ export class CodexUsageService {
|
||||
/**
|
||||
* Attempt to fetch usage data
|
||||
*
|
||||
* Tries multiple approaches:
|
||||
* 1. Always try to get plan type from auth file first (authoritative source)
|
||||
* 2. Check for OpenAI API key in environment for API usage
|
||||
* 3. Make a test request to capture rate limit headers from CLI
|
||||
* 4. Combine results from auth file and CLI
|
||||
* Priority order:
|
||||
* 1. Codex app-server JSON-RPC API (most reliable, provides real-time data)
|
||||
* 2. Auth file JWT parsing (fallback for plan type)
|
||||
*/
|
||||
async fetchUsageData(): Promise<CodexUsageData> {
|
||||
logger.info('[fetchUsageData] Starting...');
|
||||
const cliPath = this.cachedCliPath || (await findCodexCliPath());
|
||||
|
||||
if (!cliPath) {
|
||||
logger.error('[fetchUsageData] Codex CLI not found');
|
||||
throw new Error('Codex CLI not found. Please install it with: npm install -g @openai/codex');
|
||||
}
|
||||
|
||||
// Always try to get plan type from auth file first - this is the authoritative source
|
||||
const authPlanType = await this.getPlanTypeFromAuthFile();
|
||||
logger.info(`[fetchUsageData] Using CLI path: ${cliPath}`);
|
||||
|
||||
// Check if user has an API key that we can use
|
||||
const hasApiKey = !!process.env.OPENAI_API_KEY;
|
||||
|
||||
if (hasApiKey) {
|
||||
// Try to get usage from OpenAI API
|
||||
const openaiUsage = await this.fetchOpenAIUsage();
|
||||
if (openaiUsage) {
|
||||
// Merge with auth file plan type if available
|
||||
if (authPlanType && openaiUsage.rateLimits) {
|
||||
openaiUsage.rateLimits.planType = authPlanType;
|
||||
}
|
||||
return openaiUsage;
|
||||
}
|
||||
// Try to get usage from Codex app-server (most reliable method)
|
||||
const appServerUsage = await this.fetchFromAppServer();
|
||||
if (appServerUsage) {
|
||||
logger.info('[fetchUsageData] ✓ Fetched usage from app-server');
|
||||
return appServerUsage;
|
||||
}
|
||||
|
||||
// Try to get usage from Codex CLI by making a simple request
|
||||
const codexUsage = await this.fetchCodexUsage(cliPath, authPlanType);
|
||||
if (codexUsage) {
|
||||
return codexUsage;
|
||||
}
|
||||
logger.info('[fetchUsageData] App-server failed, trying auth file fallback...');
|
||||
|
||||
// Fallback: try to parse full usage from auth file
|
||||
// Fallback: try to parse usage from auth file
|
||||
const authUsage = await this.fetchFromAuthFile();
|
||||
if (authUsage) {
|
||||
logger.info('[fetchUsageData] ✓ Fetched usage from auth file');
|
||||
return authUsage;
|
||||
}
|
||||
|
||||
// If all else fails, return a message with helpful information
|
||||
throw new Error(
|
||||
'Codex usage statistics require additional configuration. ' +
|
||||
'To enable usage tracking:\n\n' +
|
||||
'1. Set your OpenAI API key in the environment:\n' +
|
||||
' export OPENAI_API_KEY=sk-...\n\n' +
|
||||
'2. Or check your usage at:\n' +
|
||||
' https://platform.openai.com/usage\n\n' +
|
||||
'Note: If using Codex CLI with ChatGPT OAuth authentication, ' +
|
||||
'usage data must be queried through your OpenAI account.'
|
||||
);
|
||||
logger.info('[fetchUsageData] All methods failed, returning unknown');
|
||||
|
||||
// If all else fails, return unknown
|
||||
return {
|
||||
rateLimits: {
|
||||
planType: 'unknown',
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch usage data from Codex app-server using JSON-RPC API
|
||||
* This is the most reliable method as it gets real-time data from OpenAI
|
||||
*/
|
||||
private async fetchFromAppServer(): Promise<CodexUsageData | null> {
|
||||
try {
|
||||
// Use CodexAppServerService if available
|
||||
if (!this.appServerService) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Fetch account and rate limits in parallel
|
||||
const [accountResult, rateLimitsResult] = await Promise.all([
|
||||
this.appServerService.getAccount(),
|
||||
this.appServerService.getRateLimits(),
|
||||
]);
|
||||
|
||||
if (!accountResult) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Build response
|
||||
// Prefer planType from rateLimits (more accurate/current) over account (can be stale)
|
||||
let planType: CodexPlanType = 'unknown';
|
||||
|
||||
// First try rate limits planType (most accurate)
|
||||
const rateLimitsPlanType = rateLimitsResult?.rateLimits?.planType;
|
||||
if (rateLimitsPlanType) {
|
||||
const normalizedType = rateLimitsPlanType.toLowerCase() as CodexPlanType;
|
||||
if (this.accountPlanTypeArray.includes(normalizedType)) {
|
||||
planType = normalizedType;
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to account planType if rate limits didn't have it
|
||||
if (planType === 'unknown' && accountResult.account?.planType) {
|
||||
const normalizedType = accountResult.account.planType.toLowerCase() as CodexPlanType;
|
||||
if (this.accountPlanTypeArray.includes(normalizedType)) {
|
||||
planType = normalizedType;
|
||||
}
|
||||
}
|
||||
|
||||
const result: CodexUsageData = {
|
||||
rateLimits: {
|
||||
planType,
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
|
||||
// Add rate limit info if available
|
||||
if (rateLimitsResult?.rateLimits?.primary) {
|
||||
const primary = rateLimitsResult.rateLimits.primary;
|
||||
result.rateLimits!.primary = {
|
||||
limit: -1, // Not provided by API
|
||||
used: -1, // Not provided by API
|
||||
remaining: -1, // Not provided by API
|
||||
usedPercent: primary.usedPercent,
|
||||
windowDurationMins: primary.windowDurationMins,
|
||||
resetsAt: primary.resetsAt,
|
||||
};
|
||||
}
|
||||
|
||||
// Add secondary rate limit if available
|
||||
if (rateLimitsResult?.rateLimits?.secondary) {
|
||||
const secondary = rateLimitsResult.rateLimits.secondary;
|
||||
result.rateLimits!.secondary = {
|
||||
limit: -1, // Not provided by API
|
||||
used: -1, // Not provided by API
|
||||
remaining: -1, // Not provided by API
|
||||
usedPercent: secondary.usedPercent,
|
||||
windowDurationMins: secondary.windowDurationMins,
|
||||
resetsAt: secondary.resetsAt,
|
||||
};
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`[fetchFromAppServer] ✓ Plan: ${planType}, Primary: ${result.rateLimits?.primary?.usedPercent || 'N/A'}%, Secondary: ${result.rateLimits?.secondary?.usedPercent || 'N/A'}%`
|
||||
);
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error('[fetchFromAppServer] Failed:', error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -121,9 +196,11 @@ export class CodexUsageService {
|
||||
private async getPlanTypeFromAuthFile(): Promise<CodexPlanType> {
|
||||
try {
|
||||
const authFilePath = getCodexAuthPath();
|
||||
const exists = await systemPathExists(authFilePath);
|
||||
logger.info(`[getPlanTypeFromAuthFile] Auth file path: ${authFilePath}`);
|
||||
const exists = systemPathExists(authFilePath);
|
||||
|
||||
if (!exists) {
|
||||
logger.warn('[getPlanTypeFromAuthFile] Auth file does not exist');
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
@@ -131,16 +208,24 @@ export class CodexUsageService {
|
||||
const authData = JSON.parse(authContent);
|
||||
|
||||
if (!authData.tokens?.id_token) {
|
||||
logger.info('[getPlanTypeFromAuthFile] No id_token in auth file');
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
const claims = this.parseJwt(authData.tokens.id_token);
|
||||
if (!claims) {
|
||||
logger.info('[getPlanTypeFromAuthFile] Failed to parse JWT');
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
logger.info('[getPlanTypeFromAuthFile] JWT claims keys:', Object.keys(claims));
|
||||
|
||||
// Extract plan type from nested OpenAI auth object with type validation
|
||||
const openaiAuthClaim = claims['https://api.openai.com/auth'];
|
||||
logger.info(
|
||||
'[getPlanTypeFromAuthFile] OpenAI auth claim:',
|
||||
JSON.stringify(openaiAuthClaim, null, 2)
|
||||
);
|
||||
|
||||
let accountType: string | undefined;
|
||||
let isSubscriptionExpired = false;
|
||||
@@ -188,154 +273,23 @@ export class CodexUsageService {
|
||||
}
|
||||
|
||||
if (accountType) {
|
||||
const normalizedType = accountType.toLowerCase();
|
||||
if (['free', 'plus', 'pro', 'team', 'enterprise', 'edu'].includes(normalizedType)) {
|
||||
return normalizedType as CodexPlanType;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to get plan type from auth file:', error);
|
||||
}
|
||||
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to fetch usage from OpenAI API using the API key
|
||||
*/
|
||||
private async fetchOpenAIUsage(): Promise<CodexUsageData | null> {
|
||||
const apiKey = process.env.OPENAI_API_KEY;
|
||||
if (!apiKey) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
const endTime = Math.floor(Date.now() / 1000);
|
||||
const startTime = endTime - 7 * 24 * 60 * 60; // Last 7 days
|
||||
|
||||
const response = await fetch(
|
||||
`https://api.openai.com/v1/organization/usage/completions?start_time=${startTime}&end_time=${endTime}&limit=1`,
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
return this.parseOpenAIUsage(data);
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to fetch from OpenAI API:', error);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse OpenAI usage API response
|
||||
*/
|
||||
private parseOpenAIUsage(data: any): CodexUsageData {
|
||||
let totalInputTokens = 0;
|
||||
let totalOutputTokens = 0;
|
||||
|
||||
if (data.data && Array.isArray(data.data)) {
|
||||
for (const bucket of data.data) {
|
||||
if (bucket.results && Array.isArray(bucket.results)) {
|
||||
for (const result of bucket.results) {
|
||||
totalInputTokens += result.input_tokens || 0;
|
||||
totalOutputTokens += result.output_tokens || 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
rateLimits: {
|
||||
planType: 'unknown',
|
||||
credits: {
|
||||
hasCredits: true,
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to fetch usage by making a test request to Codex CLI
|
||||
* and parsing rate limit information from the response
|
||||
*/
|
||||
private async fetchCodexUsage(
|
||||
cliPath: string,
|
||||
authPlanType: CodexPlanType
|
||||
): Promise<CodexUsageData | null> {
|
||||
try {
|
||||
// Make a simple request to trigger rate limit info if at limit
|
||||
const result = await spawnProcess({
|
||||
command: cliPath,
|
||||
args: ['exec', '--', 'echo', 'test'],
|
||||
cwd: process.cwd(),
|
||||
env: {
|
||||
...process.env,
|
||||
TERM: 'dumb',
|
||||
},
|
||||
timeout: 10000,
|
||||
});
|
||||
|
||||
// Parse the output for rate limit information
|
||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||
|
||||
// Check if we got a rate limit error
|
||||
const rateLimitMatch = combinedOutput.match(
|
||||
/usage_limit_reached.*?"plan_type":"([^"]+)".*?"resets_at":(\d+).*?"resets_in_seconds":(\d+)/
|
||||
);
|
||||
|
||||
if (rateLimitMatch) {
|
||||
// Rate limit error contains the plan type - use that as it's the most authoritative
|
||||
const planType = rateLimitMatch[1] as CodexPlanType;
|
||||
const resetsAt = parseInt(rateLimitMatch[2], 10);
|
||||
const resetsInSeconds = parseInt(rateLimitMatch[3], 10);
|
||||
|
||||
const normalizedType = accountType.toLowerCase() as CodexPlanType;
|
||||
logger.info(
|
||||
`Rate limit hit - plan: ${planType}, resets in ${Math.ceil(resetsInSeconds / 60)} mins`
|
||||
`[getPlanTypeFromAuthFile] Account type: "${accountType}", normalized: "${normalizedType}"`
|
||||
);
|
||||
|
||||
return {
|
||||
rateLimits: {
|
||||
planType,
|
||||
primary: {
|
||||
limit: 0,
|
||||
used: 0,
|
||||
remaining: 0,
|
||||
usedPercent: 100,
|
||||
windowDurationMins: Math.ceil(resetsInSeconds / 60),
|
||||
resetsAt,
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
if (this.accountPlanTypeArray.includes(normalizedType)) {
|
||||
logger.info(`[getPlanTypeFromAuthFile] Returning plan type: ${normalizedType}`);
|
||||
return normalizedType;
|
||||
}
|
||||
} else {
|
||||
logger.info('[getPlanTypeFromAuthFile] No account type found in claims');
|
||||
}
|
||||
|
||||
// No rate limit error - use the plan type from auth file
|
||||
const isFreePlan = authPlanType === 'free';
|
||||
|
||||
return {
|
||||
rateLimits: {
|
||||
planType: authPlanType,
|
||||
credits: {
|
||||
hasCredits: true,
|
||||
unlimited: !isFreePlan && authPlanType !== 'unknown',
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Failed to fetch from Codex CLI:', error);
|
||||
logger.error('[getPlanTypeFromAuthFile] Failed to get plan type from auth file:', error);
|
||||
}
|
||||
|
||||
return null;
|
||||
logger.info('[getPlanTypeFromAuthFile] Returning unknown');
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -343,27 +297,27 @@ export class CodexUsageService {
|
||||
* Reuses getPlanTypeFromAuthFile to avoid code duplication
|
||||
*/
|
||||
private async fetchFromAuthFile(): Promise<CodexUsageData | null> {
|
||||
logger.info('[fetchFromAuthFile] Starting...');
|
||||
try {
|
||||
const planType = await this.getPlanTypeFromAuthFile();
|
||||
logger.info(`[fetchFromAuthFile] Got plan type: ${planType}`);
|
||||
|
||||
if (planType === 'unknown') {
|
||||
logger.info('[fetchFromAuthFile] Plan type unknown, returning null');
|
||||
return null;
|
||||
}
|
||||
|
||||
const isFreePlan = planType === 'free';
|
||||
|
||||
return {
|
||||
const result: CodexUsageData = {
|
||||
rateLimits: {
|
||||
planType,
|
||||
credits: {
|
||||
hasCredits: true,
|
||||
unlimited: !isFreePlan,
|
||||
},
|
||||
},
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
|
||||
logger.info('[fetchFromAuthFile] Returning result:', JSON.stringify(result, null, 2));
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error('Failed to parse auth file:', error);
|
||||
logger.error('[fetchFromAuthFile] Failed to parse auth file:', error);
|
||||
}
|
||||
|
||||
return null;
|
||||
@@ -372,7 +326,7 @@ export class CodexUsageService {
|
||||
/**
|
||||
* Parse JWT token to extract claims
|
||||
*/
|
||||
private parseJwt(token: string): any {
|
||||
private parseJwt(token: string): Record<string, unknown> | null {
|
||||
try {
|
||||
const parts = token.split('.');
|
||||
|
||||
@@ -383,18 +337,8 @@ export class CodexUsageService {
|
||||
const base64Url = parts[1];
|
||||
const base64 = base64Url.replace(/-/g, '+').replace(/_/g, '/');
|
||||
|
||||
// Use Buffer for Node.js environment instead of atob
|
||||
let jsonPayload: string;
|
||||
if (typeof Buffer !== 'undefined') {
|
||||
jsonPayload = Buffer.from(base64, 'base64').toString('utf-8');
|
||||
} else {
|
||||
jsonPayload = decodeURIComponent(
|
||||
atob(base64)
|
||||
.split('')
|
||||
.map((c) => '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2))
|
||||
.join('')
|
||||
);
|
||||
}
|
||||
// Use Buffer for Node.js environment
|
||||
const jsonPayload = Buffer.from(base64, 'base64').toString('utf-8');
|
||||
|
||||
return JSON.parse(jsonPayload);
|
||||
} catch {
|
||||
|
||||
@@ -12,24 +12,123 @@ import * as secureFs from '../lib/secure-fs.js';
|
||||
import path from 'path';
|
||||
import net from 'net';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import type { EventEmitter } from '../lib/events.js';
|
||||
|
||||
const logger = createLogger('DevServerService');
|
||||
|
||||
// Maximum scrollback buffer size (characters) - matches TerminalService pattern
|
||||
const MAX_SCROLLBACK_SIZE = 50000; // ~50KB per dev server
|
||||
|
||||
// Throttle output to prevent overwhelming WebSocket under heavy load
|
||||
const OUTPUT_THROTTLE_MS = 4; // ~250fps max update rate for responsive feedback
|
||||
const OUTPUT_BATCH_SIZE = 4096; // Smaller batches for lower latency
|
||||
|
||||
export interface DevServerInfo {
|
||||
worktreePath: string;
|
||||
port: number;
|
||||
url: string;
|
||||
process: ChildProcess | null;
|
||||
startedAt: Date;
|
||||
// Scrollback buffer for log history (replay on reconnect)
|
||||
scrollbackBuffer: string;
|
||||
// Pending output to be flushed to subscribers
|
||||
outputBuffer: string;
|
||||
// Throttle timer for batching output
|
||||
flushTimeout: NodeJS.Timeout | null;
|
||||
// Flag to indicate server is stopping (prevents output after stop)
|
||||
stopping: boolean;
|
||||
}
|
||||
|
||||
// Port allocation starts at 3001 to avoid conflicts with common dev ports
|
||||
const BASE_PORT = 3001;
|
||||
const MAX_PORT = 3099; // Safety limit
|
||||
|
||||
// Common livereload ports that may need cleanup when stopping dev servers
|
||||
const LIVERELOAD_PORTS = [35729, 35730, 35731] as const;
|
||||
|
||||
class DevServerService {
|
||||
private runningServers: Map<string, DevServerInfo> = new Map();
|
||||
private allocatedPorts: Set<number> = new Set();
|
||||
private emitter: EventEmitter | null = null;
|
||||
|
||||
/**
|
||||
* Set the event emitter for streaming log events
|
||||
* Called during service initialization with the global event emitter
|
||||
*/
|
||||
setEventEmitter(emitter: EventEmitter): void {
|
||||
this.emitter = emitter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Append data to scrollback buffer with size limit enforcement
|
||||
* Evicts oldest data when buffer exceeds MAX_SCROLLBACK_SIZE
|
||||
*/
|
||||
private appendToScrollback(server: DevServerInfo, data: string): void {
|
||||
server.scrollbackBuffer += data;
|
||||
if (server.scrollbackBuffer.length > MAX_SCROLLBACK_SIZE) {
|
||||
server.scrollbackBuffer = server.scrollbackBuffer.slice(-MAX_SCROLLBACK_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush buffered output to WebSocket subscribers
|
||||
* Sends batched output to prevent overwhelming clients under heavy load
|
||||
*/
|
||||
private flushOutput(server: DevServerInfo): void {
|
||||
// Skip flush if server is stopping or buffer is empty
|
||||
if (server.stopping || server.outputBuffer.length === 0) {
|
||||
server.flushTimeout = null;
|
||||
return;
|
||||
}
|
||||
|
||||
let dataToSend = server.outputBuffer;
|
||||
if (dataToSend.length > OUTPUT_BATCH_SIZE) {
|
||||
// Send in batches if buffer is large
|
||||
dataToSend = server.outputBuffer.slice(0, OUTPUT_BATCH_SIZE);
|
||||
server.outputBuffer = server.outputBuffer.slice(OUTPUT_BATCH_SIZE);
|
||||
// Schedule another flush for remaining data
|
||||
server.flushTimeout = setTimeout(() => this.flushOutput(server), OUTPUT_THROTTLE_MS);
|
||||
} else {
|
||||
server.outputBuffer = '';
|
||||
server.flushTimeout = null;
|
||||
}
|
||||
|
||||
// Emit output event for WebSocket streaming
|
||||
if (this.emitter) {
|
||||
this.emitter.emit('dev-server:output', {
|
||||
worktreePath: server.worktreePath,
|
||||
content: dataToSend,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle incoming stdout/stderr data from dev server process
|
||||
* Buffers data for scrollback replay and schedules throttled emission
|
||||
*/
|
||||
private handleProcessOutput(server: DevServerInfo, data: Buffer): void {
|
||||
// Skip output if server is stopping
|
||||
if (server.stopping) {
|
||||
return;
|
||||
}
|
||||
|
||||
const content = data.toString();
|
||||
|
||||
// Append to scrollback buffer for replay on reconnect
|
||||
this.appendToScrollback(server, content);
|
||||
|
||||
// Buffer output for throttled live delivery
|
||||
server.outputBuffer += content;
|
||||
|
||||
// Schedule flush if not already scheduled
|
||||
if (!server.flushTimeout) {
|
||||
server.flushTimeout = setTimeout(() => this.flushOutput(server), OUTPUT_THROTTLE_MS);
|
||||
}
|
||||
|
||||
// Also log for debugging (existing behavior)
|
||||
logger.debug(`[Port${server.port}] ${content.trim()}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a port is available (not in use by system or by us)
|
||||
@@ -244,10 +343,9 @@ class DevServerService {
|
||||
// Reserve the port (port was already force-killed in findAvailablePort)
|
||||
this.allocatedPorts.add(port);
|
||||
|
||||
// Also kill common related ports (livereload uses 35729 by default)
|
||||
// Also kill common related ports (livereload, etc.)
|
||||
// Some dev servers use fixed ports for HMR/livereload regardless of main port
|
||||
const commonRelatedPorts = [35729, 35730, 35731];
|
||||
for (const relatedPort of commonRelatedPorts) {
|
||||
for (const relatedPort of LIVERELOAD_PORTS) {
|
||||
this.killProcessOnPort(relatedPort);
|
||||
}
|
||||
|
||||
@@ -259,9 +357,14 @@ class DevServerService {
|
||||
logger.debug(`Command: ${devCommand.cmd} ${devCommand.args.join(' ')} with PORT=${port}`);
|
||||
|
||||
// Spawn the dev process with PORT environment variable
|
||||
// FORCE_COLOR enables colored output even when not running in a TTY
|
||||
const env = {
|
||||
...process.env,
|
||||
PORT: String(port),
|
||||
FORCE_COLOR: '1',
|
||||
// Some tools use these additional env vars for color detection
|
||||
COLORTERM: 'truecolor',
|
||||
TERM: 'xterm-256color',
|
||||
};
|
||||
|
||||
const devProcess = spawn(devCommand.cmd, devCommand.args, {
|
||||
@@ -274,32 +377,66 @@ class DevServerService {
|
||||
// Track if process failed early using object to work around TypeScript narrowing
|
||||
const status = { error: null as string | null, exited: false };
|
||||
|
||||
// Log output for debugging
|
||||
// Create server info early so we can reference it in handlers
|
||||
// We'll add it to runningServers after verifying the process started successfully
|
||||
const serverInfo: DevServerInfo = {
|
||||
worktreePath,
|
||||
port,
|
||||
url: `http://localhost:${port}`,
|
||||
process: devProcess,
|
||||
startedAt: new Date(),
|
||||
scrollbackBuffer: '',
|
||||
outputBuffer: '',
|
||||
flushTimeout: null,
|
||||
stopping: false,
|
||||
};
|
||||
|
||||
// Capture stdout with buffer management and event emission
|
||||
if (devProcess.stdout) {
|
||||
devProcess.stdout.on('data', (data: Buffer) => {
|
||||
logger.debug(`[Port${port}] ${data.toString().trim()}`);
|
||||
this.handleProcessOutput(serverInfo, data);
|
||||
});
|
||||
}
|
||||
|
||||
// Capture stderr with buffer management and event emission
|
||||
if (devProcess.stderr) {
|
||||
devProcess.stderr.on('data', (data: Buffer) => {
|
||||
const msg = data.toString().trim();
|
||||
logger.debug(`[Port${port}] ${msg}`);
|
||||
this.handleProcessOutput(serverInfo, data);
|
||||
});
|
||||
}
|
||||
|
||||
// Helper to clean up resources and emit stop event
|
||||
const cleanupAndEmitStop = (exitCode: number | null, errorMessage?: string) => {
|
||||
if (serverInfo.flushTimeout) {
|
||||
clearTimeout(serverInfo.flushTimeout);
|
||||
serverInfo.flushTimeout = null;
|
||||
}
|
||||
|
||||
// Emit stopped event (only if not already stopping - prevents duplicate events)
|
||||
if (this.emitter && !serverInfo.stopping) {
|
||||
this.emitter.emit('dev-server:stopped', {
|
||||
worktreePath,
|
||||
port,
|
||||
exitCode,
|
||||
error: errorMessage,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
this.allocatedPorts.delete(port);
|
||||
this.runningServers.delete(worktreePath);
|
||||
};
|
||||
|
||||
devProcess.on('error', (error) => {
|
||||
logger.error(`Process error:`, error);
|
||||
status.error = error.message;
|
||||
this.allocatedPorts.delete(port);
|
||||
this.runningServers.delete(worktreePath);
|
||||
cleanupAndEmitStop(null, error.message);
|
||||
});
|
||||
|
||||
devProcess.on('exit', (code) => {
|
||||
logger.info(`Process for ${worktreePath} exited with code ${code}`);
|
||||
status.exited = true;
|
||||
this.allocatedPorts.delete(port);
|
||||
this.runningServers.delete(worktreePath);
|
||||
cleanupAndEmitStop(code);
|
||||
});
|
||||
|
||||
// Wait a moment to see if the process fails immediately
|
||||
@@ -319,16 +456,19 @@ class DevServerService {
|
||||
};
|
||||
}
|
||||
|
||||
const serverInfo: DevServerInfo = {
|
||||
worktreePath,
|
||||
port,
|
||||
url: `http://localhost:${port}`,
|
||||
process: devProcess,
|
||||
startedAt: new Date(),
|
||||
};
|
||||
|
||||
// Server started successfully - add to running servers map
|
||||
this.runningServers.set(worktreePath, serverInfo);
|
||||
|
||||
// Emit started event for WebSocket subscribers
|
||||
if (this.emitter) {
|
||||
this.emitter.emit('dev-server:started', {
|
||||
worktreePath,
|
||||
port,
|
||||
url: serverInfo.url,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
result: {
|
||||
@@ -365,6 +505,28 @@ class DevServerService {
|
||||
|
||||
logger.info(`Stopping dev server for ${worktreePath}`);
|
||||
|
||||
// Mark as stopping to prevent further output events
|
||||
server.stopping = true;
|
||||
|
||||
// Clean up flush timeout to prevent memory leaks
|
||||
if (server.flushTimeout) {
|
||||
clearTimeout(server.flushTimeout);
|
||||
server.flushTimeout = null;
|
||||
}
|
||||
|
||||
// Clear any pending output buffer
|
||||
server.outputBuffer = '';
|
||||
|
||||
// Emit stopped event immediately so UI updates right away
|
||||
if (this.emitter) {
|
||||
this.emitter.emit('dev-server:stopped', {
|
||||
worktreePath,
|
||||
port: server.port,
|
||||
exitCode: null, // Will be populated by exit handler if process exits normally
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
// Kill the process
|
||||
if (server.process && !server.process.killed) {
|
||||
server.process.kill('SIGTERM');
|
||||
@@ -422,6 +584,41 @@ class DevServerService {
|
||||
return this.runningServers.get(worktreePath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get buffered logs for a worktree's dev server
|
||||
* Returns the scrollback buffer containing historical log output
|
||||
* Used by the API to serve logs to clients on initial connection
|
||||
*/
|
||||
getServerLogs(worktreePath: string): {
|
||||
success: boolean;
|
||||
result?: {
|
||||
worktreePath: string;
|
||||
port: number;
|
||||
logs: string;
|
||||
startedAt: string;
|
||||
};
|
||||
error?: string;
|
||||
} {
|
||||
const server = this.runningServers.get(worktreePath);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: `No dev server running for worktree: ${worktreePath}`,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
result: {
|
||||
worktreePath: server.worktreePath,
|
||||
port: server.port,
|
||||
logs: server.scrollbackBuffer,
|
||||
startedAt: server.startedAt.toISOString(),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all allocated ports
|
||||
*/
|
||||
|
||||
@@ -308,13 +308,15 @@ export class FeatureLoader {
   * @param updates - Partial feature updates
   * @param descriptionHistorySource - Source of description change ('enhance' or 'edit')
   * @param enhancementMode - Enhancement mode if source is 'enhance'
   * @param preEnhancementDescription - Description before enhancement (for restoring original)
   */
  async update(
    projectPath: string,
    featureId: string,
    updates: Partial<Feature>,
    descriptionHistorySource?: 'enhance' | 'edit',
    enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance'
    enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance' | 'ux-reviewer',
    preEnhancementDescription?: string
  ): Promise<Feature> {
    const feature = await this.get(projectPath, featureId);
    if (!feature) {
@@ -338,9 +340,31 @@ export class FeatureLoader {
      updates.description !== feature.description &&
      updates.description.trim()
    ) {
      const timestamp = new Date().toISOString();

      // If this is an enhancement and we have the pre-enhancement description,
      // add the original text to history first (so user can restore to it)
      if (
        descriptionHistorySource === 'enhance' &&
        preEnhancementDescription &&
        preEnhancementDescription.trim()
      ) {
        // Check if this pre-enhancement text is different from the last history entry
        const lastEntry = updatedHistory[updatedHistory.length - 1];
        if (!lastEntry || lastEntry.description !== preEnhancementDescription) {
          const preEnhanceEntry: DescriptionHistoryEntry = {
            description: preEnhancementDescription,
            timestamp,
            source: updatedHistory.length === 0 ? 'initial' : 'edit',
          };
          updatedHistory = [...updatedHistory, preEnhanceEntry];
        }
      }

      // Add the new/enhanced description to history
      const historyEntry: DescriptionHistoryEntry = {
        description: updates.description,
        timestamp: new Date().toISOString(),
        timestamp,
        source: descriptionHistorySource || 'edit',
        ...(descriptionHistorySource === 'enhance' && enhancementMode ? { enhancementMode } : {}),
      };
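Illustratively, after an enhance call that passes preEnhancementDescription, the tail of the history gains two entries; originalText, enhancedText, and timestamp below are placeholders, and the field names follow DescriptionHistoryEntry as used in the hunk above:

// Assumed resulting shape (illustrative values only):
const expectedHistoryTail: DescriptionHistoryEntry[] = [
  // Original text captured first so the user can restore it
  { description: originalText, timestamp, source: 'edit' },
  // Then the enhanced text, tagged with its source and mode
  { description: enhancedText, timestamp, source: 'enhance', enhancementMode: 'improve' },
];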
360  apps/server/src/services/init-script-service.ts  Normal file
@@ -0,0 +1,360 @@
/**
 * Init Script Service - Executes worktree initialization scripts
 *
 * Runs the .automaker/worktree-init.sh script after worktree creation.
 * Uses Git Bash on Windows for cross-platform shell script compatibility.
 */

import { spawn } from 'child_process';
import path from 'path';
import { createLogger } from '@automaker/utils';
import { systemPathExists, getShellPaths, findGitBashPath } from '@automaker/platform';
import { findCommand } from '../lib/cli-detection.js';
import type { EventEmitter } from '../lib/events.js';
import { readWorktreeMetadata, writeWorktreeMetadata } from '../lib/worktree-metadata.js';
import * as secureFs from '../lib/secure-fs.js';

const logger = createLogger('InitScript');

export interface InitScriptOptions {
  /** Absolute path to the project root */
  projectPath: string;
  /** Absolute path to the worktree directory */
  worktreePath: string;
  /** Branch name for this worktree */
  branch: string;
  /** Event emitter for streaming output */
  emitter: EventEmitter;
}

interface ShellCommand {
  shell: string;
  args: string[];
}

/**
 * Init Script Service
 *
 * Handles execution of worktree initialization scripts with cross-platform
 * shell detection and proper streaming of output via WebSocket events.
 */
export class InitScriptService {
  private cachedShellCommand: ShellCommand | null | undefined = undefined;

  /**
   * Get the path to the init script for a project
   */
  getInitScriptPath(projectPath: string): string {
    return path.join(projectPath, '.automaker', 'worktree-init.sh');
  }

  /**
   * Check if the init script has already been run for a worktree
   */
  async hasInitScriptRun(projectPath: string, branch: string): Promise<boolean> {
    const metadata = await readWorktreeMetadata(projectPath, branch);
    return metadata?.initScriptRan === true;
  }

  /**
   * Find the appropriate shell for running scripts
   * Uses findGitBashPath() on Windows to avoid WSL bash, then falls back to PATH
   */
  async findShellCommand(): Promise<ShellCommand | null> {
    // Return cached result if available
    if (this.cachedShellCommand !== undefined) {
      return this.cachedShellCommand;
    }

    if (process.platform === 'win32') {
      // On Windows, prioritize Git Bash over WSL bash (C:\Windows\System32\bash.exe)
      // WSL bash may not be properly configured and causes ENOENT errors

      // First try known Git Bash installation paths
      const gitBashPath = await findGitBashPath();
      if (gitBashPath) {
        logger.debug(`Found Git Bash at: ${gitBashPath}`);
        this.cachedShellCommand = { shell: gitBashPath, args: [] };
        return this.cachedShellCommand;
      }

      // Fall back to finding bash in PATH, but skip WSL bash
      const bashInPath = await findCommand(['bash']);
      if (bashInPath && !bashInPath.toLowerCase().includes('system32')) {
        logger.debug(`Found bash in PATH at: ${bashInPath}`);
        this.cachedShellCommand = { shell: bashInPath, args: [] };
        return this.cachedShellCommand;
      }

      logger.warn('Git Bash not found. WSL bash was skipped to avoid compatibility issues.');
      this.cachedShellCommand = null;
      return null;
    }

    // Unix-like systems: use getShellPaths() and check existence
    const shellPaths = getShellPaths();
    const posixShells = shellPaths.filter(
      (p) => p.includes('bash') || p === '/bin/sh' || p === '/usr/bin/sh'
    );

    for (const shellPath of posixShells) {
      try {
        if (systemPathExists(shellPath)) {
          this.cachedShellCommand = { shell: shellPath, args: [] };
          return this.cachedShellCommand;
        }
      } catch {
        // Path not allowed or doesn't exist, continue
      }
    }

    // Ultimate fallback
    if (systemPathExists('/bin/sh')) {
      this.cachedShellCommand = { shell: '/bin/sh', args: [] };
      return this.cachedShellCommand;
    }

    this.cachedShellCommand = null;
    return null;
  }

  /**
   * Run the worktree initialization script
   * Non-blocking - returns immediately after spawning
   */
  async runInitScript(options: InitScriptOptions): Promise<void> {
    const { projectPath, worktreePath, branch, emitter } = options;

    const scriptPath = this.getInitScriptPath(projectPath);

    // Check if script exists using secureFs (respects ALLOWED_ROOT_DIRECTORY)
    try {
      await secureFs.access(scriptPath);
    } catch {
      logger.debug(`No init script found at ${scriptPath}`);
      return;
    }

    // Check if already run
    if (await this.hasInitScriptRun(projectPath, branch)) {
      logger.info(`Init script already ran for branch "${branch}", skipping`);
      return;
    }

    // Get shell command
    const shellCmd = await this.findShellCommand();
    if (!shellCmd) {
      const error =
        process.platform === 'win32'
          ? 'Git Bash not found. Please install Git for Windows to run init scripts.'
          : 'No shell found (/bin/bash or /bin/sh)';
      logger.error(error);

      // Update metadata with error, preserving existing metadata
      const existingMetadata = await readWorktreeMetadata(projectPath, branch);
      await writeWorktreeMetadata(projectPath, branch, {
        branch,
        createdAt: existingMetadata?.createdAt || new Date().toISOString(),
        pr: existingMetadata?.pr,
        initScriptRan: true,
        initScriptStatus: 'failed',
        initScriptError: error,
      });

      emitter.emit('worktree:init-completed', {
        projectPath,
        worktreePath,
        branch,
        success: false,
        error,
      });
      return;
    }

    logger.info(`Running init script for branch "${branch}" in ${worktreePath}`);
    logger.debug(`Using shell: ${shellCmd.shell}`);

    // Update metadata to mark as running
    const existingMetadata = await readWorktreeMetadata(projectPath, branch);
    await writeWorktreeMetadata(projectPath, branch, {
      branch,
      createdAt: existingMetadata?.createdAt || new Date().toISOString(),
      pr: existingMetadata?.pr,
      initScriptRan: false,
      initScriptStatus: 'running',
    });

    // Emit started event
    emitter.emit('worktree:init-started', {
      projectPath,
      worktreePath,
      branch,
    });

    // Build safe environment - only pass necessary variables, not all of process.env
    // This prevents exposure of sensitive credentials like ANTHROPIC_API_KEY
    const safeEnv: Record<string, string> = {
      // Automaker-specific variables
      AUTOMAKER_PROJECT_PATH: projectPath,
      AUTOMAKER_WORKTREE_PATH: worktreePath,
      AUTOMAKER_BRANCH: branch,

      // Essential system variables
      PATH: process.env.PATH || '',
      HOME: process.env.HOME || '',
      USER: process.env.USER || '',
      TMPDIR: process.env.TMPDIR || process.env.TEMP || process.env.TMP || '/tmp',

      // Shell and locale
      SHELL: process.env.SHELL || '',
      LANG: process.env.LANG || 'en_US.UTF-8',
      LC_ALL: process.env.LC_ALL || '',

      // Force color output even though we're not a TTY
      FORCE_COLOR: '1',
      npm_config_color: 'always',
      CLICOLOR_FORCE: '1',

      // Git configuration
      GIT_TERMINAL_PROMPT: '0',
    };

    // Platform-specific additions
    if (process.platform === 'win32') {
      safeEnv.USERPROFILE = process.env.USERPROFILE || '';
      safeEnv.APPDATA = process.env.APPDATA || '';
      safeEnv.LOCALAPPDATA = process.env.LOCALAPPDATA || '';
      safeEnv.SystemRoot = process.env.SystemRoot || 'C:\\Windows';
      safeEnv.TEMP = process.env.TEMP || '';
    }

    // Spawn the script with safe environment
    const child = spawn(shellCmd.shell, [...shellCmd.args, scriptPath], {
      cwd: worktreePath,
      env: safeEnv,
      stdio: ['ignore', 'pipe', 'pipe'],
    });

    // Stream stdout
    child.stdout?.on('data', (data: Buffer) => {
      const content = data.toString();
      emitter.emit('worktree:init-output', {
        projectPath,
        branch,
        type: 'stdout',
        content,
      });
    });

    // Stream stderr
    child.stderr?.on('data', (data: Buffer) => {
      const content = data.toString();
      emitter.emit('worktree:init-output', {
        projectPath,
        branch,
        type: 'stderr',
        content,
      });
    });

    // Handle completion
    child.on('exit', async (code) => {
      const success = code === 0;
      const status = success ? 'success' : 'failed';

      logger.info(`Init script for branch "${branch}" ${status} with exit code ${code}`);

      // Update metadata
      const metadata = await readWorktreeMetadata(projectPath, branch);
      await writeWorktreeMetadata(projectPath, branch, {
        branch,
        createdAt: metadata?.createdAt || new Date().toISOString(),
        pr: metadata?.pr,
        initScriptRan: true,
        initScriptStatus: status,
        initScriptError: success ? undefined : `Exit code: ${code}`,
      });

      // Emit completion event
      emitter.emit('worktree:init-completed', {
        projectPath,
        worktreePath,
        branch,
        success,
        exitCode: code,
      });
    });

    child.on('error', async (error) => {
      logger.error(`Init script error for branch "${branch}":`, error);

      // Update metadata
      const metadata = await readWorktreeMetadata(projectPath, branch);
      await writeWorktreeMetadata(projectPath, branch, {
        branch,
        createdAt: metadata?.createdAt || new Date().toISOString(),
        pr: metadata?.pr,
        initScriptRan: true,
        initScriptStatus: 'failed',
        initScriptError: error.message,
      });

      // Emit completion with error
      emitter.emit('worktree:init-completed', {
        projectPath,
        worktreePath,
        branch,
        success: false,
        error: error.message,
      });
    });
  }

  /**
   * Force re-run the worktree initialization script
   * Ignores the initScriptRan flag - useful for testing or re-setup
   */
  async forceRunInitScript(options: InitScriptOptions): Promise<void> {
    const { projectPath, branch } = options;

    // Reset the initScriptRan flag so the script will run
    const metadata = await readWorktreeMetadata(projectPath, branch);
    if (metadata) {
      await writeWorktreeMetadata(projectPath, branch, {
        ...metadata,
        initScriptRan: false,
        initScriptStatus: undefined,
        initScriptError: undefined,
      });
    }

    // Now run the script
    await this.runInitScript(options);
  }
}

// Singleton instance for convenience
let initScriptService: InitScriptService | null = null;

/**
 * Get the singleton InitScriptService instance
 */
export function getInitScriptService(): InitScriptService {
  if (!initScriptService) {
    initScriptService = new InitScriptService();
  }
  return initScriptService;
}

// Export convenience functions that use the singleton
export const getInitScriptPath = (projectPath: string) =>
  getInitScriptService().getInitScriptPath(projectPath);

export const hasInitScriptRun = (projectPath: string, branch: string) =>
  getInitScriptService().hasInitScriptRun(projectPath, branch);

export const runInitScript = (options: InitScriptOptions) =>
  getInitScriptService().runInitScript(options);

export const forceRunInitScript = (options: InitScriptOptions) =>
  getInitScriptService().forceRunInitScript(options);
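A hedged calling sketch for the service above; globalEmitter and the paths are illustrative stand-ins for the server's EventEmitter and real worktree locations, not values from this diff:

import { runInitScript } from './init-script-service.js';

// Assumed: globalEmitter is the server's EventEmitter from '../lib/events.js'.
// runInitScript resolves as soon as the script is spawned; progress arrives via
// 'worktree:init-output' and a final 'worktree:init-completed' event.
await runInitScript({
  projectPath: '/repos/my-project',
  worktreePath: '/repos/my-project/.worktrees/feature-x',
  branch: 'feature-x',
  emitter: globalEmitter,
});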
@@ -22,7 +22,6 @@ import type {
|
||||
Credentials,
|
||||
ProjectSettings,
|
||||
KeyboardShortcuts,
|
||||
AIProfile,
|
||||
ProjectRef,
|
||||
TrashedProjectRef,
|
||||
BoardBackgroundSettings,
|
||||
@@ -299,7 +298,6 @@ export class SettingsService {
|
||||
ignoreEmptyArrayOverwrite('trashedProjects');
|
||||
ignoreEmptyArrayOverwrite('projectHistory');
|
||||
ignoreEmptyArrayOverwrite('recentFolders');
|
||||
ignoreEmptyArrayOverwrite('aiProfiles');
|
||||
ignoreEmptyArrayOverwrite('mcpServers');
|
||||
ignoreEmptyArrayOverwrite('enabledCursorModels');
|
||||
|
||||
@@ -433,6 +431,8 @@ export class SettingsService {
|
||||
*/
|
||||
async getMaskedCredentials(): Promise<{
|
||||
anthropic: { configured: boolean; masked: string };
|
||||
google: { configured: boolean; masked: string };
|
||||
openai: { configured: boolean; masked: string };
|
||||
}> {
|
||||
const credentials = await this.getCredentials();
|
||||
|
||||
@@ -446,6 +446,14 @@ export class SettingsService {
|
||||
configured: !!credentials.apiKeys.anthropic,
|
||||
masked: maskKey(credentials.apiKeys.anthropic),
|
||||
},
|
||||
google: {
|
||||
configured: !!credentials.apiKeys.google,
|
||||
masked: maskKey(credentials.apiKeys.google),
|
||||
},
|
||||
openai: {
|
||||
configured: !!credentials.apiKeys.openai,
|
||||
masked: maskKey(credentials.apiKeys.openai),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
@@ -602,8 +610,6 @@ export class SettingsService {
|
||||
theme: (appState.theme as GlobalSettings['theme']) || 'dark',
|
||||
sidebarOpen: appState.sidebarOpen !== undefined ? (appState.sidebarOpen as boolean) : true,
|
||||
chatHistoryOpen: (appState.chatHistoryOpen as boolean) || false,
|
||||
kanbanCardDetailLevel:
|
||||
(appState.kanbanCardDetailLevel as GlobalSettings['kanbanCardDetailLevel']) || 'standard',
|
||||
maxConcurrency: (appState.maxConcurrency as number) || 3,
|
||||
defaultSkipTests:
|
||||
appState.defaultSkipTests !== undefined ? (appState.defaultSkipTests as boolean) : true,
|
||||
@@ -617,18 +623,15 @@ export class SettingsService {
|
||||
: false,
|
||||
useWorktrees:
|
||||
appState.useWorktrees !== undefined ? (appState.useWorktrees as boolean) : true,
|
||||
showProfilesOnly: (appState.showProfilesOnly as boolean) || false,
|
||||
defaultPlanningMode:
|
||||
(appState.defaultPlanningMode as GlobalSettings['defaultPlanningMode']) || 'skip',
|
||||
defaultRequirePlanApproval: (appState.defaultRequirePlanApproval as boolean) || false,
|
||||
defaultAIProfileId: (appState.defaultAIProfileId as string | null) || null,
|
||||
muteDoneSound: (appState.muteDoneSound as boolean) || false,
|
||||
enhancementModel:
|
||||
(appState.enhancementModel as GlobalSettings['enhancementModel']) || 'sonnet',
|
||||
keyboardShortcuts:
|
||||
(appState.keyboardShortcuts as KeyboardShortcuts) ||
|
||||
DEFAULT_GLOBAL_SETTINGS.keyboardShortcuts,
|
||||
aiProfiles: (appState.aiProfiles as AIProfile[]) || [],
|
||||
projects: (appState.projects as ProjectRef[]) || [],
|
||||
trashedProjects: (appState.trashedProjects as TrashedProjectRef[]) || [],
|
||||
projectHistory: (appState.projectHistory as string[]) || [],
|
||||
|
||||
@@ -7,13 +7,11 @@
|
||||
|
||||
export type {
|
||||
ThemeMode,
|
||||
KanbanCardDetailLevel,
|
||||
ModelAlias,
|
||||
PlanningMode,
|
||||
ThinkingLevel,
|
||||
ModelProvider,
|
||||
KeyboardShortcuts,
|
||||
AIProfile,
|
||||
ProjectRef,
|
||||
TrashedProjectRef,
|
||||
ChatSessionRef,
|
||||
|
||||
@@ -17,6 +17,14 @@ import {
|
||||
type EnhancementMode,
|
||||
} from '@/lib/enhancement-prompts.js';
|
||||
|
||||
const ENHANCEMENT_MODES: EnhancementMode[] = [
|
||||
'improve',
|
||||
'technical',
|
||||
'simplify',
|
||||
'acceptance',
|
||||
'ux-reviewer',
|
||||
];
|
||||
|
||||
describe('enhancement-prompts.ts', () => {
|
||||
describe('System Prompt Constants', () => {
|
||||
it('should have non-empty improve system prompt', () => {
|
||||
@@ -184,8 +192,7 @@ describe('enhancement-prompts.ts', () => {
|
||||
});
|
||||
|
||||
it('should work with all enhancement modes', () => {
|
||||
const modes: EnhancementMode[] = ['improve', 'technical', 'simplify', 'acceptance'];
|
||||
modes.forEach((mode) => {
|
||||
ENHANCEMENT_MODES.forEach((mode) => {
|
||||
const prompt = buildUserPrompt(mode, testText);
|
||||
expect(prompt).toContain(testText);
|
||||
expect(prompt.length).toBeGreaterThan(100);
|
||||
@@ -205,6 +212,7 @@ describe('enhancement-prompts.ts', () => {
expect(isValidEnhancementMode('technical')).toBe(true);
expect(isValidEnhancementMode('simplify')).toBe(true);
expect(isValidEnhancementMode('acceptance')).toBe(true);
expect(isValidEnhancementMode('ux-reviewer')).toBe(true);
});
it('should return false for invalid modes', () => {
@@ -216,13 +224,12 @@ describe('enhancement-prompts.ts', () => {
});
describe('getAvailableEnhancementModes', () => {
it('should return all four enhancement modes', () => {
it('should return all enhancement modes', () => {
const modes = getAvailableEnhancementModes();
expect(modes).toHaveLength(4);
expect(modes).toContain('improve');
expect(modes).toContain('technical');
expect(modes).toContain('simplify');
expect(modes).toContain('acceptance');
expect(modes).toHaveLength(ENHANCEMENT_MODES.length);
ENHANCEMENT_MODES.forEach((mode) => {
expect(modes).toContain(mode);
});
});
it('should return an array', () => {
@@ -12,6 +12,8 @@ describe('claude-provider.ts', () => {
vi.clearAllMocks();
provider = new ClaudeProvider();
delete process.env.ANTHROPIC_API_KEY;
delete process.env.ANTHROPIC_BASE_URL;
delete process.env.ANTHROPIC_AUTH_TOKEN;
});
describe('getName', () => {
@@ -267,6 +269,93 @@ describe('claude-provider.ts', () => {
});
});
describe('environment variable passthrough', () => {
afterEach(() => {
delete process.env.ANTHROPIC_BASE_URL;
delete process.env.ANTHROPIC_AUTH_TOKEN;
});
it('should pass ANTHROPIC_BASE_URL to SDK env', async () => {
process.env.ANTHROPIC_BASE_URL = 'https://custom.example.com/v1';
vi.mocked(sdk.query).mockReturnValue(
(async function* () {
yield { type: 'text', text: 'test' };
})()
);
const generator = provider.executeQuery({
prompt: 'Test',
cwd: '/test',
});
await collectAsyncGenerator(generator);
expect(sdk.query).toHaveBeenCalledWith({
prompt: 'Test',
options: expect.objectContaining({
env: expect.objectContaining({
ANTHROPIC_BASE_URL: 'https://custom.example.com/v1',
}),
}),
});
});
it('should pass ANTHROPIC_AUTH_TOKEN to SDK env', async () => {
process.env.ANTHROPIC_AUTH_TOKEN = 'custom-auth-token';
vi.mocked(sdk.query).mockReturnValue(
(async function* () {
yield { type: 'text', text: 'test' };
})()
);
const generator = provider.executeQuery({
prompt: 'Test',
cwd: '/test',
});
await collectAsyncGenerator(generator);
expect(sdk.query).toHaveBeenCalledWith({
prompt: 'Test',
options: expect.objectContaining({
env: expect.objectContaining({
ANTHROPIC_AUTH_TOKEN: 'custom-auth-token',
}),
}),
});
});
it('should pass both custom endpoint vars together', async () => {
process.env.ANTHROPIC_BASE_URL = 'https://gateway.example.com';
process.env.ANTHROPIC_AUTH_TOKEN = 'gateway-token';
vi.mocked(sdk.query).mockReturnValue(
(async function* () {
yield { type: 'text', text: 'test' };
})()
);
const generator = provider.executeQuery({
prompt: 'Test',
cwd: '/test',
});
await collectAsyncGenerator(generator);
expect(sdk.query).toHaveBeenCalledWith({
prompt: 'Test',
options: expect.objectContaining({
env: expect.objectContaining({
ANTHROPIC_BASE_URL: 'https://gateway.example.com',
ANTHROPIC_AUTH_TOKEN: 'gateway-token',
}),
}),
});
});
});
describe('getAvailableModels', () => {
it('should return 4 Claude models', () => {
const models = provider.getAvailableModels();
@@ -257,7 +257,7 @@ describe('codex-provider.ts', () => {
expect(results[1].result).toBe('Hello from SDK');
});
it('uses the CLI when tools are requested even if an API key is present', async () => {
it('uses the SDK when API key is present, even for tool requests (to avoid OAuth issues)', async () => {
process.env[OPENAI_API_KEY_ENV] = 'sk-test';
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
@@ -270,8 +270,8 @@ describe('codex-provider.ts', () => {
})
);
expect(codexRunMock).not.toHaveBeenCalled();
expect(spawnJSONLProcess).toHaveBeenCalled();
expect(codexRunMock).toHaveBeenCalled();
expect(spawnJSONLProcess).not.toHaveBeenCalled();
});
it('falls back to CLI when no tools are requested and no API key is available', async () => {
@@ -3,7 +3,7 @@ import {
|
||||
OpencodeProvider,
|
||||
resetToolUseIdCounter,
|
||||
} from '../../../src/providers/opencode-provider.js';
|
||||
import type { ProviderMessage } from '@automaker/types';
|
||||
import type { ProviderMessage, ModelDefinition } from '@automaker/types';
|
||||
import { collectAsyncGenerator } from '../../utils/helpers.js';
|
||||
import { spawnJSONLProcess, getOpenCodeAuthIndicators } from '@automaker/platform';
|
||||
|
||||
@@ -51,63 +51,38 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
|
||||
describe('getAvailableModels', () => {
|
||||
it('should return 10 models', () => {
|
||||
it('should return 5 models', () => {
|
||||
const models = provider.getAvailableModels();
|
||||
expect(models).toHaveLength(10);
|
||||
expect(models).toHaveLength(5);
|
||||
});
|
||||
|
||||
it('should include Claude Sonnet 4.5 (Bedrock) as default', () => {
|
||||
const models = provider.getAvailableModels();
|
||||
const sonnet = models.find(
|
||||
(m) => m.id === 'amazon-bedrock/anthropic.claude-sonnet-4-5-20250929-v1:0'
|
||||
);
|
||||
|
||||
expect(sonnet).toBeDefined();
|
||||
expect(sonnet?.name).toBe('Claude Sonnet 4.5 (Bedrock)');
|
||||
expect(sonnet?.provider).toBe('opencode');
|
||||
expect(sonnet?.default).toBe(true);
|
||||
expect(sonnet?.modelString).toBe('amazon-bedrock/anthropic.claude-sonnet-4-5-20250929-v1:0');
|
||||
});
|
||||
|
||||
it('should include Claude Opus 4.5 (Bedrock)', () => {
|
||||
const models = provider.getAvailableModels();
|
||||
const opus = models.find(
|
||||
(m) => m.id === 'amazon-bedrock/anthropic.claude-opus-4-5-20251101-v1:0'
|
||||
);
|
||||
|
||||
expect(opus).toBeDefined();
|
||||
expect(opus?.name).toBe('Claude Opus 4.5 (Bedrock)');
|
||||
expect(opus?.modelString).toBe('amazon-bedrock/anthropic.claude-opus-4-5-20251101-v1:0');
|
||||
});
|
||||
|
||||
it('should include Claude Haiku 4.5 (Bedrock)', () => {
|
||||
const models = provider.getAvailableModels();
|
||||
const haiku = models.find(
|
||||
(m) => m.id === 'amazon-bedrock/anthropic.claude-haiku-4-5-20251001-v1:0'
|
||||
);
|
||||
|
||||
expect(haiku).toBeDefined();
|
||||
expect(haiku?.name).toBe('Claude Haiku 4.5 (Bedrock)');
|
||||
expect(haiku?.tier).toBe('standard');
|
||||
});
|
||||
|
||||
it('should include free tier Big Pickle model', () => {
|
||||
it('should include Big Pickle as default', () => {
|
||||
const models = provider.getAvailableModels();
|
||||
const bigPickle = models.find((m) => m.id === 'opencode/big-pickle');
|
||||
|
||||
expect(bigPickle).toBeDefined();
|
||||
expect(bigPickle?.name).toBe('Big Pickle (Free)');
|
||||
expect(bigPickle?.provider).toBe('opencode');
|
||||
expect(bigPickle?.default).toBe(true);
|
||||
expect(bigPickle?.modelString).toBe('opencode/big-pickle');
|
||||
expect(bigPickle?.tier).toBe('basic');
|
||||
});
|
||||
|
||||
it('should include DeepSeek R1 (Bedrock)', () => {
|
||||
it('should include free tier GLM model', () => {
|
||||
const models = provider.getAvailableModels();
|
||||
const deepseek = models.find((m) => m.id === 'amazon-bedrock/deepseek.r1-v1:0');
|
||||
const glm = models.find((m) => m.id === 'opencode/glm-4.7-free');
|
||||
|
||||
expect(deepseek).toBeDefined();
|
||||
expect(deepseek?.name).toBe('DeepSeek R1 (Bedrock)');
|
||||
expect(deepseek?.tier).toBe('premium');
|
||||
expect(glm).toBeDefined();
|
||||
expect(glm?.name).toBe('GLM 4.7 Free');
|
||||
expect(glm?.tier).toBe('basic');
|
||||
});
|
||||
|
||||
it('should include free tier MiniMax model', () => {
|
||||
const models = provider.getAvailableModels();
|
||||
const minimax = models.find((m) => m.id === 'opencode/minimax-m2.1-free');
|
||||
|
||||
expect(minimax).toBeDefined();
|
||||
expect(minimax?.name).toBe('MiniMax M2.1 Free');
|
||||
expect(minimax?.tier).toBe('basic');
|
||||
});
|
||||
|
||||
it('should have all models support tools', () => {
|
||||
@@ -128,6 +103,24 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseModelsOutput', () => {
|
||||
it('should parse nested provider model IDs', () => {
|
||||
const output = ['openrouter/anthropic/claude-3.5-sonnet', 'openai/gpt-4o'].join('\n');
|
||||
|
||||
const parseModelsOutput = (
|
||||
provider as unknown as { parseModelsOutput: (output: string) => ModelDefinition[] }
|
||||
).parseModelsOutput.bind(provider);
|
||||
const models = parseModelsOutput(output);
|
||||
|
||||
expect(models).toHaveLength(2);
|
||||
const openrouterModel = models.find((model) => model.id.startsWith('openrouter/'));
|
||||
|
||||
expect(openrouterModel).toBeDefined();
|
||||
expect(openrouterModel?.provider).toBe('openrouter');
|
||||
expect(openrouterModel?.modelString).toBe('openrouter/anthropic/claude-3.5-sonnet');
|
||||
});
|
||||
});
|
||||
|
||||
describe('supportsFeature', () => {
|
||||
it("should support 'tools' feature", () => {
|
||||
expect(provider.supportsFeature('tools')).toBe(true);
|
||||
@@ -168,41 +161,23 @@ describe('opencode-provider.ts', () => {
|
||||
it('should build correct args with run subcommand', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
expect(args[0]).toBe('run');
|
||||
});
|
||||
|
||||
it('should include --format stream-json for streaming output', () => {
|
||||
it('should include --format json for streaming output', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
const formatIndex = args.indexOf('--format');
|
||||
expect(formatIndex).toBeGreaterThan(-1);
|
||||
expect(args[formatIndex + 1]).toBe('stream-json');
|
||||
});
|
||||
|
||||
it('should include -q flag for quiet mode', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
expect(args).toContain('-q');
|
||||
});
|
||||
|
||||
it('should include working directory with -c flag', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
cwd: '/tmp/my-project',
|
||||
});
|
||||
|
||||
const cwdIndex = args.indexOf('-c');
|
||||
expect(cwdIndex).toBeGreaterThan(-1);
|
||||
expect(args[cwdIndex + 1]).toBe('/tmp/my-project');
|
||||
expect(args[formatIndex + 1]).toBe('json');
|
||||
});
|
||||
|
||||
it('should include model with --model flag', () => {
|
||||
@@ -228,30 +203,24 @@ describe('opencode-provider.ts', () => {
|
||||
expect(args[modelIndex + 1]).toBe('anthropic/claude-sonnet-4-5');
|
||||
});
|
||||
|
||||
it('should include dash as final arg for stdin prompt', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
expect(args[args.length - 1]).toBe('-');
|
||||
});
|
||||
|
||||
it('should handle missing cwd', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: 'opencode/big-pickle',
|
||||
});
|
||||
|
||||
expect(args).not.toContain('-c');
|
||||
});
|
||||
|
||||
it('should handle missing model', () => {
|
||||
it('should handle model from opencode provider', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
expect(args).not.toContain('--model');
|
||||
expect(args).toContain('--model');
|
||||
expect(args).toContain('opencode/big-pickle');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -260,12 +229,15 @@ describe('opencode-provider.ts', () => {
|
||||
// ==========================================================================
|
||||
|
||||
describe('normalizeEvent', () => {
|
||||
describe('text-delta events', () => {
|
||||
it('should convert text-delta to assistant message with text content', () => {
|
||||
describe('text events (new OpenCode format)', () => {
|
||||
it('should convert text to assistant message with text content', () => {
|
||||
const event = {
|
||||
type: 'text-delta',
|
||||
text: 'Hello, world!',
|
||||
session_id: 'test-session',
|
||||
type: 'text',
|
||||
part: {
|
||||
type: 'text',
|
||||
text: 'Hello, world!',
|
||||
},
|
||||
sessionID: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -285,10 +257,13 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should return null for empty text-delta', () => {
|
||||
it('should return null for empty text', () => {
|
||||
const event = {
|
||||
type: 'text-delta',
|
||||
text: '',
|
||||
type: 'text',
|
||||
part: {
|
||||
type: 'text',
|
||||
text: '',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -296,9 +271,10 @@ describe('opencode-provider.ts', () => {
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it('should return null for text-delta with undefined text', () => {
|
||||
it('should return null for text with undefined text', () => {
|
||||
const event = {
|
||||
type: 'text-delta',
|
||||
type: 'text',
|
||||
part: {},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -307,27 +283,17 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('text-end events', () => {
|
||||
it('should return null for text-end events (informational)', () => {
|
||||
describe('tool_call events', () => {
|
||||
it('should convert tool_call to assistant message with tool_use content', () => {
|
||||
const event = {
|
||||
type: 'text-end',
|
||||
session_id: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe('tool-call events', () => {
|
||||
it('should convert tool-call to assistant message with tool_use content', () => {
|
||||
const event = {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Read',
|
||||
args: { file_path: '/tmp/test.txt' },
|
||||
session_id: 'test-session',
|
||||
type: 'tool_call',
|
||||
part: {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Read',
|
||||
args: { file_path: '/tmp/test.txt' },
|
||||
},
|
||||
sessionID: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -351,9 +317,12 @@ describe('opencode-provider.ts', () => {
|
||||
|
||||
it('should generate tool_use_id when call_id is missing', () => {
|
||||
const event = {
|
||||
type: 'tool-call',
|
||||
name: 'Write',
|
||||
args: { content: 'test' },
|
||||
type: 'tool_call',
|
||||
part: {
|
||||
type: 'tool-call',
|
||||
name: 'Write',
|
||||
args: { content: 'test' },
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -363,21 +332,27 @@ describe('opencode-provider.ts', () => {
|
||||
|
||||
// Second call should increment
|
||||
const result2 = provider.normalizeEvent({
|
||||
type: 'tool-call',
|
||||
name: 'Edit',
|
||||
args: {},
|
||||
type: 'tool_call',
|
||||
part: {
|
||||
type: 'tool-call',
|
||||
name: 'Edit',
|
||||
args: {},
|
||||
},
|
||||
});
|
||||
expect(result2?.message?.content[0].tool_use_id).toBe('opencode-tool-2');
|
||||
});
|
||||
});
|
||||
|
||||
describe('tool-result events', () => {
|
||||
it('should convert tool-result to assistant message with tool_result content', () => {
|
||||
describe('tool_result events', () => {
|
||||
it('should convert tool_result to assistant message with tool_result content', () => {
|
||||
const event = {
|
||||
type: 'tool-result',
|
||||
call_id: 'call-123',
|
||||
output: 'File contents here',
|
||||
session_id: 'test-session',
|
||||
type: 'tool_result',
|
||||
part: {
|
||||
type: 'tool-result',
|
||||
call_id: 'call-123',
|
||||
output: 'File contents here',
|
||||
},
|
||||
sessionID: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -398,10 +373,13 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle tool-result without call_id', () => {
|
||||
it('should handle tool_result without call_id', () => {
|
||||
const event = {
|
||||
type: 'tool-result',
|
||||
output: 'Result without ID',
|
||||
type: 'tool_result',
|
||||
part: {
|
||||
type: 'tool-result',
|
||||
output: 'Result without ID',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -411,13 +389,16 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('tool-error events', () => {
|
||||
it('should convert tool-error to error message', () => {
|
||||
describe('tool_error events', () => {
|
||||
it('should convert tool_error to error message', () => {
|
||||
const event = {
|
||||
type: 'tool-error',
|
||||
call_id: 'call-123',
|
||||
error: 'File not found',
|
||||
session_id: 'test-session',
|
||||
type: 'tool_error',
|
||||
part: {
|
||||
type: 'tool-error',
|
||||
call_id: 'call-123',
|
||||
error: 'File not found',
|
||||
},
|
||||
sessionID: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -431,8 +412,11 @@ describe('opencode-provider.ts', () => {
|
||||
|
||||
it('should provide default error message when error is missing', () => {
|
||||
const event = {
|
||||
type: 'tool-error',
|
||||
call_id: 'call-123',
|
||||
type: 'tool_error',
|
||||
part: {
|
||||
type: 'tool-error',
|
||||
call_id: 'call-123',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -442,12 +426,14 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('start-step events', () => {
|
||||
it('should return null for start-step events (informational)', () => {
|
||||
describe('step_start events', () => {
|
||||
it('should return null for step_start events (informational)', () => {
|
||||
const event = {
|
||||
type: 'start-step',
|
||||
step: 1,
|
||||
session_id: 'test-session',
|
||||
type: 'step_start',
|
||||
part: {
|
||||
type: 'step-start',
|
||||
},
|
||||
sessionID: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -456,14 +442,16 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('finish-step events', () => {
|
||||
it('should convert successful finish-step to result message', () => {
|
||||
describe('step_finish events', () => {
|
||||
it('should convert successful step_finish to result message', () => {
|
||||
const event = {
|
||||
type: 'finish-step',
|
||||
step: 1,
|
||||
success: true,
|
||||
result: 'Task completed successfully',
|
||||
session_id: 'test-session',
|
||||
type: 'step_finish',
|
||||
part: {
|
||||
type: 'step-finish',
|
||||
reason: 'stop',
|
||||
result: 'Task completed successfully',
|
||||
},
|
||||
sessionID: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -476,13 +464,15 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should convert finish-step with success=false to error message', () => {
|
||||
it('should convert step_finish with error to error message', () => {
|
||||
const event = {
|
||||
type: 'finish-step',
|
||||
step: 1,
|
||||
success: false,
|
||||
error: 'Something went wrong',
|
||||
session_id: 'test-session',
|
||||
type: 'step_finish',
|
||||
part: {
|
||||
type: 'step-finish',
|
||||
reason: 'error',
|
||||
error: 'Something went wrong',
|
||||
},
|
||||
sessionID: 'test-session',
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -494,11 +484,13 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should convert finish-step with error property to error message', () => {
|
||||
it('should convert step_finish with error property to error message', () => {
|
||||
const event = {
|
||||
type: 'finish-step',
|
||||
step: 1,
|
||||
error: 'Process failed',
|
||||
type: 'step_finish',
|
||||
part: {
|
||||
type: 'step-finish',
|
||||
error: 'Process failed',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -509,9 +501,11 @@ describe('opencode-provider.ts', () => {
|
||||
|
||||
it('should provide default error message for failed step without error text', () => {
|
||||
const event = {
|
||||
type: 'finish-step',
|
||||
step: 1,
|
||||
success: false,
|
||||
type: 'step_finish',
|
||||
part: {
|
||||
type: 'step-finish',
|
||||
reason: 'error',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -520,11 +514,14 @@ describe('opencode-provider.ts', () => {
|
||||
expect(result?.error).toBe('Step execution failed');
|
||||
});
|
||||
|
||||
it('should treat finish-step without success flag as success', () => {
|
||||
it('should treat step_finish with reason=stop as success', () => {
|
||||
const event = {
|
||||
type: 'finish-step',
|
||||
step: 1,
|
||||
result: 'Done',
|
||||
type: 'step_finish',
|
||||
part: {
|
||||
type: 'step-finish',
|
||||
reason: 'stop',
|
||||
result: 'Done',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -586,13 +583,12 @@ describe('opencode-provider.ts', () => {
|
||||
return mockedProvider;
|
||||
}
|
||||
|
||||
it('should stream text-delta events as assistant messages', async () => {
|
||||
it('should stream text events as assistant messages', async () => {
|
||||
const mockedProvider = setupMockedProvider();
|
||||
|
||||
const mockEvents = [
|
||||
{ type: 'text-delta', text: 'Hello ' },
|
||||
{ type: 'text-delta', text: 'World!' },
|
||||
{ type: 'text-end' },
|
||||
{ type: 'text', part: { type: 'text', text: 'Hello ' } },
|
||||
{ type: 'text', part: { type: 'text', text: 'World!' } },
|
||||
];
|
||||
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue(
|
||||
@@ -611,7 +607,6 @@ describe('opencode-provider.ts', () => {
|
||||
})
|
||||
);
|
||||
|
||||
// text-end should be filtered out (returns null)
|
||||
expect(results).toHaveLength(2);
|
||||
expect(results[0].type).toBe('assistant');
|
||||
expect(results[0].message?.content[0].text).toBe('Hello ');
|
||||
@@ -623,15 +618,21 @@ describe('opencode-provider.ts', () => {
|
||||
|
||||
const mockEvents = [
|
||||
{
|
||||
type: 'tool-call',
|
||||
call_id: 'tool-1',
|
||||
name: 'Read',
|
||||
args: { file_path: '/tmp/test.txt' },
|
||||
type: 'tool_call',
|
||||
part: {
|
||||
type: 'tool-call',
|
||||
call_id: 'tool-1',
|
||||
name: 'Read',
|
||||
args: { file_path: '/tmp/test.txt' },
|
||||
},
|
||||
},
|
||||
{
|
||||
type: 'tool-result',
|
||||
call_id: 'tool-1',
|
||||
output: 'File contents',
|
||||
type: 'tool_result',
|
||||
part: {
|
||||
type: 'tool-result',
|
||||
call_id: 'tool-1',
|
||||
output: 'File contents',
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
@@ -718,10 +719,7 @@ describe('opencode-provider.ts', () => {
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
expect(call.args).toContain('run');
|
||||
expect(call.args).toContain('--format');
|
||||
expect(call.args).toContain('stream-json');
|
||||
expect(call.args).toContain('-q');
|
||||
expect(call.args).toContain('-c');
|
||||
expect(call.args).toContain('/tmp/workspace');
|
||||
expect(call.args).toContain('json');
|
||||
expect(call.args).toContain('--model');
|
||||
expect(call.args).toContain('anthropic/claude-opus-4-5');
|
||||
});
|
||||
@@ -731,9 +729,9 @@ describe('opencode-provider.ts', () => {
|
||||
|
||||
const mockEvents = [
|
||||
{ type: 'unknown-internal-event', data: 'ignored' },
|
||||
{ type: 'text-delta', text: 'Valid text' },
|
||||
{ type: 'text', part: { type: 'text', text: 'Valid text' } },
|
||||
{ type: 'another-unknown', foo: 'bar' },
|
||||
{ type: 'finish-step', result: 'Done' },
|
||||
{ type: 'step_finish', part: { type: 'step-finish', reason: 'stop', result: 'Done' } },
|
||||
];
|
||||
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue(
|
||||
@@ -747,6 +745,7 @@ describe('opencode-provider.ts', () => {
|
||||
const results = await collectAsyncGenerator<ProviderMessage>(
|
||||
mockedProvider.executeQuery({
|
||||
prompt: 'Test',
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/test',
|
||||
})
|
||||
);
|
||||
@@ -1039,10 +1038,22 @@ describe('opencode-provider.ts', () => {
|
||||
const sessionId = 'test-session-123';
|
||||
|
||||
const mockEvents = [
|
||||
{ type: 'text-delta', text: 'Hello ', session_id: sessionId },
|
||||
{ type: 'tool-call', name: 'Read', args: {}, call_id: 'c1', session_id: sessionId },
|
||||
{ type: 'tool-result', call_id: 'c1', output: 'file content', session_id: sessionId },
|
||||
{ type: 'finish-step', result: 'Done', session_id: sessionId },
|
||||
{ type: 'text', part: { type: 'text', text: 'Hello ' }, sessionID: sessionId },
|
||||
{
|
||||
type: 'tool_call',
|
||||
part: { type: 'tool-call', name: 'Read', args: {}, call_id: 'c1' },
|
||||
sessionID: sessionId,
|
||||
},
|
||||
{
|
||||
type: 'tool_result',
|
||||
part: { type: 'tool-result', call_id: 'c1', output: 'file content' },
|
||||
sessionID: sessionId,
|
||||
},
|
||||
{
|
||||
type: 'step_finish',
|
||||
part: { type: 'step-finish', reason: 'stop', result: 'Done' },
|
||||
sessionID: sessionId,
|
||||
},
|
||||
];
|
||||
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue(
|
||||
@@ -1056,6 +1067,7 @@ describe('opencode-provider.ts', () => {
|
||||
const results = await collectAsyncGenerator<ProviderMessage>(
|
||||
mockedProvider.executeQuery({
|
||||
prompt: 'Test',
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/tmp',
|
||||
})
|
||||
);
|
||||
@@ -1069,12 +1081,15 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
|
||||
describe('normalizeEvent additional edge cases', () => {
|
||||
it('should handle tool-call with empty args object', () => {
|
||||
it('should handle tool_call with empty args object', () => {
|
||||
const event = {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Glob',
|
||||
args: {},
|
||||
type: 'tool_call',
|
||||
part: {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Glob',
|
||||
args: {},
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -1083,12 +1098,15 @@ describe('opencode-provider.ts', () => {
|
||||
expect(result?.message?.content[0].input).toEqual({});
|
||||
});
|
||||
|
||||
it('should handle tool-call with null args', () => {
|
||||
it('should handle tool_call with null args', () => {
|
||||
const event = {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Glob',
|
||||
args: null,
|
||||
type: 'tool_call',
|
||||
part: {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Glob',
|
||||
args: null,
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -1097,18 +1115,21 @@ describe('opencode-provider.ts', () => {
|
||||
expect(result?.message?.content[0].input).toBeNull();
|
||||
});
|
||||
|
||||
it('should handle tool-call with complex nested args', () => {
|
||||
it('should handle tool_call with complex nested args', () => {
|
||||
const event = {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Edit',
|
||||
args: {
|
||||
file_path: '/tmp/test.ts',
|
||||
changes: [
|
||||
{ line: 10, old: 'foo', new: 'bar' },
|
||||
{ line: 20, old: 'baz', new: 'qux' },
|
||||
],
|
||||
options: { replace_all: true },
|
||||
type: 'tool_call',
|
||||
part: {
|
||||
type: 'tool-call',
|
||||
call_id: 'call-123',
|
||||
name: 'Edit',
|
||||
args: {
|
||||
file_path: '/tmp/test.ts',
|
||||
changes: [
|
||||
{ line: 10, old: 'foo', new: 'bar' },
|
||||
{ line: 20, old: 'baz', new: 'qux' },
|
||||
],
|
||||
options: { replace_all: true },
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
@@ -1125,11 +1146,14 @@ describe('opencode-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle tool-result with empty output', () => {
|
||||
it('should handle tool_result with empty output', () => {
|
||||
const event = {
|
||||
type: 'tool-result',
|
||||
call_id: 'call-123',
|
||||
output: '',
|
||||
type: 'tool_result',
|
||||
part: {
|
||||
type: 'tool-result',
|
||||
call_id: 'call-123',
|
||||
output: '',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -1138,10 +1162,13 @@ describe('opencode-provider.ts', () => {
|
||||
expect(result?.message?.content[0].content).toBe('');
|
||||
});
|
||||
|
||||
it('should handle text-delta with whitespace-only text', () => {
|
||||
it('should handle text with whitespace-only text', () => {
|
||||
const event = {
|
||||
type: 'text-delta',
|
||||
text: ' ',
|
||||
type: 'text',
|
||||
part: {
|
||||
type: 'text',
|
||||
text: ' ',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -1151,10 +1178,13 @@ describe('opencode-provider.ts', () => {
|
||||
expect(result?.message?.content[0].text).toBe(' ');
|
||||
});
|
||||
|
||||
it('should handle text-delta with newlines', () => {
|
||||
it('should handle text with newlines', () => {
|
||||
const event = {
|
||||
type: 'text-delta',
|
||||
text: 'Line 1\nLine 2\nLine 3',
|
||||
type: 'text',
|
||||
part: {
|
||||
type: 'text',
|
||||
text: 'Line 1\nLine 2\nLine 3',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -1162,12 +1192,15 @@ describe('opencode-provider.ts', () => {
|
||||
expect(result?.message?.content[0].text).toBe('Line 1\nLine 2\nLine 3');
|
||||
});
|
||||
|
||||
it('should handle finish-step with both result and error (error takes precedence)', () => {
|
||||
it('should handle step_finish with both result and error (error takes precedence)', () => {
|
||||
const event = {
|
||||
type: 'finish-step',
|
||||
result: 'Some result',
|
||||
error: 'But also an error',
|
||||
success: false,
|
||||
type: 'step_finish',
|
||||
part: {
|
||||
type: 'step-finish',
|
||||
reason: 'stop',
|
||||
result: 'Some result',
|
||||
error: 'But also an error',
|
||||
},
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
@@ -1203,7 +1236,7 @@ describe('opencode-provider.ts', () => {
|
||||
const defaultModels = models.filter((m) => m.default === true);
|
||||
|
||||
expect(defaultModels).toHaveLength(1);
|
||||
expect(defaultModels[0].id).toBe('amazon-bedrock/anthropic.claude-sonnet-4-5-20250929-v1:0');
|
||||
expect(defaultModels[0].id).toBe('opencode/big-pickle');
|
||||
});
|
||||
|
||||
it('should have valid tier values for all models', () => {
|
||||
@@ -1231,13 +1264,14 @@ describe('opencode-provider.ts', () => {
|
||||
const longPrompt = 'a'.repeat(10000);
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: longPrompt,
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/tmp',
|
||||
});
|
||||
|
||||
// The prompt is NOT in args (it's passed via stdin)
|
||||
// Just verify the args structure is correct
|
||||
expect(args).toContain('run');
|
||||
expect(args).toContain('-');
|
||||
expect(args).not.toContain('-');
|
||||
expect(args.join(' ')).not.toContain(longPrompt);
|
||||
});
|
||||
|
||||
@@ -1245,22 +1279,25 @@ describe('opencode-provider.ts', () => {
|
||||
const specialPrompt = 'Test $HOME $(rm -rf /) `command` "quotes" \'single\'';
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: specialPrompt,
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/tmp',
|
||||
});
|
||||
|
||||
// Special chars in prompt should not affect args (prompt is via stdin)
|
||||
expect(args).toContain('run');
|
||||
expect(args).toContain('-');
|
||||
expect(args).not.toContain('-');
|
||||
});
|
||||
|
||||
it('should handle cwd with spaces', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Test',
|
||||
model: 'opencode/big-pickle',
|
||||
cwd: '/tmp/path with spaces/project',
|
||||
});
|
||||
|
||||
const cwdIndex = args.indexOf('-c');
|
||||
expect(args[cwdIndex + 1]).toBe('/tmp/path with spaces/project');
|
||||
// cwd is set at subprocess level, not via CLI args
|
||||
expect(args).not.toContain('-c');
|
||||
expect(args).not.toContain('/tmp/path with spaces/project');
|
||||
});
|
||||
|
||||
it('should handle model with unusual characters', () => {
|
|
||||
getSpecRegenerationStatus,
|
||||
} from '@/routes/app-spec/common.js';
|
||||
|
||||
const TEST_PROJECT_PATH = '/tmp/automaker-test-project';
|
||||
|
||||
describe('app-spec/common.ts', () => {
|
||||
beforeEach(() => {
|
||||
// Reset state before each test
|
||||
setRunningState(false, null);
|
||||
setRunningState(TEST_PROJECT_PATH, false, null);
|
||||
});
|
||||
|
||||
describe('setRunningState', () => {
|
||||
it('should set isRunning to true when running is true', () => {
|
||||
setRunningState(true);
|
||||
expect(getSpecRegenerationStatus().isRunning).toBe(true);
|
||||
setRunningState(TEST_PROJECT_PATH, true);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).isRunning).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isRunning to false when running is false', () => {
|
||||
setRunningState(true);
|
||||
setRunningState(false);
|
||||
expect(getSpecRegenerationStatus().isRunning).toBe(false);
|
||||
setRunningState(TEST_PROJECT_PATH, true);
|
||||
setRunningState(TEST_PROJECT_PATH, false);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).isRunning).toBe(false);
|
||||
});
|
||||
|
||||
it('should set currentAbortController when provided', () => {
|
||||
const controller = new AbortController();
|
||||
setRunningState(true, controller);
|
||||
expect(getSpecRegenerationStatus().currentAbortController).toBe(controller);
|
||||
setRunningState(TEST_PROJECT_PATH, true, controller);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).currentAbortController).toBe(controller);
|
||||
});
|
||||
|
||||
it('should set currentAbortController to null when not provided', () => {
|
||||
const controller = new AbortController();
|
||||
setRunningState(true, controller);
|
||||
setRunningState(false);
|
||||
expect(getSpecRegenerationStatus().currentAbortController).toBe(null);
|
||||
setRunningState(TEST_PROJECT_PATH, true, controller);
|
||||
setRunningState(TEST_PROJECT_PATH, false);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).currentAbortController).toBe(null);
|
||||
});
|
||||
|
||||
it('should set currentAbortController to null when explicitly passed null', () => {
|
||||
it('should keep currentAbortController when explicitly passed null while running', () => {
|
||||
const controller = new AbortController();
|
||||
setRunningState(true, controller);
|
||||
setRunningState(true, null);
|
||||
expect(getSpecRegenerationStatus().currentAbortController).toBe(null);
|
||||
setRunningState(TEST_PROJECT_PATH, true, controller);
|
||||
setRunningState(TEST_PROJECT_PATH, true, null);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).currentAbortController).toBe(controller);
|
||||
});
|
||||
|
||||
it('should update state multiple times correctly', () => {
|
||||
const controller1 = new AbortController();
|
||||
const controller2 = new AbortController();
|
||||
|
||||
setRunningState(true, controller1);
|
||||
expect(getSpecRegenerationStatus().isRunning).toBe(true);
|
||||
expect(getSpecRegenerationStatus().currentAbortController).toBe(controller1);
|
||||
setRunningState(TEST_PROJECT_PATH, true, controller1);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).isRunning).toBe(true);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).currentAbortController).toBe(controller1);
|
||||
|
||||
setRunningState(true, controller2);
|
||||
expect(getSpecRegenerationStatus().isRunning).toBe(true);
|
||||
expect(getSpecRegenerationStatus().currentAbortController).toBe(controller2);
|
||||
setRunningState(TEST_PROJECT_PATH, true, controller2);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).isRunning).toBe(true);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).currentAbortController).toBe(controller2);
|
||||
|
||||
setRunningState(false, null);
|
||||
expect(getSpecRegenerationStatus().isRunning).toBe(false);
|
||||
expect(getSpecRegenerationStatus().currentAbortController).toBe(null);
|
||||
setRunningState(TEST_PROJECT_PATH, false, null);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).isRunning).toBe(false);
|
||||
expect(getSpecRegenerationStatus(TEST_PROJECT_PATH).currentAbortController).toBe(null);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -551,7 +551,7 @@ Resets in 2h
|
||||
expect(result.sessionPercentage).toBe(35);
|
||||
expect(pty.spawn).toHaveBeenCalledWith(
|
||||
'cmd.exe',
|
||||
['/c', 'claude', '/usage'],
|
||||
['/c', 'claude', '--add-dir', 'C:\\Users\\testuser'],
|
||||
expect.any(Object)
|
||||
);
|
||||
});
|
||||
@@ -582,8 +582,8 @@ Resets in 2h
|
||||
// Simulate seeing usage data
|
||||
dataCallback!(mockOutput);
|
||||
|
||||
// Advance time to trigger escape key sending
|
||||
vi.advanceTimersByTime(2100);
|
||||
// Advance time to trigger escape key sending (impl uses 3000ms delay)
|
||||
vi.advanceTimersByTime(3100);
|
||||
|
||||
expect(mockPty.write).toHaveBeenCalledWith('\x1b');
|
||||
|
||||
@@ -614,9 +614,10 @@ Resets in 2h
|
||||
const promise = windowsService.fetchUsageData();
|
||||
|
||||
dataCallback!('authentication_error');
|
||||
exitCallback!({ exitCode: 1 });
|
||||
|
||||
await expect(promise).rejects.toThrow('Authentication required');
|
||||
await expect(promise).rejects.toThrow(
|
||||
"Claude CLI authentication issue. Please run 'claude logout' and then 'claude login' in your terminal to refresh permissions."
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle timeout with no data on Windows', async () => {
|
||||
@@ -628,14 +629,18 @@ Resets in 2h
|
||||
onExit: vi.fn(),
|
||||
write: vi.fn(),
|
||||
kill: vi.fn(),
|
||||
killed: false,
|
||||
};
|
||||
vi.mocked(pty.spawn).mockReturnValue(mockPty as any);
|
||||
|
||||
const promise = windowsService.fetchUsageData();
|
||||
|
||||
vi.advanceTimersByTime(31000);
|
||||
// Advance time past timeout (45 seconds)
|
||||
vi.advanceTimersByTime(46000);
|
||||
|
||||
await expect(promise).rejects.toThrow('Command timed out');
|
||||
await expect(promise).rejects.toThrow(
|
||||
'The Claude CLI took too long to respond. This can happen if the CLI is waiting for a trust prompt or is otherwise busy.'
|
||||
);
|
||||
expect(mockPty.kill).toHaveBeenCalled();
|
||||
|
||||
vi.useRealTimers();
|
||||
@@ -654,6 +659,7 @@ Resets in 2h
|
||||
onExit: vi.fn(),
|
||||
write: vi.fn(),
|
||||
kill: vi.fn(),
|
||||
killed: false,
|
||||
};
|
||||
vi.mocked(pty.spawn).mockReturnValue(mockPty as any);
|
||||
|
||||
@@ -662,8 +668,8 @@ Resets in 2h
|
||||
// Simulate receiving usage data
|
||||
dataCallback!('Current session\n65% left\nResets in 2h');
|
||||
|
||||
// Advance time past timeout (30 seconds)
|
||||
vi.advanceTimersByTime(31000);
|
||||
// Advance time past timeout (45 seconds)
|
||||
vi.advanceTimersByTime(46000);
|
||||
|
||||
// Should resolve with data instead of rejecting
|
||||
const result = await promise;
|
||||
@@ -686,6 +692,7 @@ Resets in 2h
|
||||
onExit: vi.fn(),
|
||||
write: vi.fn(),
|
||||
kill: vi.fn(),
|
||||
killed: false,
|
||||
};
|
||||
vi.mocked(pty.spawn).mockReturnValue(mockPty as any);
|
||||
|
||||
@@ -694,8 +701,8 @@ Resets in 2h
|
||||
// Simulate seeing usage data
|
||||
dataCallback!('Current session\n65% left');
|
||||
|
||||
// Advance 2s to trigger ESC
|
||||
vi.advanceTimersByTime(2100);
|
||||
// Advance 3s to trigger ESC (impl uses 3000ms delay)
|
||||
vi.advanceTimersByTime(3100);
|
||||
expect(mockPty.write).toHaveBeenCalledWith('\x1b');
|
||||
|
||||
// Advance another 2s to trigger SIGTERM fallback
|
||||
|
||||
@@ -8,6 +8,7 @@ import fs from 'fs/promises';
|
||||
vi.mock('child_process', () => ({
|
||||
spawn: vi.fn(),
|
||||
execSync: vi.fn(),
|
||||
execFile: vi.fn(),
|
||||
}));
|
||||
|
||||
// Mock secure-fs
|
||||
|
||||
@@ -70,6 +70,8 @@ const eslintConfig = defineConfig([
|
||||
AbortSignal: 'readonly',
|
||||
Audio: 'readonly',
|
||||
ScrollBehavior: 'readonly',
|
||||
URL: 'readonly',
|
||||
URLSearchParams: 'readonly',
|
||||
// Timers
|
||||
setTimeout: 'readonly',
|
||||
setInterval: 'readonly',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@automaker/ui",
|
||||
"version": "0.9.0",
|
||||
"version": "0.11.0",
|
||||
"description": "An autonomous AI development studio that helps you build software faster using AI-powered agents",
|
||||
"homepage": "https://github.com/AutoMaker-Org/automaker",
|
||||
"repository": {
|
||||
@@ -42,6 +42,8 @@
|
||||
"@automaker/dependency-resolver": "1.0.0",
|
||||
"@automaker/types": "1.0.0",
|
||||
"@codemirror/lang-xml": "6.1.0",
|
||||
"@codemirror/language": "^6.12.1",
|
||||
"@codemirror/legacy-modes": "^6.5.2",
|
||||
"@codemirror/theme-one-dark": "6.1.3",
|
||||
"@dnd-kit/core": "6.3.1",
|
||||
"@dnd-kit/sortable": "10.0.0",
|
||||
@@ -54,6 +56,7 @@
|
||||
"@radix-ui/react-label": "2.1.8",
|
||||
"@radix-ui/react-popover": "1.1.15",
|
||||
"@radix-ui/react-radio-group": "1.3.8",
|
||||
"@radix-ui/react-scroll-area": "^1.2.10",
|
||||
"@radix-ui/react-select": "2.2.6",
|
||||
"@radix-ui/react-slider": "1.3.6",
|
||||
"@radix-ui/react-slot": "1.2.4",
|
||||
|
||||
@@ -41,16 +41,13 @@ const E2E_SETTINGS = {
|
||||
theme: 'dark',
|
||||
sidebarOpen: true,
|
||||
chatHistoryOpen: false,
|
||||
kanbanCardDetailLevel: 'standard',
|
||||
maxConcurrency: 3,
|
||||
defaultSkipTests: true,
|
||||
enableDependencyBlocking: true,
|
||||
skipVerificationInAutoMode: false,
|
||||
useWorktrees: true,
|
||||
showProfilesOnly: false,
|
||||
defaultPlanningMode: 'skip',
|
||||
defaultRequirePlanApproval: false,
|
||||
defaultAIProfileId: null,
|
||||
muteDoneSound: false,
|
||||
phaseModels: {
|
||||
enhancementModel: { model: 'sonnet' },
|
||||
@@ -73,7 +70,6 @@ const E2E_SETTINGS = {
|
||||
spec: 'D',
|
||||
context: 'C',
|
||||
settings: 'S',
|
||||
profiles: 'M',
|
||||
terminal: 'T',
|
||||
toggleSidebar: '`',
|
||||
addFeature: 'N',
|
||||
@@ -84,7 +80,6 @@ const E2E_SETTINGS = {
|
||||
projectPicker: 'P',
|
||||
cyclePrevProject: 'Q',
|
||||
cycleNextProject: 'E',
|
||||
addProfile: 'N',
|
||||
splitTerminalRight: 'Alt+D',
|
||||
splitTerminalDown: 'Alt+S',
|
||||
closeTerminal: 'Alt+W',
|
||||
@@ -94,48 +89,6 @@ const E2E_SETTINGS = {
|
||||
githubPrs: 'R',
|
||||
newTerminalTab: 'Alt+T',
|
||||
},
|
||||
aiProfiles: [
|
||||
{
|
||||
id: 'profile-heavy-task',
|
||||
name: 'Heavy Task',
|
||||
description:
|
||||
'Claude Opus with Ultrathink for complex architecture, migrations, or deep debugging.',
|
||||
model: 'opus',
|
||||
thinkingLevel: 'ultrathink',
|
||||
provider: 'claude',
|
||||
isBuiltIn: true,
|
||||
icon: 'Brain',
|
||||
},
|
||||
{
|
||||
id: 'profile-balanced',
|
||||
name: 'Balanced',
|
||||
description: 'Claude Sonnet with medium thinking for typical development tasks.',
|
||||
model: 'sonnet',
|
||||
thinkingLevel: 'medium',
|
||||
provider: 'claude',
|
||||
isBuiltIn: true,
|
||||
icon: 'Scale',
|
||||
},
|
||||
{
|
||||
id: 'profile-quick-edit',
|
||||
name: 'Quick Edit',
|
||||
description: 'Claude Haiku for fast, simple edits and minor fixes.',
|
||||
model: 'haiku',
|
||||
thinkingLevel: 'none',
|
||||
provider: 'claude',
|
||||
isBuiltIn: true,
|
||||
icon: 'Zap',
|
||||
},
|
||||
{
|
||||
id: 'profile-cursor-refactoring',
|
||||
name: 'Cursor Refactoring',
|
||||
description: 'Cursor Composer 1 for refactoring tasks.',
|
||||
provider: 'cursor',
|
||||
cursorModel: 'composer-1',
|
||||
isBuiltIn: true,
|
||||
icon: 'Sparkles',
|
||||
},
|
||||
],
|
||||
// Default test project using the fixture path - tests can override via route mocking if needed
|
||||
projects: [
|
||||
{
|
||||
|
||||
@@ -5,6 +5,7 @@ import { router } from './utils/router';
|
||||
import { SplashScreen } from './components/splash-screen';
|
||||
import { useSettingsSync } from './hooks/use-settings-sync';
|
||||
import { useCursorStatusInit } from './hooks/use-cursor-status-init';
|
||||
import { useProviderAuthInit } from './hooks/use-provider-auth-init';
|
||||
import './styles/global.css';
|
||||
import './styles/theme-imports';
|
||||
|
||||
@@ -24,8 +25,11 @@ export default function App() {
|
||||
useEffect(() => {
|
||||
if (import.meta.env.DEV) {
|
||||
const clearPerfEntries = () => {
|
||||
performance.clearMarks();
|
||||
performance.clearMeasures();
|
||||
// Check if window.performance is available before calling its methods
|
||||
if (window.performance) {
|
||||
window.performance.clearMarks();
|
||||
window.performance.clearMeasures();
|
||||
}
|
||||
};
|
||||
const interval = setInterval(clearPerfEntries, 5000);
|
||||
return () => clearInterval(interval);
|
||||
@@ -45,6 +49,9 @@ export default function App() {
|
||||
// Initialize Cursor CLI status at startup
|
||||
useCursorStatusInit();
|
||||
|
||||
// Initialize Provider auth status at startup (for Claude/Codex usage display)
|
||||
useProviderAuthInit();
|
||||
|
||||
const handleSplashComplete = useCallback(() => {
|
||||
sessionStorage.setItem('automaker-splash-shown', 'true');
|
||||
setShowSplash(false);
|
||||
|
||||
@@ -11,6 +11,7 @@ import { useSetupStore } from '@/store/setup-store';
|
||||
const ERROR_CODES = {
|
||||
API_BRIDGE_UNAVAILABLE: 'API_BRIDGE_UNAVAILABLE',
|
||||
AUTH_ERROR: 'AUTH_ERROR',
|
||||
TRUST_PROMPT: 'TRUST_PROMPT',
|
||||
UNKNOWN: 'UNKNOWN',
|
||||
} as const;
|
||||
|
||||
@@ -55,8 +56,12 @@ export function ClaudeUsagePopover() {
|
||||
}
|
||||
const data = await api.claude.getUsage();
|
||||
if ('error' in data) {
|
||||
// Detect trust prompt error
|
||||
const isTrustPrompt =
|
||||
data.error === 'Trust prompt pending' ||
|
||||
(data.message && data.message.includes('folder permission'));
|
||||
setError({
|
||||
code: ERROR_CODES.AUTH_ERROR,
|
||||
code: isTrustPrompt ? ERROR_CODES.TRUST_PROMPT : ERROR_CODES.AUTH_ERROR,
|
||||
message: data.message || data.error,
|
||||
});
|
||||
return;
|
||||
@@ -257,6 +262,11 @@ export function ClaudeUsagePopover() {
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{error.code === ERROR_CODES.API_BRIDGE_UNAVAILABLE ? (
|
||||
'Ensure the Electron bridge is running or restart the app'
|
||||
) : error.code === ERROR_CODES.TRUST_PROMPT ? (
|
||||
<>
|
||||
Run <code className="font-mono bg-muted px-1 rounded">claude</code> in your
|
||||
terminal and approve access to continue
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
Make sure Claude CLI is installed and authenticated via{' '}
apps/ui/src/components/icons/editor-icons.tsx (new file, 220 lines)
@@ -0,0 +1,220 @@
|
||||
import type { ComponentType, ComponentProps } from 'react';
|
||||
import { FolderOpen } from 'lucide-react';
|
||||
|
||||
type IconProps = ComponentProps<'svg'>;
|
||||
type IconComponent = ComponentType<IconProps>;
|
||||
|
||||
const ANTIGRAVITY_COMMANDS = ['antigravity', 'agy'] as const;
|
||||
const [PRIMARY_ANTIGRAVITY_COMMAND, LEGACY_ANTIGRAVITY_COMMAND] = ANTIGRAVITY_COMMANDS;
|
||||
|
||||
/**
|
||||
* Cursor editor logo icon - from LobeHub icons
|
||||
*/
|
||||
export function CursorIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M22.106 5.68L12.5.135a.998.998 0 00-.998 0L1.893 5.68a.84.84 0 00-.419.726v11.186c0 .3.16.577.42.727l9.607 5.547a.999.999 0 00.998 0l9.608-5.547a.84.84 0 00.42-.727V6.407a.84.84 0 00-.42-.726zm-.603 1.176L12.228 22.92c-.063.108-.228.064-.228-.061V12.34a.59.59 0 00-.295-.51l-9.11-5.26c-.107-.062-.063-.228.062-.228h18.55c.264 0 .428.286.296.514z" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* VS Code editor logo icon
|
||||
*/
|
||||
export function VSCodeIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M23.15 2.587L18.21.21a1.494 1.494 0 0 0-1.705.29l-9.46 8.63-4.12-3.128a.999.999 0 0 0-1.276.057L.327 7.261A1 1 0 0 0 .326 8.74L3.899 12 .326 15.26a1 1 0 0 0 .001 1.479L1.65 17.94a.999.999 0 0 0 1.276.057l4.12-3.128 9.46 8.63a1.492 1.492 0 0 0 1.704.29l4.942-2.377A1.5 1.5 0 0 0 24 20.06V3.939a1.5 1.5 0 0 0-.85-1.352zm-5.146 14.861L10.826 12l7.178-5.448v10.896z" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* VS Code Insiders editor logo icon (same as VS Code)
|
||||
*/
|
||||
export function VSCodeInsidersIcon(props: IconProps) {
|
||||
return <VSCodeIcon {...props} />;
|
||||
}
|
||||
|
||||
/**
|
||||
* Kiro editor logo icon (VS Code fork)
|
||||
*/
|
||||
export function KiroIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 32 32" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M6.594.016A7.006 7.006 0 0 0 .742 3.875a6.996 6.996 0 0 0-.726 2.793C.004 6.878 0 9.93.004 16.227c.004 8.699.008 9.265.031 9.476.113.93.324 1.652.707 2.422a6.918 6.918 0 0 0 3.172 3.148c.75.372 1.508.59 2.398.692.227.027.77.027 9.688.027 8.945 0 9.457 0 9.688-.027.917-.106 1.66-.32 2.437-.707a6.918 6.918 0 0 0 3.148-3.172c.372-.75.59-1.508.692-2.398.027-.227.027-.77.027-9.665 0-9.976.004-9.53-.07-10.03a6.993 6.993 0 0 0-3.024-4.798 6.427 6.427 0 0 0-.757-.445 7.06 7.06 0 0 0-2.774-.734c-.328-.02-18.437-.02-18.773 0Zm10.789 5.406a7.556 7.556 0 0 1 6.008 3.805c.148.257.406.796.52 1.085.394 1 .632 2.157.769 3.75.035.38.05 1.965.023 2.407-.125 2.168-.625 4.183-1.515 6.078a9.77 9.77 0 0 1-.801 1.437c-.93 1.305-2.32 2.332-3.48 2.57-.895.184-1.602-.1-2.048-.827a3.42 3.42 0 0 1-.25-.528c-.035-.097-.062-.129-.086-.09-.003.008-.09.075-.191.153-.95.722-2.02 1.175-3.059 1.293-.273.03-.859.023-1.085-.016-.715-.121-1.286-.441-1.649-.93a2.563 2.563 0 0 1-.328-.632c-.117-.36-.156-.813-.117-1.227.054-.55.226-1.184.484-1.766a.48.48 0 0 0 .043-.117 2.11 2.11 0 0 0-.137.055c-.363.16-.898.305-1.308.351-.844.098-1.426-.14-1.715-.699-.106-.203-.149-.39-.16-.676-.008-.261.008-.43.066-.656.059-.23.121-.367.403-.89.382-.72.492-.946.636-1.348.328-.899.48-1.723.688-3.754.148-1.469.254-2.14.433-2.766.028-.09.078-.277.114-.414.796-3.074 3.113-5.183 6.148-5.601.129-.016.309-.04.399-.047.238-.016.96-.02 1.195 0Zm0 0" />
|
||||
<path d="M16.754 11.336a.815.815 0 0 0-.375.219c-.176.18-.293.441-.356.804-.039.235-.058.602-.039.868.028.406.082.64.204.894.128.262.304.426.546.496.106.031.383.031.5 0 .422-.113.703-.531.801-1.191a4.822 4.822 0 0 0-.012-.95c-.062-.378-.183-.675-.359-.863a.808.808 0 0 0-.648-.293.804.804 0 0 0-.262.016ZM20.375 11.328a1.01 1.01 0 0 0-.363.188c-.164.144-.293.402-.364.718-.05.23-.07.426-.07.743 0 .32.02.511.07.742.11.496.352.808.688.898.121.031.379.031.5 0 .402-.105.68-.5.781-1.11.035-.198.047-.648.024-.87-.063-.63-.293-1.059-.649-1.23a1.513 1.513 0 0 0-.219-.079 1.362 1.362 0 0 0-.398 0Zm0 0" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Zed editor logo icon (from Simple Icons)
|
||||
*/
|
||||
export function ZedIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M2.25 1.5a.75.75 0 0 0-.75.75v16.5H0V2.25A2.25 2.25 0 0 1 2.25 0h20.095c1.002 0 1.504 1.212.795 1.92L10.764 14.298h3.486V12.75h1.5v1.922a1.125 1.125 0 0 1-1.125 1.125H9.264l-2.578 2.578h11.689V9h1.5v9.375a1.5 1.5 0 0 1-1.5 1.5H5.185L2.562 22.5H21.75a.75.75 0 0 0 .75-.75V5.25H24v16.5A2.25 2.25 0 0 1 21.75 24H1.655C.653 24 .151 22.788.86 22.08L13.19 9.75H9.75v1.5h-1.5V9.375A1.125 1.125 0 0 1 9.375 8.25h5.314l2.625-2.625H5.625V15h-1.5V5.625a1.5 1.5 0 0 1 1.5-1.5h13.19L21.438 1.5z" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sublime Text editor logo icon
|
||||
*/
|
||||
export function SublimeTextIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M20.953.004a.397.397 0 0 0-.18.045L3.473 8.63a.397.397 0 0 0-.033.69l4.873 3.33-5.26 2.882a.397.397 0 0 0-.006.692l17.3 9.73a.397.397 0 0 0 .593-.344V15.094a.397.397 0 0 0-.203-.346l-4.917-2.763 5.233-2.725a.397.397 0 0 0 .207-.348V.397a.397.397 0 0 0-.307-.393z" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* macOS Finder icon
|
||||
*/
|
||||
export function FinderIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M2.5 3A2.5 2.5 0 0 0 0 5.5v13A2.5 2.5 0 0 0 2.5 21h19a2.5 2.5 0 0 0 2.5-2.5v-13A2.5 2.5 0 0 0 21.5 3h-19zM7 8.5a1.5 1.5 0 1 1 0 3 1.5 1.5 0 0 1 0-3zm10 0a1.5 1.5 0 1 1 0 3 1.5 1.5 0 0 1 0-3zm-9 6c0-.276.336-.5.75-.5h6.5c.414 0 .75.224.75.5v1c0 .828-1.343 2.5-4 2.5s-4-1.672-4-2.5v-1z" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Windsurf editor logo icon (by Codeium) - from LobeHub icons
|
||||
*/
|
||||
export function WindsurfIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path
|
||||
fillRule="evenodd"
|
||||
clipRule="evenodd"
|
||||
d="M23.78 5.004h-.228a2.187 2.187 0 00-2.18 2.196v4.912c0 .98-.804 1.775-1.76 1.775a1.818 1.818 0 01-1.472-.773L13.168 5.95a2.197 2.197 0 00-1.81-.95c-1.134 0-2.154.972-2.154 2.173v4.94c0 .98-.797 1.775-1.76 1.775-.57 0-1.136-.289-1.472-.773L.408 5.098C.282 4.918 0 5.007 0 5.228v4.284c0 .216.066.426.188.604l5.475 7.889c.324.466.8.812 1.351.938 1.377.316 2.645-.754 2.645-2.117V11.89c0-.98.787-1.775 1.76-1.775h.002c.586 0 1.135.288 1.472.773l4.972 7.163a2.15 2.15 0 001.81.95c1.158 0 2.151-.973 2.151-2.173v-4.939c0-.98.787-1.775 1.76-1.775h.194c.122 0 .22-.1.22-.222V5.225a.221.221 0 00-.22-.222z"
|
||||
/>
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Trae editor logo icon (by ByteDance) - from LobeHub icons
|
||||
*/
|
||||
export function TraeIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M24 20.541H3.428v-3.426H0V3.4h24V20.54zM3.428 17.115h17.144V6.827H3.428v10.288zm8.573-5.196l-2.425 2.424-2.424-2.424 2.424-2.424 2.425 2.424zm6.857-.001l-2.424 2.423-2.425-2.423 2.425-2.425 2.424 2.425z" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* JetBrains Rider logo icon
|
||||
*/
|
||||
export function RiderIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M0 0v24h24V0zm7.031 3.113A4.063 4.063 0 0 1 9.72 4.14a3.23 3.23 0 0 1 .84 2.28A3.16 3.16 0 0 1 8.4 9.54l2.46 3.6H8.28L6.12 9.9H4.38v3.24H2.16V3.12c1.61-.004 3.281.009 4.871-.007zm5.509.007h3.96c3.18 0 5.34 2.16 5.34 5.04 0 2.82-2.16 5.04-5.34 5.04h-3.96zm4.069 1.976c-.607.01-1.235.004-1.849.004v6.06h1.74a2.882 2.882 0 0 0 3.06-3 2.897 2.897 0 0 0-2.951-3.064zM4.319 5.1v2.88H6.6c1.08 0 1.68-.6 1.68-1.44 0-.96-.66-1.44-1.74-1.44zM2.16 19.5h9V21h-9Z" />
|
||||
</svg>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* JetBrains WebStorm logo icon
|
||||
*/
|
||||
export function WebStormIcon(props: IconProps) {
|
||||
return (
|
||||
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
|
||||
<path d="M0 0v24h24V0H0zm17.889 2.889c1.444 0 2.667.444 3.667 1.278l-1.111 1.667c-.889-.611-1.722-1-2.556-1s-1.278.389-1.278.889v.056c0 .667.444.889 2.111 1.333 2 .556 3.111 1.278 3.111 3v.056c0 2-1.5 3.111-3.611 3.111-1.5-.056-3-.611-4.167-1.667l1.278-1.556c.889.722 1.833 1.222 2.944 1.222.889 0 1.389-.333 1.389-.944v-.056c0-.556-.333-.833-2-1.278-2-.5-3.222-1.056-3.222-3.056v-.056c0-1.833 1.444-3 3.444-3zm-16.111.222h2.278l1.5 5.778 1.722-5.778h1.667l1.667 5.778 1.5-5.778h2.333l-2.833 9.944H9.723L8.112 7.277l-1.667 5.778H4.612L1.779 3.111zm.5 16.389h9V21h-9v-1.5z" />
</svg>
);
}

/**
* Xcode logo icon
*/
export function XcodeIcon(props: IconProps) {
return (
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
<path d="M19.06 5.3327c.4517-.1936.7744-.2581 1.097-.1936.5163.1291.7744.5163.968.7098.1936.3872.9034.7744 1.2261.8389.2581.0645.7098-.6453 1.0325-1.2906.3227-.5808.5163-1.3552.4517-1.5488-.0645-.1936-.968-.5808-1.1616-.5808-.1291 0-.3872.1291-.8389.0645-.4517-.0645-.9034-.5808-1.1616-.968-.4517-.6453-1.097-1.0325-1.6778-1.3552-.6453-.3227-1.3552-.5163-2.065-.6453-1.0325-.2581-2.065-.4517-3.0975-.3227-.5808.0645-1.2906.1291-1.8069.3227-.0645 0-.1936.1936-.0645.1936s.5808.0645.5808.0645-.5807.1292-.5807.2583c0 .1291.0645.1291.1291.1291.0645 0 1.4842-.0645 2.065 0 .6453.1291 1.3552.4517 1.8069 1.2261.7744 1.4197.4517 2.7749.2581 3.2266-.968 2.1295-8.6472 15.2294-9.0344 16.1328-.3873.9034-.5163 1.4842.5807 2.065s1.6778.3227 2.0005-.0645c.3872-.5163 7.0339-17.1654 9.2925-18.2624zm-3.6138 8.7117h1.5488c1.0325 0 1.2261.5163 1.2261.7098.0645.5163-.1936 1.1616-1.2261 1.1616h-.968l.7744 1.2906c.4517.7744.2581 1.1616 0 1.4197-.3872.3872-1.2261.3872-1.6778-.4517l-.9034-1.5488c-.6453 1.4197-1.2906 2.9684-2.065 4.7753h4.0009c1.9359 0 3.5492-1.6133 3.5492-3.5492V6.5588c-.0645-.1291-.1936-.0645-.2581 0-.3872.4517-1.4842 2.0004-4.001 7.4856zm-9.8087 8.0019h-.3227c-2.3231 0-4.1945-1.8714-4.1945-4.1945V7.0105c0-2.3231 1.8714-4.1945 4.1945-4.1945h9.3571c-.1936-.1936-.968-.5163-1.7423-.4517-.3227 0-.968.1291-1.3552-.1291-.3872-.3227-.3227-.5163-.9034-.5163H4.9277c-2.6458 0-4.7753 2.1295-4.7753 4.7753v11.7447c0 2.6458 2.1295 4.7753 4.4527 4.7108.6452 0 .8388-.5162 1.0324-.9034zM20.4152 6.9459v10.9058c0 2.3231-1.8714 4.1945-4.1945 4.1945H11.897s-.3872 1.0325.8389 1.0325h3.8719c2.6458 0 4.7753-2.1295 4.7753-4.7753V8.8173c.0646-.9034-.7098-1.4842-.9679-1.8714zm-18.5851.0646v10.8413c0 1.9359 1.6133 3.5492 3.5492 3.5492h.5808c0-.0645.7744-1.4197 2.4522-4.2591.1936-.3872.4517-.7744.7098-1.2261H4.4114c-.5808 0-.9034-.3872-.968-.7098-.1291-.5163.1936-1.1616.9034-1.1616h2.3877l3.033-5.2916s-.7098-1.2906-.9034-1.6133c-.2582-.4517-.1291-.9034.129-1.1615.3872-.3872 1.0325-.5808 1.6778.4517l.2581.3872.2581-.3872c.5808-.8389.968-.7744 1.2906-.7098.5163.1291.8389.7098.3872 1.6133L8.864 14.0444h1.3552c.4517-.7744.9034-1.5488 1.3552-2.3877-.0645-.3227-.1291-.7098-.0645-1.0325.0645-.5163.3227-.968.6453-1.3552l.3872.6453c1.2261-2.1295 2.1295-3.9364 2.3877-4.6463.1291-.3872.3227-1.1616.1291-1.8069H5.3794c-2.0005.0001-3.5493 1.6134-3.5493 3.5494zM4.605 17.7872c0-.0645.7744-1.4197.7744-1.4197 1.2261-.3227 1.8069.4517 1.8714.5163 0 0-.8389 1.4842-1.097 1.7423s-.5808.3227-.9034.2581c-.5164-.129-.839-.6453-.6454-1.097z" />
</svg>
);
}

/**
* Android Studio logo icon
*/
export function AndroidStudioIcon(props: IconProps) {
return (
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
<path d="M19.2693 10.3368c-.3321 0-.6026.2705-.6026.6031v9.8324h-1.7379l-3.3355-6.9396c.476-.5387.6797-1.286.5243-2.0009a2.2862 2.2862 0 0 0-1.2893-1.6248v-.8124c.0121-.2871-.1426-.5787-.4043-.7407-.1391-.0825-.2884-.1234-.4402-.1234a.8478.8478 0 0 0-.4318.1182c-.2701.1671-.4248.4587-.4123.7662l-.0003.721c-1.0149.3668-1.6619 1.4153-1.4867 2.5197a2.282 2.282 0 0 0 .5916 1.2103l-3.2096 6.9064H4.0928c-1.0949-.007-1.9797-.8948-1.9832-1.9896V5.016c-.0055 1.1024.8836 2.0006 1.9859 2.0062a2.024 2.024 0 0 0 .1326-.0037h14.7453s2.5343-.2189 2.8619 1.5392c-.2491.0287-.4449.2321-.4449.4889 0 .7115-.5791 1.2901-1.3028 1.2901h-.8183zM17.222 22.5366c.2347.4837.0329 1.066-.4507 1.3007-.1296.0629-.2666.0895-.4018.0927a.9738.9738 0 0 1-.3194-.0455c-.024-.0078-.046-.0209-.0694-.0305a.9701.9701 0 0 1-.2277-.1321c-.0247-.0192-.0495-.038-.0724-.0598-.0825-.0783-.1574-.1672-.21-.2757l-1.2554-2.6143-1.5585-3.2452a.7725.7725 0 0 0-.6995-.4443h-.0024a.792.792 0 0 0-.7083.4443l-1.5109 3.2452-1.2321 2.6464a.9722.9722 0 0 1-.7985.5795c-.0626.0053-.1238-.0024-.185-.0087-.0344-.0036-.069-.0053-.1025-.0124-.0489-.0103-.0954-.0278-.142-.0452-.0301-.0113-.0613-.0197-.0901-.0339-.0496-.0244-.0948-.0565-.1397-.0889-.0217-.0156-.0457-.0275-.0662-.045a.9862.9862 0 0 1-.1695-.1844.9788.9788 0 0 1-.0708-.9852l.8469-1.8223 3.2676-7.0314a1.7964 1.7964 0 0 1-.7072-1.1637c-.1555-.9799.5129-1.9003 1.4928-2.0559V9.3946a.3542.3542 0 0 1 .1674-.3155.3468.3468 0 0 1 .3541 0 .354.354 0 0 1 .1674.3155v1.159l.0129.0064a1.8028 1.8028 0 0 1 1.2878 1.378 1.7835 1.7835 0 0 1-.6439 1.7836l3.3889 7.0507.8481 1.7643zM12.9841 12.306c.0042-.6081-.4854-1.1044-1.0935-1.1085a1.1204 1.1204 0 0 0-.7856.3219 1.101 1.101 0 0 0-.323.7716c-.0042.6081.4854 1.1044 1.0935 1.1085h.0077c.6046 0 1.0967-.488 1.1009-1.0935zm-1.027 5.2768c-.1119.0005-.2121.0632-.2571.1553l-1.4127 3.0342h3.3733l-1.4564-3.0328a.274.274 0 0 0-.2471-.1567zm8.1432-6.7459l-.0129-.0001h-.8177a.103.103 0 0 0-.103.103v12.9103a.103.103 0 0 0 .0966.103h.8435c.9861-.0035 1.7836-.804 1.7836-1.79V9.0468c0 .9887-.8014 1.7901-1.7901 1.7901zM2.6098 5.0161v.019c.0039.816.6719 1.483 1.4874 1.4869a12.061 12.061 0 0 1 .1309-.0034h1.1286c.1972-1.315.7607-2.525 1.638-3.4859H4.0993c-.9266.0031-1.6971.6401-1.9191 1.4975.2417.0355.4296.235.4296.4859zm6.3381-2.8977L7.9112.3284a.219.219 0 0 1 0-.2189A.2384.2384 0 0 1 8.098 0a.219.219 0 0 1 .1867.1094l1.0496 1.8158a6.4907 6.4907 0 0 1 5.3186 0L15.696.1094a.2189.2189 0 0 1 .3734.2189l-1.0302 1.79c1.6671.9125 2.7974 2.5439 3.0975 4.4018l-12.286-.0014c.3004-1.8572 1.4305-3.488 3.0972-4.4003zm5.3774 2.6202a.515.515 0 0 0 .5271.5028.515.515 0 0 0 .5151-.5151.5213.5213 0 0 0-.8885-.367.5151.5151 0 0 0-.1537.3793zm-5.7178-.0067a.5151.5151 0 0 0 .5207.5095.5086.5086 0 0 0 .367-.1481.5215.5215 0 1 0-.734-.7341.515.515 0 0 0-.1537.3727z" />
</svg>
);
}

/**
* Google Antigravity IDE logo icon - stylized "A" arch shape
*/
export function AntigravityIcon(props: IconProps) {
return (
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" {...props}>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M12 1C11 1 9.5 3 8 7c-1.5 4-3 8.5-4 11.5-.5 1.5-.3 2.8.5 3.3.8.5 2 .2 3-.8.8-.8 1.3-2 1.8-3.2.3-.8.8-1.3 1.5-1.3h2.4c.7 0 1.2.5 1.5 1.3.5 1.2 1 2.4 1.8 3.2 1 1 2.2 1.3 3 .8.8-.5 1-1.8.5-3.3-1-3-2.5-7.5-4-11.5C14.5 3 13 1 12 1zm0 5c.8 2 2 5.5 3 8.5H9c1-3 2.2-6.5 3-8.5z"
/>
</svg>
);
}

/**
* Get the appropriate icon component for an editor command
*/
export function getEditorIcon(command: string): IconComponent {
// Handle direct CLI commands
const cliIcons: Record<string, IconComponent> = {
cursor: CursorIcon,
code: VSCodeIcon,
'code-insiders': VSCodeInsidersIcon,
kido: KiroIcon,
zed: ZedIcon,
subl: SublimeTextIcon,
windsurf: WindsurfIcon,
trae: TraeIcon,
rider: RiderIcon,
webstorm: WebStormIcon,
xed: XcodeIcon,
studio: AndroidStudioIcon,
[PRIMARY_ANTIGRAVITY_COMMAND]: AntigravityIcon,
[LEGACY_ANTIGRAVITY_COMMAND]: AntigravityIcon,
open: FinderIcon,
explorer: FolderOpen,
'xdg-open': FolderOpen,
};

// Check direct match first
if (cliIcons[command]) {
return cliIcons[command];
}

// Handle 'open' commands (macOS) - both 'open -a AppName' and 'open "/path/to/App.app"'
if (command.startsWith('open')) {
const cmdLower = command.toLowerCase();
if (cmdLower.includes('cursor')) return CursorIcon;
if (cmdLower.includes('visual studio code - insiders')) return VSCodeInsidersIcon;
if (cmdLower.includes('visual studio code')) return VSCodeIcon;
if (cmdLower.includes('kiro')) return KiroIcon;
if (cmdLower.includes('zed')) return ZedIcon;
if (cmdLower.includes('sublime')) return SublimeTextIcon;
if (cmdLower.includes('windsurf')) return WindsurfIcon;
if (cmdLower.includes('trae')) return TraeIcon;
if (cmdLower.includes('rider')) return RiderIcon;
if (cmdLower.includes('webstorm')) return WebStormIcon;
if (cmdLower.includes('xcode')) return XcodeIcon;
if (cmdLower.includes('android studio')) return AndroidStudioIcon;
if (cmdLower.includes('antigravity')) return AntigravityIcon;
// If just 'open' without app name, it's Finder
if (command === 'open') return FinderIcon;
}

return FolderOpen;
}
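// Illustrative sketch, not part of the diff: how getEditorIcon resolves an icon.
// It checks the exact CLI command first, then falls back to parsing macOS-style
// `open -a` commands; the command strings below are hypothetical examples.
const cursorIcon = getEditorIcon('cursor'); // direct CLI match -> CursorIcon
const vscodeIcon = getEditorIcon('open -a "Visual Studio Code"'); // parsed 'open' command -> VSCodeIcon
const fallbackIcon = getEditorIcon('some-unknown-tool'); // no match -> FolderOpen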
@@ -0,0 +1,187 @@
import { useState, useRef } from 'react';
import {
Dialog,
DialogContent,
DialogHeader,
DialogTitle,
DialogFooter,
} from '@/components/ui/dialog';
import { Button } from '@/components/ui/button';
import { Input } from '@/components/ui/input';
import { Label } from '@/components/ui/label';
import { Upload, X, ImageIcon } from 'lucide-react';
import { useAppStore } from '@/store/app-store';
import { getAuthenticatedImageUrl } from '@/lib/api-fetch';
import { getHttpApiClient } from '@/lib/http-api-client';
import type { Project } from '@/lib/electron';
import { IconPicker } from './icon-picker';

interface EditProjectDialogProps {
project: Project;
open: boolean;
onOpenChange: (open: boolean) => void;
}

export function EditProjectDialog({ project, open, onOpenChange }: EditProjectDialogProps) {
const { setProjectName, setProjectIcon, setProjectCustomIcon } = useAppStore();
const [name, setName] = useState(project.name);
const [icon, setIcon] = useState<string | null>((project as any).icon || null);
const [customIconPath, setCustomIconPath] = useState<string | null>(
(project as any).customIconPath || null
);
const [isUploadingIcon, setIsUploadingIcon] = useState(false);
const fileInputRef = useRef<HTMLInputElement>(null);

const handleSave = () => {
if (name.trim() !== project.name) {
setProjectName(project.id, name.trim());
}
if (icon !== (project as any).icon) {
setProjectIcon(project.id, icon);
}
if (customIconPath !== (project as any).customIconPath) {
setProjectCustomIcon(project.id, customIconPath);
}
onOpenChange(false);
};

const handleCustomIconUpload = async (e: React.ChangeEvent<HTMLInputElement>) => {
const file = e.target.files?.[0];
if (!file) return;

// Validate file type
const validTypes = ['image/jpeg', 'image/png', 'image/gif', 'image/webp'];
if (!validTypes.includes(file.type)) {
return;
}

// Validate file size (max 2MB for icons)
if (file.size > 2 * 1024 * 1024) {
return;
}

setIsUploadingIcon(true);
try {
// Convert to base64
const reader = new FileReader();
reader.onload = async () => {
const base64Data = reader.result as string;
const result = await getHttpApiClient().saveImageToTemp(
base64Data,
`project-icon-${file.name}`,
file.type,
project.path
);
if (result.success && result.path) {
setCustomIconPath(result.path);
// Clear the Lucide icon when custom icon is set
setIcon(null);
}
setIsUploadingIcon(false);
};
reader.readAsDataURL(file);
} catch {
setIsUploadingIcon(false);
}
};

const handleRemoveCustomIcon = () => {
setCustomIconPath(null);
if (fileInputRef.current) {
fileInputRef.current.value = '';
}
};

return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-md">
<DialogHeader>
<DialogTitle>Edit Project</DialogTitle>
</DialogHeader>

<div className="space-y-4 py-4 overflow-y-auto flex-1 min-h-0">
{/* Project Name */}
<div className="space-y-2">
<Label htmlFor="project-name">Project Name</Label>
<Input
id="project-name"
value={name}
onChange={(e) => setName(e.target.value)}
placeholder="Enter project name"
/>
</div>

{/* Icon Picker */}
<div className="space-y-2">
<Label>Project Icon</Label>
<p className="text-xs text-muted-foreground mb-2">
Choose a preset icon or upload a custom image
</p>

{/* Custom Icon Upload */}
<div className="mb-4">
<div className="flex items-center gap-3">
{customIconPath ? (
<div className="relative">
<img
src={getAuthenticatedImageUrl(customIconPath, project.path)}
alt="Custom project icon"
className="w-12 h-12 rounded-lg object-cover border border-border"
/>
<button
type="button"
onClick={handleRemoveCustomIcon}
className="absolute -top-1.5 -right-1.5 w-5 h-5 rounded-full bg-destructive text-destructive-foreground flex items-center justify-center hover:bg-destructive/90"
>
<X className="w-3 h-3" />
</button>
</div>
) : (
<div className="w-12 h-12 rounded-lg border border-dashed border-border flex items-center justify-center bg-accent/30">
<ImageIcon className="w-5 h-5 text-muted-foreground" />
</div>
)}
<div className="flex-1">
<input
ref={fileInputRef}
type="file"
accept="image/jpeg,image/png,image/gif,image/webp"
onChange={handleCustomIconUpload}
className="hidden"
id="custom-icon-upload-dialog"
/>
<Button
type="button"
variant="outline"
size="sm"
onClick={() => fileInputRef.current?.click()}
disabled={isUploadingIcon}
className="gap-1.5"
>
<Upload className="w-3.5 h-3.5" />
{isUploadingIcon ? 'Uploading...' : 'Upload Custom Icon'}
</Button>
<p className="text-xs text-muted-foreground mt-1">
PNG, JPG, GIF or WebP. Max 2MB.
</p>
</div>
</div>
</div>

{/* Preset Icon Picker - only show if no custom icon */}
{!customIconPath && <IconPicker selectedIcon={icon} onSelectIcon={setIcon} />}
</div>
</div>

<DialogFooter className="flex-shrink-0">
<Button variant="outline" onClick={() => onOpenChange(false)}>
Cancel
</Button>
<Button onClick={handleSave} disabled={!name.trim()}>
Save Changes
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
);
}
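// Sketch, not part of the diff: the client-side checks the dialog applies before
// uploading a custom icon, pulled out as a standalone helper for clarity. The
// accepted MIME types and the 2 MB limit mirror handleCustomIconUpload above.
function isValidCustomIcon(file: File): boolean {
const validTypes = ['image/jpeg', 'image/png', 'image/gif', 'image/webp'];
const maxBytes = 2 * 1024 * 1024; // 2MB
return validTypes.includes(file.type) && file.size <= maxBytes;
}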
@@ -0,0 +1,520 @@
import { useState } from 'react';
import { X, Search } from 'lucide-react';
import * as LucideIcons from 'lucide-react';
import { cn } from '@/lib/utils';
import { Input } from '@/components/ui/input';
import { ScrollArea } from '@/components/ui/scroll-area';

interface IconPickerProps {
selectedIcon: string | null;
onSelectIcon: (icon: string | null) => void;
}

// Comprehensive list of project-related icons from Lucide
// Organized by category for easier browsing
const POPULAR_ICONS = [
// Folders & Files
'Folder', 'FolderOpen', 'FolderCode', 'FolderGit', 'FolderKanban', 'FolderTree', 'FolderInput',
'FolderOutput', 'FolderPlus', 'File', 'FileCode', 'FileText', 'FileJson', 'FileImage', 'FileVideo',
'FileAudio', 'FileSpreadsheet', 'Files', 'Archive',
// Code & Development
'Code', 'Code2', 'Braces', 'Brackets', 'Terminal', 'TerminalSquare', 'Command', 'GitBranch',
'GitCommit', 'GitMerge', 'GitPullRequest', 'GitCompare', 'GitFork', 'GitHub', 'Gitlab',
'Bitbucket', 'Vscode',
// Packages & Containers
'Package', 'PackageSearch', 'PackageCheck', 'PackageX', 'Box', 'Boxes', 'Container',
// UI & Design
'Layout', 'LayoutGrid', 'LayoutList', 'LayoutDashboard', 'LayoutTemplate', 'Layers', 'Layers2',
'Layers3', 'Blocks', 'Component', 'Palette', 'Paintbrush', 'Brush', 'PenTool', 'Ruler', 'Grid',
'Grid3x3', 'Square', 'RectangleHorizontal', 'RectangleVertical', 'Circle',
// Tools & Settings
'Cog', 'Settings', 'Settings2', 'Wrench', 'Hammer', 'Screwdriver', 'WrenchIcon', 'Tool',
'ScrewdriverWrench', 'Sliders', 'SlidersHorizontal', 'Filter', 'FilterX',
// Technology & Infrastructure
'Server', 'ServerCrash', 'ServerCog', 'Database', 'DatabaseBackup', 'CloudUpload', 'CloudDownload',
'CloudOff', 'Globe', 'Globe2', 'Network', 'Wifi', 'WifiOff', 'Router', 'Cpu', 'MemoryStick',
'HardDrive', 'HardDriveIcon', 'CircuitBoard', 'Microchip', 'Monitor', 'MonitorSpeaker', 'Laptop',
'Smartphone', 'Tablet', 'Mouse', 'Keyboard', 'Headphones', 'Printer', 'Scanner',
// Workflow & Process
'Workflow', 'Zap', 'Rocket', 'Flame', 'Lightning', 'Bolt', 'Target', 'Flag', 'FlagTriangleRight',
'CheckCircle', 'CheckCircle2', 'XCircle', 'AlertCircle', 'Info', 'HelpCircle', 'Clock', 'Timer',
'Stopwatch', 'Calendar', 'CalendarDays', 'CalendarCheck', 'CalendarClock',
// Security & Access
'Shield', 'ShieldCheck', 'ShieldAlert', 'ShieldOff', 'Lock', 'Unlock', 'Key', 'KeyRound', 'Eye',
'EyeOff', 'User', 'Users', 'UserCheck', 'UserX', 'UserPlus', 'UserCog',
// Business & Finance
'Briefcase', 'Building', 'Building2', 'Store', 'ShoppingCart', 'ShoppingBag', 'CreditCard',
'Wallet', 'DollarSign', 'Euro', 'PoundSterling', 'Yen', 'Coins', 'Receipt', 'ChartBar',
'ChartLine', 'ChartPie', 'TrendingUp', 'TrendingDown', 'Activity', 'BarChart', 'LineChart', 'PieChart',
// Communication & Media
'MessageSquare', 'MessageCircle', 'Mail', 'MailOpen', 'Send', 'Inbox', 'Phone', 'PhoneCall',
'Video', 'VideoOff', 'Camera', 'CameraOff', 'Image', 'ImageIcon', 'Film', 'Music', 'Mic', 'MicOff',
'Volume', 'Volume2', 'VolumeX', 'Radio', 'Podcast',
// Social & Community
'Heart', 'HeartHandshake', 'Star', 'StarOff', 'ThumbsUp', 'ThumbsDown', 'Share', 'Share2', 'Link',
'Link2', 'ExternalLink', 'AtSign', 'Hash', 'Hashtag', 'Tag', 'Tags',
// Navigation & Location
'Compass', 'Map', 'MapPin', 'Navigation', 'Navigation2', 'Route', 'Plane', 'Car', 'Bike', 'Ship',
'Train', 'Bus',
// Science & Education
'FlaskConical', 'FlaskRound', 'Beaker', 'TestTube', 'TestTube2', 'Microscope', 'Atom', 'Brain',
'GraduationCap', 'Book', 'BookOpen', 'BookMarked', 'Library', 'School', 'University',
// Food & Health
'Coffee', 'Utensils', 'UtensilsCrossed', 'Apple', 'Cherry', 'Cookie', 'Cake', 'Pizza', 'Beer',
'Wine', 'HeartPulse', 'Dumbbell', 'Running',
// Nature & Weather
'Tree', 'TreePine', 'Leaf', 'Flower', 'Flower2', 'Sun', 'Moon', 'CloudRain', 'CloudSnow',
'CloudLightning', 'Droplet', 'Wind', 'Snowflake', 'Umbrella',
// Objects & Symbols
'Puzzle', 'PuzzleIcon', 'Gamepad', 'Gamepad2', 'Dice', 'Dice1', 'Dice6', 'Gem', 'Crown', 'Trophy',
'Medal', 'Award', 'Gift', 'GiftIcon', 'Bell', 'BellOff', 'BellRing', 'Home', 'House', 'DoorOpen',
'DoorClosed', 'Window', 'Lightbulb', 'LightbulbOff', 'Candle', 'Flashlight', 'FlashlightOff',
'Battery', 'BatteryFull', 'BatteryLow', 'BatteryCharging', 'Plug', 'PlugZap', 'Power', 'PowerOff',
// Arrows & Directions
'ArrowRight', 'ArrowLeft', 'ArrowUp', 'ArrowDown', 'ArrowUpRight', 'ArrowDownRight',
'ArrowDownLeft', 'ArrowUpLeft', 'ChevronRight', 'ChevronLeft', 'ChevronUp', 'ChevronDown', 'Move',
'MoveUp', 'MoveDown', 'MoveLeft', 'MoveRight', 'RotateCw', 'RotateCcw', 'RefreshCw', 'RefreshCcw',
// Shapes & Symbols
'Diamond', 'Pentagon', 'Cross', 'Plus', 'Minus', 'X', 'Check', 'Divide', 'Equal', 'Infinity', 'Percent',
// Miscellaneous
'Bot', 'Wand', 'Wand2', 'Magic', 'Stars', 'Comet', 'Satellite', 'SatelliteDish', 'Radar',
'RadarIcon', 'Scan', 'ScanLine', 'QrCode', 'Barcode', 'ScanSearch', 'Search', 'SearchX', 'ZoomIn',
'ZoomOut', 'Maximize', 'Minimize', 'Maximize2', 'Minimize2', 'Expand', 'Shrink', 'Copy',
'CopyCheck', 'Clipboard', 'ClipboardCheck', 'ClipboardCopy', 'ClipboardList', 'ClipboardPaste',
'Scissors', 'Cut', 'FileEdit', 'Pen', 'Pencil', 'Eraser', 'Trash', 'Trash2', 'Delete',
'ArchiveRestore', 'Download', 'Upload', 'Save', 'SaveAll', 'FilePlus', 'FileMinus', 'FileX',
'FileCheck', 'FileQuestion', 'FileWarning', 'FileSearch', 'FolderSearch', 'FolderX', 'FolderCheck',
'FolderMinus', 'FolderSync', 'FolderUp', 'FolderDown',
];

export function IconPicker({ selectedIcon, onSelectIcon }: IconPickerProps) {
const [search, setSearch] = useState('');

const filteredIcons = POPULAR_ICONS.filter((icon) =>
icon.toLowerCase().includes(search.toLowerCase())
);

const getIconComponent = (iconName: string) => {
return (LucideIcons as Record<string, React.ComponentType<{ className?: string }>>)[iconName];
};

return (
<div className="space-y-3">
{/* Search */}
<div className="relative">
<Search className="absolute left-3 top-1/2 -translate-y-1/2 w-4 h-4 text-muted-foreground" />
<Input
value={search}
onChange={(e) => setSearch(e.target.value)}
placeholder="Search icons..."
className="pl-9"
/>
</div>

{/* Selected Icon Display */}
{selectedIcon && (
<div className="flex items-center gap-2 p-2 rounded-md bg-accent/50 border border-border">
<div className="flex items-center gap-2 flex-1">
{(() => {
const IconComponent = getIconComponent(selectedIcon);
return IconComponent ? <IconComponent className="w-5 h-5 text-brand-500" /> : null;
})()}
<span className="text-sm font-medium">{selectedIcon}</span>
</div>
<button
onClick={() => onSelectIcon(null)}
className="p-1 hover:bg-background rounded transition-colors"
title="Clear icon"
>
<X className="w-4 h-4" />
</button>
</div>
)}

{/* Icons Grid */}
<ScrollArea className="h-96 rounded-md border">
<div className="grid grid-cols-6 gap-1 p-2">
{filteredIcons.map((iconName) => {
const IconComponent = getIconComponent(iconName);
if (!IconComponent) return null;

const isSelected = selectedIcon === iconName;

return (
<button
key={iconName}
onClick={() => onSelectIcon(iconName)}
className={cn(
'aspect-square rounded-md flex items-center justify-center',
'transition-all duration-150',
'hover:bg-accent hover:scale-110',
isSelected
? 'bg-brand-500/20 border-2 border-brand-500'
: 'border border-transparent'
)}
title={iconName}
>
<IconComponent
className={cn('w-5 h-5', isSelected ? 'text-brand-500' : 'text-foreground')}
/>
</button>
);
})}
</div>
</ScrollArea>
</div>
);
}
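// Sketch, not part of the diff: the grid above resolves icon names dynamically
// against the lucide-react namespace, so entries in POPULAR_ICONS that do not
// exist in the installed lucide-react version are simply skipped at render time.
import type { ComponentType } from 'react';
import * as Lucide from 'lucide-react';

function resolveLucideIcon(name: string): ComponentType<{ className?: string }> | null {
const icons = Lucide as unknown as Record<string, ComponentType<{ className?: string }>>;
return icons[name] ?? null; // null -> the picker renders nothing for this entry
}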
@@ -0,0 +1,4 @@
export { ProjectSwitcherItem } from './project-switcher-item';
export { ProjectContextMenu } from './project-context-menu';
export { EditProjectDialog } from './edit-project-dialog';
export { IconPicker } from './icon-picker';
@@ -0,0 +1,333 @@
import { useEffect, useRef, useState, memo } from 'react';
import type { LucideIcon } from 'lucide-react';
import { Edit2, Trash2, Palette, ChevronRight, Moon, Sun, Monitor } from 'lucide-react';
import { cn } from '@/lib/utils';
import { type ThemeMode, useAppStore } from '@/store/app-store';
import { ConfirmDialog } from '@/components/ui/confirm-dialog';
import type { Project } from '@/lib/electron';
import { PROJECT_DARK_THEMES, PROJECT_LIGHT_THEMES } from '@/components/layout/sidebar/constants';
import { useThemePreview } from '@/components/layout/sidebar/hooks';

// Constants for z-index values
const Z_INDEX = {
CONTEXT_MENU: 100,
THEME_SUBMENU: 101,
} as const;

// Theme option type - using ThemeMode for type safety
interface ThemeOption {
value: ThemeMode;
label: string;
icon: LucideIcon;
color: string;
}

// Reusable theme button component to avoid duplication (DRY principle)
interface ThemeButtonProps {
option: ThemeOption;
isSelected: boolean;
onPointerEnter: () => void;
onPointerLeave: (e: React.PointerEvent) => void;
onClick: () => void;
}

const ThemeButton = memo(function ThemeButton({
option,
isSelected,
onPointerEnter,
onPointerLeave,
onClick,
}: ThemeButtonProps) {
const Icon = option.icon;
return (
<button
onPointerEnter={onPointerEnter}
onPointerLeave={onPointerLeave}
onClick={onClick}
className={cn(
'w-full flex items-center gap-1.5 px-2 py-1.5 rounded-md',
'text-xs text-left',
'hover:bg-accent transition-colors',
'focus:outline-none focus:bg-accent',
isSelected && 'bg-accent'
)}
data-testid={`project-theme-${option.value}`}
>
<Icon className="w-3.5 h-3.5" style={{ color: option.color }} />
<span>{option.label}</span>
</button>
);
});

// Reusable theme column component
interface ThemeColumnProps {
title: string;
icon: LucideIcon;
themes: ThemeOption[];
selectedTheme: ThemeMode | null;
onPreviewEnter: (value: ThemeMode) => void;
onPreviewLeave: (e: React.PointerEvent) => void;
onSelect: (value: ThemeMode) => void;
}

const ThemeColumn = memo(function ThemeColumn({
title,
icon: Icon,
themes,
selectedTheme,
onPreviewEnter,
onPreviewLeave,
onSelect,
}: ThemeColumnProps) {
return (
<div className="flex-1">
<div className="flex items-center gap-1.5 px-2 py-1.5 text-xs font-medium text-muted-foreground">
<Icon className="w-3 h-3" />
{title}
</div>
<div className="space-y-0.5">
{themes.map((option) => (
<ThemeButton
key={option.value}
option={option}
isSelected={selectedTheme === option.value}
onPointerEnter={() => onPreviewEnter(option.value)}
onPointerLeave={onPreviewLeave}
onClick={() => onSelect(option.value)}
/>
))}
</div>
</div>
);
});

interface ProjectContextMenuProps {
project: Project;
position: { x: number; y: number };
onClose: () => void;
onEdit: (project: Project) => void;
}

export function ProjectContextMenu({
project,
position,
onClose,
onEdit,
}: ProjectContextMenuProps) {
const menuRef = useRef<HTMLDivElement>(null);
const {
moveProjectToTrash,
theme: globalTheme,
setTheme,
setProjectTheme,
setPreviewTheme,
} = useAppStore();
const [showRemoveDialog, setShowRemoveDialog] = useState(false);
const [showThemeSubmenu, setShowThemeSubmenu] = useState(false);
const themeSubmenuRef = useRef<HTMLDivElement>(null);

const { handlePreviewEnter, handlePreviewLeave } = useThemePreview({ setPreviewTheme });

useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
if (menuRef.current && !menuRef.current.contains(event.target as Node)) {
setPreviewTheme(null);
onClose();
}
};

const handleEscape = (event: KeyboardEvent) => {
if (event.key === 'Escape') {
setPreviewTheme(null);
onClose();
}
};

document.addEventListener('mousedown', handleClickOutside);
document.addEventListener('keydown', handleEscape);

return () => {
document.removeEventListener('mousedown', handleClickOutside);
document.removeEventListener('keydown', handleEscape);
};
}, [onClose, setPreviewTheme]);

const handleEdit = () => {
onEdit(project);
};

const handleRemove = () => {
setShowRemoveDialog(true);
};

const handleThemeSelect = (value: ThemeMode | '') => {
setPreviewTheme(null);
if (value !== '') {
setTheme(value);
} else {
setTheme(globalTheme);
}
setProjectTheme(project.id, value === '' ? null : value);
setShowThemeSubmenu(false);
};

const handleConfirmRemove = () => {
moveProjectToTrash(project.id);
onClose();
};

return (
<>
<div
ref={menuRef}
className={cn(
'fixed min-w-48 rounded-lg',
'bg-popover text-popover-foreground',
'border border-border shadow-lg',
'animate-in fade-in zoom-in-95 duration-100'
)}
style={{
top: position.y,
left: position.x,
zIndex: Z_INDEX.CONTEXT_MENU,
}}
data-testid="project-context-menu"
>
<div className="p-1">
<button
onClick={handleEdit}
className={cn(
'w-full flex items-center gap-2 px-3 py-2 rounded-md',
'text-sm font-medium text-left',
'hover:bg-accent transition-colors',
'focus:outline-none focus:bg-accent'
)}
data-testid="edit-project-button"
>
<Edit2 className="w-4 h-4" />
<span>Edit Name & Icon</span>
</button>

{/* Theme Submenu Trigger */}
<div
className="relative"
onMouseEnter={() => setShowThemeSubmenu(true)}
onMouseLeave={() => {
setShowThemeSubmenu(false);
setPreviewTheme(null);
}}
>
<button
onClick={() => setShowThemeSubmenu(!showThemeSubmenu)}
className={cn(
'w-full flex items-center gap-2 px-3 py-2 rounded-md',
'text-sm font-medium text-left',
'hover:bg-accent transition-colors',
'focus:outline-none focus:bg-accent'
)}
data-testid="theme-project-button"
>
<Palette className="w-4 h-4" />
<span className="flex-1">Project Theme</span>
{project.theme && (
<span className="text-[10px] text-muted-foreground capitalize">
{project.theme}
</span>
)}
<ChevronRight className="w-4 h-4 text-muted-foreground" />
</button>

{/* Theme Submenu */}
{showThemeSubmenu && (
<div
ref={themeSubmenuRef}
className={cn(
'absolute left-full top-0 ml-1 min-w-[420px] rounded-lg',
'bg-popover text-popover-foreground',
'border border-border shadow-lg',
'animate-in fade-in zoom-in-95 duration-100'
)}
style={{ zIndex: Z_INDEX.THEME_SUBMENU }}
data-testid="project-theme-submenu"
>
<div className="p-2">
{/* Use Global Option */}
<button
onPointerEnter={() => handlePreviewEnter(globalTheme)}
onPointerLeave={handlePreviewLeave}
onClick={() => handleThemeSelect('')}
className={cn(
'w-full flex items-center gap-2 px-3 py-2 rounded-md',
'text-sm font-medium text-left',
'hover:bg-accent transition-colors',
'focus:outline-none focus:bg-accent',
!project.theme && 'bg-accent'
)}
data-testid="project-theme-global"
>
<Monitor className="w-4 h-4" />
<span>Use Global</span>
<span className="text-[10px] text-muted-foreground ml-1 capitalize">
({globalTheme})
</span>
</button>

<div className="h-px bg-border my-2" />

{/* Two Column Layout - Using reusable ThemeColumn component */}
<div className="flex gap-2">
<ThemeColumn
title="Dark"
icon={Moon}
themes={PROJECT_DARK_THEMES as ThemeOption[]}
selectedTheme={project.theme as ThemeMode | null}
onPreviewEnter={handlePreviewEnter}
onPreviewLeave={handlePreviewLeave}
onSelect={handleThemeSelect}
/>
<ThemeColumn
title="Light"
icon={Sun}
themes={PROJECT_LIGHT_THEMES as ThemeOption[]}
selectedTheme={project.theme as ThemeMode | null}
onPreviewEnter={handlePreviewEnter}
onPreviewLeave={handlePreviewLeave}
onSelect={handleThemeSelect}
/>
</div>
</div>
</div>
)}
</div>

<button
onClick={handleRemove}
className={cn(
'w-full flex items-center gap-2 px-3 py-2 rounded-md',
'text-sm font-medium text-left',
'text-destructive hover:bg-destructive/10',
'transition-colors',
'focus:outline-none focus:bg-destructive/10'
)}
data-testid="remove-project-button"
>
<Trash2 className="w-4 h-4" />
<span>Remove Project</span>
</button>
</div>
</div>

<ConfirmDialog
open={showRemoveDialog}
onOpenChange={setShowRemoveDialog}
onConfirm={handleConfirmRemove}
title="Remove Project"
description={`Are you sure you want to remove "${project.name}" from the project list? This won't delete any files on disk.`}
icon={Trash2}
iconClassName="text-destructive"
confirmText="Remove"
confirmVariant="destructive"
/>
</>
);
}
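// Sketch, not part of the diff: how handleThemeSelect above interprets a choice.
// The empty value means "use the global theme" - the per-project override is
// cleared and the global theme is re-applied; any other value becomes the override.
function resolveProjectTheme(
selected: string,
globalTheme: string
): { applied: string; override: string | null } {
if (selected === '') {
return { applied: globalTheme, override: null };
}
return { applied: selected, override: selected };
}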
@@ -0,0 +1,116 @@
import { Folder, LucideIcon } from 'lucide-react';
import * as LucideIcons from 'lucide-react';
import { cn } from '@/lib/utils';
import { getAuthenticatedImageUrl } from '@/lib/api-fetch';
import type { Project } from '@/lib/electron';

interface ProjectSwitcherItemProps {
project: Project;
isActive: boolean;
hotkeyIndex?: number; // 0-9 for hotkeys 1-9, 0
onClick: () => void;
onContextMenu: (event: React.MouseEvent) => void;
}

export function ProjectSwitcherItem({
project,
isActive,
hotkeyIndex,
onClick,
onContextMenu,
}: ProjectSwitcherItemProps) {
// Convert index to hotkey label: 0 -> "1", 1 -> "2", ..., 8 -> "9", 9 -> "0"
const hotkeyLabel =
hotkeyIndex !== undefined && hotkeyIndex >= 0 && hotkeyIndex <= 9
? hotkeyIndex === 9
? '0'
: String(hotkeyIndex + 1)
: undefined;
// Get the icon component from lucide-react
const getIconComponent = (): LucideIcon => {
if (project.icon && project.icon in LucideIcons) {
return (LucideIcons as Record<string, LucideIcon>)[project.icon];
}
return Folder;
};

const IconComponent = getIconComponent();
const hasCustomIcon = !!project.customIconPath;

return (
<button
onClick={onClick}
onContextMenu={onContextMenu}
className={cn(
'group w-full aspect-square rounded-xl flex items-center justify-center relative overflow-hidden',
'transition-all duration-200 ease-out',
isActive
? [
// Active: Premium gradient with glow
'bg-gradient-to-r from-brand-500/20 via-brand-500/15 to-brand-600/10',
'border border-brand-500/30',
'shadow-md shadow-brand-500/10',
]
: [
// Inactive: Subtle hover state
'hover:bg-accent/50',
'border border-transparent hover:border-border/40',
'hover:shadow-sm',
],
'hover:scale-105 active:scale-95'
)}
title={project.name}
data-testid={`project-switcher-${project.id}`}
>
{hasCustomIcon ? (
<img
src={getAuthenticatedImageUrl(project.customIconPath!, project.path)}
alt={project.name}
className={cn(
'w-8 h-8 rounded-lg object-cover transition-all duration-200',
isActive ? 'ring-1 ring-brand-500/50' : 'group-hover:scale-110'
)}
/>
) : (
<IconComponent
className={cn(
'w-6 h-6 transition-all duration-200',
isActive
? 'text-brand-500 drop-shadow-sm'
: 'text-muted-foreground group-hover:text-brand-400 group-hover:scale-110'
)}
/>
)}

{/* Tooltip on hover */}
<span
className={cn(
'absolute left-full ml-3 px-2.5 py-1.5 rounded-lg',
'bg-popover text-popover-foreground text-xs font-medium',
'border border-border shadow-lg',
'opacity-0 group-hover:opacity-100',
'transition-all duration-200 whitespace-nowrap z-50',
'translate-x-1 group-hover:translate-x-0 pointer-events-none'
)}
>
{project.name}
</span>

{/* Hotkey badge */}
{hotkeyLabel && (
<span
className={cn(
'absolute bottom-0.5 right-0.5 min-w-[16px] h-4 px-1',
'flex items-center justify-center',
'text-[10px] font-medium rounded',
'bg-muted/80 text-muted-foreground',
'border border-border/50',
'pointer-events-none'
)}
>
{hotkeyLabel}
</span>
)}
</button>
);
}
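// Sketch, not part of the diff: the hotkey badge mapping used above. Slots 0..8
// are labelled "1".."9" and slot 9 wraps to "0"; anything outside 0..9 gets no badge.
function hotkeyLabelFor(index: number | undefined): string | undefined {
if (index === undefined || index < 0 || index > 9) return undefined;
return index === 9 ? '0' : String(index + 1);
}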
apps/ui/src/components/layout/project-switcher/index.ts (new file)
@@ -0,0 +1 @@
export { ProjectSwitcher } from './project-switcher';
@@ -0,0 +1,486 @@
import { useState, useCallback, useEffect } from 'react';
import { Plus, Bug, FolderOpen } from 'lucide-react';
import { useNavigate } from '@tanstack/react-router';
import { cn } from '@/lib/utils';
import { useAppStore, type ThemeMode } from '@/store/app-store';
import { useOSDetection } from '@/hooks/use-os-detection';
import { ProjectSwitcherItem } from './components/project-switcher-item';
import { ProjectContextMenu } from './components/project-context-menu';
import { EditProjectDialog } from './components/edit-project-dialog';
import { NewProjectModal } from '@/components/dialogs/new-project-modal';
import { OnboardingDialog } from '@/components/layout/sidebar/dialogs';
import { useProjectCreation, useProjectTheme } from '@/components/layout/sidebar/hooks';
import type { Project } from '@/lib/electron';
import { getElectronAPI } from '@/lib/electron';
import { initializeProject, hasAppSpec, hasAutomakerDir } from '@/lib/project-init';
import { toast } from 'sonner';
import { CreateSpecDialog } from '@/components/views/spec-view/dialogs';

function getOSAbbreviation(os: string): string {
switch (os) {
case 'mac':
return 'M';
case 'windows':
return 'W';
case 'linux':
return 'L';
default:
return '?';
}
}

export function ProjectSwitcher() {
const navigate = useNavigate();
const {
projects,
currentProject,
setCurrentProject,
trashedProjects,
upsertAndSetCurrentProject,
specCreatingForProject,
setSpecCreatingForProject,
} = useAppStore();
const [contextMenuProject, setContextMenuProject] = useState<Project | null>(null);
const [contextMenuPosition, setContextMenuPosition] = useState<{ x: number; y: number } | null>(
null
);
const [editDialogProject, setEditDialogProject] = useState<Project | null>(null);

// Setup dialog state for opening existing projects
const [showSetupDialog, setShowSetupDialog] = useState(false);
const [setupProjectPath, setSetupProjectPath] = useState<string | null>(null);
const [projectOverview, setProjectOverview] = useState('');
const [generateFeatures, setGenerateFeatures] = useState(true);
const [analyzeProject, setAnalyzeProject] = useState(true);
const [featureCount, setFeatureCount] = useState(5);

// Derive isCreatingSpec from store state
const isCreatingSpec = specCreatingForProject !== null;

// Version info
const appVersion = typeof __APP_VERSION__ !== 'undefined' ? __APP_VERSION__ : '0.0.0';
const { os } = useOSDetection();
const appMode = import.meta.env.VITE_APP_MODE || '?';
const versionSuffix = `${getOSAbbreviation(os)}${appMode}`;

// Get global theme for project creation
const { globalTheme } = useProjectTheme();

// Project creation state and handlers
const {
showNewProjectModal,
setShowNewProjectModal,
isCreatingProject,
showOnboardingDialog,
setShowOnboardingDialog,
newProjectName,
handleCreateBlankProject,
handleCreateFromTemplate,
handleCreateFromCustomUrl,
} = useProjectCreation({
trashedProjects,
currentProject,
globalTheme,
upsertAndSetCurrentProject,
});

const handleContextMenu = (project: Project, event: React.MouseEvent) => {
event.preventDefault();
setContextMenuProject(project);
setContextMenuPosition({ x: event.clientX, y: event.clientY });
};

const handleCloseContextMenu = () => {
setContextMenuProject(null);
setContextMenuPosition(null);
};

const handleEditProject = (project: Project) => {
setEditDialogProject(project);
handleCloseContextMenu();
};

const handleProjectClick = useCallback(
(project: Project) => {
setCurrentProject(project);
// Navigate to board view when switching projects
navigate({ to: '/board' });
},
[setCurrentProject, navigate]
);

const handleNewProject = () => {
// Open the new project modal
setShowNewProjectModal(true);
};

const handleOnboardingSkip = () => {
setShowOnboardingDialog(false);
navigate({ to: '/board' });
};

const handleBugReportClick = useCallback(() => {
const api = getElectronAPI();
api.openExternalLink('https://github.com/AutoMaker-Org/automaker/issues');
}, []);

/**
* Opens the system folder selection dialog and initializes the selected project.
*/
const handleOpenFolder = useCallback(async () => {
const api = getElectronAPI();
const result = await api.openDirectory();

if (!result.canceled && result.filePaths[0]) {
const path = result.filePaths[0];
// Extract folder name from path (works on both Windows and Mac/Linux)
const name = path.split(/[/\\]/).filter(Boolean).pop() || 'Untitled Project';

try {
// Check if this is a brand new project (no .automaker directory)
const hadAutomakerDir = await hasAutomakerDir(path);

// Initialize the .automaker directory structure
const initResult = await initializeProject(path);

if (!initResult.success) {
toast.error('Failed to initialize project', {
description: initResult.error || 'Unknown error occurred',
});
return;
}

// Upsert project and set as current (handles both create and update cases)
// Theme preservation is handled by the store action
const trashedProject = trashedProjects.find((p) => p.path === path);
const effectiveTheme =
(trashedProject?.theme as ThemeMode | undefined) ||
(currentProject?.theme as ThemeMode | undefined) ||
globalTheme;
upsertAndSetCurrentProject(path, name, effectiveTheme);

// Check if app_spec.txt exists
const specExists = await hasAppSpec(path);

if (!hadAutomakerDir && !specExists) {
// This is a brand new project - show setup dialog
setSetupProjectPath(path);
setShowSetupDialog(true);
toast.success('Project opened', {
description: `Opened ${name}. Let's set up your app specification!`,
});
} else if (initResult.createdFiles && initResult.createdFiles.length > 0) {
toast.success(initResult.isNewProject ? 'Project initialized' : 'Project updated', {
description: `Set up ${initResult.createdFiles.length} file(s) in .automaker`,
});
} else {
toast.success('Project opened', {
description: `Opened ${name}`,
});
}

// Navigate to board view
navigate({ to: '/board' });
} catch (error) {
console.error('Failed to open project:', error);
toast.error('Failed to open project', {
description: error instanceof Error ? error.message : 'Unknown error',
});
}
}
}, [trashedProjects, upsertAndSetCurrentProject, currentProject, globalTheme, navigate]);

// Handler for creating initial spec from the setup dialog
const handleCreateInitialSpec = useCallback(async () => {
if (!setupProjectPath) return;

setSpecCreatingForProject(setupProjectPath);
setShowSetupDialog(false);

try {
const api = getElectronAPI();
await api.generateAppSpec({
projectPath: setupProjectPath,
projectOverview,
generateFeatures,
analyzeProject,
featureCount,
});
} catch (error) {
console.error('Failed to generate spec:', error);
toast.error('Failed to generate spec', {
description: error instanceof Error ? error.message : 'Unknown error',
});
setSpecCreatingForProject(null);
}
}, [
setupProjectPath,
projectOverview,
generateFeatures,
analyzeProject,
featureCount,
setSpecCreatingForProject,
]);

const handleSkipSetup = useCallback(() => {
setShowSetupDialog(false);
setSetupProjectPath(null);
}, []);

// Keyboard shortcuts for project switching (1-9, 0)
useEffect(() => {
const handleKeyDown = (event: KeyboardEvent) => {
// Ignore if user is typing in an input, textarea, or contenteditable
const target = event.target as HTMLElement;
if (target.tagName === 'INPUT' || target.tagName === 'TEXTAREA' || target.isContentEditable) {
return;
}

// Ignore if modifier keys are pressed (except for standalone number keys)
if (event.ctrlKey || event.metaKey || event.altKey) {
return;
}

// Map key to project index: "1" -> 0, "2" -> 1, ..., "9" -> 8, "0" -> 9
const key = event.key;
let projectIndex: number | null = null;

if (key >= '1' && key <= '9') {
projectIndex = parseInt(key, 10) - 1; // "1" -> 0, "9" -> 8
} else if (key === '0') {
projectIndex = 9; // "0" -> 9
}

if (projectIndex !== null && projectIndex < projects.length) {
const targetProject = projects[projectIndex];
if (targetProject && targetProject.id !== currentProject?.id) {
handleProjectClick(targetProject);
}
}
};

window.addEventListener('keydown', handleKeyDown);
return () => window.removeEventListener('keydown', handleKeyDown);
}, [projects, currentProject, handleProjectClick]);

return (
<>
<aside
className={cn(
'flex-shrink-0 flex flex-col w-16 z-50 relative',
// Glass morphism background with gradient
'bg-gradient-to-b from-sidebar/95 via-sidebar/85 to-sidebar/90 backdrop-blur-2xl',
// Premium border with subtle glow
'border-r border-border/60 shadow-[1px_0_20px_-5px_rgba(0,0,0,0.1)]'
)}
data-testid="project-switcher"
>
{/* Automaker Logo and Version */}
<div className="flex flex-col items-center pt-3 pb-2 px-2">
<button
onClick={() => navigate({ to: '/dashboard' })}
className="group flex flex-col items-center gap-0.5"
title="Go to Dashboard"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 256 256"
role="img"
aria-label="Automaker Logo"
className="size-10 group-hover:rotate-12 transition-transform duration-300 ease-out"
>
<defs>
<linearGradient
id="bg-switcher"
x1="0"
y1="0"
x2="256"
y2="256"
gradientUnits="userSpaceOnUse"
>
<stop offset="0%" style={{ stopColor: 'var(--brand-400)' }} />
<stop offset="100%" style={{ stopColor: 'var(--brand-600)' }} />
</linearGradient>
</defs>
<rect x="16" y="16" width="224" height="224" rx="56" fill="url(#bg-switcher)" />
<g
fill="none"
stroke="#FFFFFF"
strokeWidth="20"
strokeLinecap="round"
strokeLinejoin="round"
>
<path d="M92 92 L52 128 L92 164" />
<path d="M144 72 L116 184" />
<path d="M164 92 L204 128 L164 164" />
</g>
</svg>
<span className="text-[0.625rem] text-muted-foreground leading-none font-medium">
v{appVersion} {versionSuffix}
</span>
</button>
<div className="w-full h-px bg-border mt-3" />
</div>

{/* Projects List */}
<div className="flex-1 overflow-y-auto pt-1 pb-3 px-2 space-y-2">
{projects.map((project, index) => (
<ProjectSwitcherItem
key={project.id}
project={project}
isActive={currentProject?.id === project.id}
hotkeyIndex={index < 10 ? index : undefined}
onClick={() => handleProjectClick(project)}
onContextMenu={(e) => handleContextMenu(project, e)}
/>
))}

{/* Horizontal rule and Add Project Button - only show if there are projects */}
{projects.length > 0 && (
<>
<div className="w-full h-px bg-border my-2" />
<button
onClick={handleNewProject}
className={cn(
'w-full aspect-square rounded-xl flex items-center justify-center',
'transition-all duration-200 ease-out',
'text-muted-foreground hover:text-foreground',
'hover:bg-accent/50 border border-transparent hover:border-border/40',
'hover:shadow-sm hover:scale-105 active:scale-95'
)}
title="New Project"
data-testid="new-project-button"
>
<Plus className="w-5 h-5" />
</button>
<button
onClick={handleOpenFolder}
className={cn(
'w-full aspect-square rounded-xl flex items-center justify-center',
'transition-all duration-200 ease-out',
'text-muted-foreground hover:text-foreground',
'hover:bg-accent/50 border border-transparent hover:border-border/40',
'hover:shadow-sm hover:scale-105 active:scale-95'
)}
title="Open Project"
data-testid="open-project-button"
>
<FolderOpen className="w-5 h-5" />
</button>
</>
)}

{/* Add Project Button - when no projects, show without rule */}
{projects.length === 0 && (
<>
<button
onClick={handleNewProject}
className={cn(
'w-full aspect-square rounded-xl flex items-center justify-center',
'transition-all duration-200 ease-out',
'text-muted-foreground hover:text-foreground',
'hover:bg-accent/50 border border-transparent hover:border-border/40',
'hover:shadow-sm hover:scale-105 active:scale-95'
)}
title="New Project"
data-testid="new-project-button"
>
<Plus className="w-5 h-5" />
</button>
<button
onClick={handleOpenFolder}
className={cn(
'w-full aspect-square rounded-xl flex items-center justify-center',
'transition-all duration-200 ease-out',
'text-muted-foreground hover:text-foreground',
'hover:bg-accent/50 border border-transparent hover:border-border/40',
'hover:shadow-sm hover:scale-105 active:scale-95'
)}
title="Open Project"
data-testid="open-project-button"
>
<FolderOpen className="w-5 h-5" />
</button>
</>
)}
</div>

{/* Bug Report Button at the very bottom */}
<div className="p-2 border-t border-border/40">
<button
onClick={handleBugReportClick}
className={cn(
'w-full aspect-square rounded-xl flex items-center justify-center',
'transition-all duration-200 ease-out',
'text-muted-foreground hover:text-foreground',
'hover:bg-accent/50 border border-transparent hover:border-border/40',
'hover:shadow-sm hover:scale-105 active:scale-95'
)}
title="Report Bug / Feature Request"
data-testid="bug-report-button"
>
<Bug className="w-5 h-5" />
</button>
</div>
</aside>

{/* Context Menu */}
{contextMenuProject && contextMenuPosition && (
<ProjectContextMenu
project={contextMenuProject}
position={contextMenuPosition}
onClose={handleCloseContextMenu}
onEdit={handleEditProject}
/>
)}

{/* Edit Project Dialog */}
{editDialogProject && (
<EditProjectDialog
project={editDialogProject}
open={!!editDialogProject}
onOpenChange={(open) => !open && setEditDialogProject(null)}
/>
)}

{/* New Project Modal */}
<NewProjectModal
open={showNewProjectModal}
onOpenChange={setShowNewProjectModal}
onCreateBlankProject={handleCreateBlankProject}
onCreateFromTemplate={handleCreateFromTemplate}
onCreateFromCustomUrl={handleCreateFromCustomUrl}
isCreating={isCreatingProject}
/>

{/* Onboarding Dialog */}
<OnboardingDialog
open={showOnboardingDialog}
onOpenChange={setShowOnboardingDialog}
newProjectName={newProjectName}
onSkip={handleOnboardingSkip}
onGenerateSpec={handleOnboardingSkip}
/>

{/* Setup Dialog for Open Project */}
<CreateSpecDialog
open={showSetupDialog}
onOpenChange={setShowSetupDialog}
projectOverview={projectOverview}
onProjectOverviewChange={setProjectOverview}
generateFeatures={generateFeatures}
onGenerateFeaturesChange={setGenerateFeatures}
analyzeProject={analyzeProject}
onAnalyzeProjectChange={setAnalyzeProject}
featureCount={featureCount}
onFeatureCountChange={setFeatureCount}
onCreateSpec={handleCreateInitialSpec}
onSkip={handleSkipSetup}
isCreatingSpec={isCreatingSpec}
showSkipButton={true}
title="Set Up Your Project"
description="We didn't find an app_spec.txt file. Let us help you generate your app_spec.txt to help describe your project for our system. We'll analyze your project's tech stack and create a comprehensive specification."
/>
</>
);
}
@@ -17,9 +17,7 @@ import { CreateSpecDialog } from '@/components/views/spec-view/dialogs';
import {
CollapseToggleButton,
SidebarHeader,
ProjectActions,
SidebarNavigation,
ProjectSelectorWithOptions,
SidebarFooter,
} from './sidebar/components';
import { TrashDialog, OnboardingDialog } from './sidebar/dialogs';
@@ -59,15 +57,12 @@ export function Sidebar() {
} = useAppStore();

// Environment variable flags for hiding sidebar items
const { hideTerminal, hideWiki, hideRunningAgents, hideContext, hideSpecEditor, hideAiProfiles } =
const { hideTerminal, hideWiki, hideRunningAgents, hideContext, hideSpecEditor } =
SIDEBAR_FEATURE_FLAGS;

// Get customizable keyboard shortcuts
const shortcuts = useKeyboardShortcutsConfig();

// State for project picker (needed for keyboard shortcuts)
const [isProjectPickerOpen, setIsProjectPickerOpen] = useState(false);

// State for delete project confirmation dialog
const [showDeleteProjectDialog, setShowDeleteProjectDialog] = useState(false);
@@ -127,6 +122,9 @@ export function Sidebar() {
// Derive isCreatingSpec from store state
const isCreatingSpec = specCreatingForProject !== null;
const creatingSpecProjectPath = specCreatingForProject;
// Check if the current project is specifically the one generating spec
const isCurrentProjectGeneratingSpec =
specCreatingForProject !== null && specCreatingForProject === currentProject?.path;

// Auto-collapse sidebar on small screens and update Electron window minWidth
useSidebarAutoCollapse({ sidebarOpen, toggleSidebar });
@@ -232,17 +230,16 @@ export function Sidebar() {
hideSpecEditor,
hideContext,
hideTerminal,
hideAiProfiles,
currentProject,
projects,
projectHistory,
navigate,
toggleSidebar,
handleOpenFolder,
setIsProjectPickerOpen,
cyclePrevProject,
cycleNextProject,
unviewedValidationsCount,
isSpecGenerating: isCurrentProjectGeneratingSpec,
});

// Register keyboard shortcuts
@@ -255,121 +252,114 @@
};

return (
<aside
className={cn(
'flex-shrink-0 flex flex-col z-30 relative',
// Glass morphism background with gradient
'bg-gradient-to-b from-sidebar/95 via-sidebar/85 to-sidebar/90 backdrop-blur-2xl',
// Premium border with subtle glow
'border-r border-border/60 shadow-[1px_0_20px_-5px_rgba(0,0,0,0.1)]',
// Smooth width transition
'transition-all duration-300 ease-[cubic-bezier(0.4,0,0.2,1)]',
sidebarOpen ? 'w-16 lg:w-72' : 'w-16'
<>
{/* Mobile backdrop overlay */}
{sidebarOpen && (
<div
className="fixed inset-0 bg-black/50 z-20 lg:hidden"
onClick={toggleSidebar}
data-testid="sidebar-backdrop"
/>
)}
data-testid="sidebar"
>
<CollapseToggleButton
sidebarOpen={sidebarOpen}
toggleSidebar={toggleSidebar}
shortcut={shortcuts.toggleSidebar}
/>

<div className="flex-1 flex flex-col overflow-hidden">
<SidebarHeader sidebarOpen={sidebarOpen} navigate={navigate} />

{/* Project Actions - Moved above project selector */}
{sidebarOpen && (
<ProjectActions
setShowNewProjectModal={setShowNewProjectModal}
handleOpenFolder={handleOpenFolder}
setShowTrashDialog={setShowTrashDialog}
trashedProjects={trashedProjects}
shortcuts={{ openProject: shortcuts.openProject }}
/>
<aside
className={cn(
'flex-shrink-0 flex flex-col z-30',
// Glass morphism background with gradient
'bg-gradient-to-b from-sidebar/95 via-sidebar/85 to-sidebar/90 backdrop-blur-2xl',
// Premium border with subtle glow
'border-r border-border/60 shadow-[1px_0_20px_-5px_rgba(0,0,0,0.1)]',
// Smooth width transition
'transition-all duration-300 ease-[cubic-bezier(0.4,0,0.2,1)]',
// Mobile: overlay when open, collapsed when closed
sidebarOpen ? 'fixed inset-y-0 left-0 w-72 lg:relative lg:w-72' : 'relative w-16'
)}

<ProjectSelectorWithOptions
data-testid="sidebar"
>
<CollapseToggleButton
sidebarOpen={sidebarOpen}
isProjectPickerOpen={isProjectPickerOpen}
setIsProjectPickerOpen={setIsProjectPickerOpen}
setShowDeleteProjectDialog={setShowDeleteProjectDialog}
toggleSidebar={toggleSidebar}
shortcut={shortcuts.toggleSidebar}
/>

<SidebarNavigation
currentProject={currentProject}
<div className="flex-1 flex flex-col overflow-hidden">
<SidebarHeader sidebarOpen={sidebarOpen} currentProject={currentProject} />

<SidebarNavigation
currentProject={currentProject}
sidebarOpen={sidebarOpen}
navSections={navSections}
isActiveRoute={isActiveRoute}
navigate={navigate}
/>
</div>

<SidebarFooter
sidebarOpen={sidebarOpen}
navSections={navSections}
isActiveRoute={isActiveRoute}
navigate={navigate}
hideWiki={hideWiki}
hideRunningAgents={hideRunningAgents}
runningAgentsCount={runningAgentsCount}
shortcuts={{ settings: shortcuts.settings }}
/>
<TrashDialog
open={showTrashDialog}
onOpenChange={setShowTrashDialog}
trashedProjects={trashedProjects}
activeTrashId={activeTrashId}
handleRestoreProject={handleRestoreProject}
handleDeleteProjectFromDisk={handleDeleteProjectFromDisk}
deleteTrashedProject={deleteTrashedProject}
handleEmptyTrash={handleEmptyTrash}
isEmptyingTrash={isEmptyingTrash}
/>
</div>

<SidebarFooter
sidebarOpen={sidebarOpen}
isActiveRoute={isActiveRoute}
navigate={navigate}
hideWiki={hideWiki}
hideRunningAgents={hideRunningAgents}
runningAgentsCount={runningAgentsCount}
shortcuts={{ settings: shortcuts.settings }}
/>
<TrashDialog
open={showTrashDialog}
onOpenChange={setShowTrashDialog}
trashedProjects={trashedProjects}
activeTrashId={activeTrashId}
handleRestoreProject={handleRestoreProject}
handleDeleteProjectFromDisk={handleDeleteProjectFromDisk}
deleteTrashedProject={deleteTrashedProject}
handleEmptyTrash={handleEmptyTrash}
isEmptyingTrash={isEmptyingTrash}
/>
{/* New Project Setup Dialog */}
<CreateSpecDialog
open={showSetupDialog}
onOpenChange={setShowSetupDialog}
projectOverview={projectOverview}
onProjectOverviewChange={setProjectOverview}
generateFeatures={generateFeatures}
onGenerateFeaturesChange={setGenerateFeatures}
analyzeProject={analyzeProject}
onAnalyzeProjectChange={setAnalyzeProject}
featureCount={featureCount}
onFeatureCountChange={setFeatureCount}
onCreateSpec={handleCreateInitialSpec}
onSkip={handleSkipSetup}
isCreatingSpec={isCreatingSpec}
showSkipButton={true}
title="Set Up Your Project"
description="We didn't find an app_spec.txt file. Let us help you generate your app_spec.txt to help describe your project for our system. We'll analyze your project's tech stack and create a comprehensive specification."
/>

{/* New Project Setup Dialog */}
<CreateSpecDialog
open={showSetupDialog}
onOpenChange={setShowSetupDialog}
projectOverview={projectOverview}
onProjectOverviewChange={setProjectOverview}
generateFeatures={generateFeatures}
onGenerateFeaturesChange={setGenerateFeatures}
analyzeProject={analyzeProject}
onAnalyzeProjectChange={setAnalyzeProject}
featureCount={featureCount}
onFeatureCountChange={setFeatureCount}
onCreateSpec={handleCreateInitialSpec}
onSkip={handleSkipSetup}
isCreatingSpec={isCreatingSpec}
showSkipButton={true}
title="Set Up Your Project"
description="We didn't find an app_spec.txt file. Let us help you generate your app_spec.txt to help describe your project for our system. We'll analyze your project's tech stack and create a comprehensive specification."
/>
<OnboardingDialog
open={showOnboardingDialog}
onOpenChange={setShowOnboardingDialog}
newProjectName={newProjectName}
onSkip={handleOnboardingSkip}
onGenerateSpec={handleOnboardingGenerateSpec}
/>

<OnboardingDialog
open={showOnboardingDialog}
onOpenChange={setShowOnboardingDialog}
newProjectName={newProjectName}
onSkip={handleOnboardingSkip}
onGenerateSpec={handleOnboardingGenerateSpec}
/>
{/* Delete Project Confirmation Dialog */}
<DeleteProjectDialog
open={showDeleteProjectDialog}
onOpenChange={setShowDeleteProjectDialog}
project={currentProject}
onConfirm={moveProjectToTrash}
/>

{/* Delete Project Confirmation Dialog */}
<DeleteProjectDialog
open={showDeleteProjectDialog}
onOpenChange={setShowDeleteProjectDialog}
project={currentProject}
onConfirm={moveProjectToTrash}
/>

{/* New Project Modal */}
<NewProjectModal
open={showNewProjectModal}
onOpenChange={setShowNewProjectModal}
onCreateBlankProject={handleCreateBlankProject}
onCreateFromTemplate={handleCreateFromTemplate}
onCreateFromCustomUrl={handleCreateFromCustomUrl}
isCreating={isCreatingProject}
/>
</aside>
{/* New Project Modal */}
<NewProjectModal
open={showNewProjectModal}
onOpenChange={setShowNewProjectModal}
onCreateBlankProject={handleCreateBlankProject}
onCreateFromTemplate={handleCreateFromTemplate}
onCreateFromCustomUrl={handleCreateFromCustomUrl}
isCreating={isCreatingProject}
/>
</aside>
</>
);
}
@@ -32,14 +32,14 @@ export function AutomakerLogo({ sidebarOpen, navigate }: AutomakerLogoProps) {
'flex items-center gap-3 titlebar-no-drag cursor-pointer group',
!sidebarOpen && 'flex-col gap-1'
)}
onClick={() => navigate({ to: '/' })}
onClick={() => navigate({ to: '/dashboard' })}
data-testid="logo-button"
>
{/* Collapsed logo - shown when sidebar is closed OR on small screens when sidebar is open */}
{/* Collapsed logo - only shown when sidebar is closed */}
<div
className={cn(
'relative flex flex-col items-center justify-center rounded-lg gap-0.5',
sidebarOpen ? 'flex lg:hidden' : 'flex'
sidebarOpen ? 'hidden' : 'flex'
)}
>
<svg
@@ -90,16 +90,16 @@ export function AutomakerLogo({ sidebarOpen, navigate }: AutomakerLogoProps) {
</span>
</div>

{/* Expanded logo - only shown when sidebar is open on large screens */}
{/* Expanded logo - shown when sidebar is open */}
{sidebarOpen && (
<div className="hidden lg:flex flex-col">
<div className="flex flex-col">
<div className="flex items-center gap-1">
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 256 256"
role="img"
aria-label="automaker"
className="h-[36.8px] w-[36.8px] group-hover:rotate-12 transition-transform duration-300 ease-out"
className="h-8 w-8 lg:h-[36.8px] lg:w-[36.8px] shrink-0 group-hover:rotate-12 transition-transform duration-300 ease-out"
>
<defs>
<linearGradient
@@ -137,11 +137,11 @@ export function AutomakerLogo({ sidebarOpen, navigate }: AutomakerLogoProps) {
<path d="M164 92 L204 128 L164 164" />
</g>
</svg>
<span className="font-bold text-foreground text-[1.7rem] tracking-tight leading-none translate-y-[-2px]">
<span className="font-bold text-foreground text-xl lg:text-[1.7rem] tracking-tight leading-none translate-y-[-2px]">
automaker<span className="text-brand-500">.</span>
</span>
</div>
<span className="text-[0.625rem] text-muted-foreground leading-none font-medium ml-[38.8px]">
<span className="text-[0.625rem] text-muted-foreground leading-none font-medium ml-9 lg:ml-[38.8px]">
v{appVersion} {versionSuffix}
</span>
</div>
@@ -17,7 +17,7 @@ export function CollapseToggleButton({
<button
onClick={toggleSidebar}
className={cn(
'hidden lg:flex absolute top-[68px] -right-3 z-9999',
'flex absolute top-[68px] -right-3 z-9999',
'group/toggle items-center justify-center w-7 h-7 rounded-full',
// Glass morphism button
'bg-card/95 backdrop-blur-sm border border-border/80',
@@ -40,7 +40,7 @@ export function ProjectActions({
data-testid="new-project-button"
>
<Plus className="w-4 h-4 shrink-0 transition-transform duration-200 group-hover:rotate-90 group-hover:text-brand-500" />
<span className="ml-2 text-sm font-medium hidden lg:block whitespace-nowrap">New</span>
<span className="ml-2 text-sm font-medium block whitespace-nowrap">New</span>
</button>
<button
onClick={handleOpenFolder}
@@ -59,7 +59,7 @@ export function ProjectActions({
data-testid="open-project-button"
>
<FolderOpen className="w-4 h-4 shrink-0 transition-transform duration-200 group-hover:scale-110" />
<span className="hidden lg:flex items-center justify-center min-w-5 h-5 px-1.5 text-[10px] font-mono rounded-md bg-muted/80 text-muted-foreground ml-2">
<span className="flex items-center justify-center min-w-5 h-5 px-1.5 text-[10px] font-mono rounded-md bg-muted/80 text-muted-foreground ml-2">
{formatShortcut(shortcuts.openProject, true)}
</span>
</button>
@@ -117,7 +117,7 @@ export function ProjectSelectorWithOptions({
</div>
<div className="flex items-center gap-1.5">
<span
className="hidden lg:flex items-center justify-center min-w-5 h-5 px-1.5 text-[10px] font-mono rounded-md bg-muted text-muted-foreground"
className="hidden sm:flex items-center justify-center min-w-5 h-5 px-1.5 text-[10px] font-mono rounded-md bg-muted text-muted-foreground"
data-testid="project-picker-shortcut"
>
{formatShortcut(shortcuts.projectPicker, true)}
@@ -219,7 +219,7 @@ export function ProjectSelectorWithOptions({
<DropdownMenuTrigger asChild>
<button
className={cn(
'hidden lg:flex items-center justify-center w-[42px] h-[42px] rounded-lg',
'flex items-center justify-center w-[42px] h-[42px] rounded-lg',
'text-muted-foreground hover:text-foreground',
'bg-transparent hover:bg-accent/60',
'border border-border/50 hover:border-border',