mirror of
https://github.com/AutoMaker-Org/automaker.git
synced 2026-01-30 14:22:02 +00:00
Compare commits
364 Commits
v0.9.0
...
feat/coder
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5b620011ad | ||
|
|
327aef89a2 | ||
|
|
44e665f1bf | ||
|
|
5b1e0105f4 | ||
|
|
832d10e133 | ||
|
|
044c3d50d1 | ||
|
|
a1de0a78a0 | ||
|
|
fef9639e01 | ||
|
|
aef479218d | ||
|
|
ded5ecf4e9 | ||
|
|
a01f299597 | ||
|
|
21c9e88a86 | ||
|
|
af17f6e36f | ||
|
|
e69a2ad722 | ||
|
|
0480f6ccd6 | ||
|
|
24042d20c2 | ||
|
|
9c3b3a4104 | ||
|
|
17e2cdfc85 | ||
|
|
466c34afd4 | ||
|
|
b9567f5904 | ||
|
|
c2cf8ae892 | ||
|
|
3aa3c10ea4 | ||
|
|
5cd4183a7b | ||
|
|
2d9e38ad99 | ||
|
|
93d73f6d26 | ||
|
|
5209395a74 | ||
|
|
ef6b9ac2d2 | ||
|
|
92afbeb6bd | ||
|
|
bbdc11ce47 | ||
|
|
545bf2045d | ||
|
|
a0471098fa | ||
|
|
3320b40d15 | ||
|
|
bac5e1c220 | ||
|
|
33fa138d21 | ||
|
|
bc09a22e1f | ||
|
|
b771b51842 | ||
|
|
1a7bf27ead | ||
|
|
f3b00d0f78 | ||
|
|
c747baaee2 | ||
|
|
1322722db2 | ||
|
|
aa35eb3d3a | ||
|
|
616e2ef75f | ||
|
|
d98cae124f | ||
|
|
26aaef002d | ||
|
|
09bb59d090 | ||
|
|
2f38ffe2d5 | ||
|
|
12fa9d858d | ||
|
|
c4e1a58e0d | ||
|
|
8661f33c6d | ||
|
|
5c24ca2220 | ||
|
|
14559354dd | ||
|
|
3bf9dbd43a | ||
|
|
bd3999416b | ||
|
|
cc9f7d48c8 | ||
|
|
6bb0461be7 | ||
|
|
16ef026b38 | ||
|
|
50ed405c4a | ||
|
|
5407e1a9ff | ||
|
|
5436b18f70 | ||
|
|
8b7700364d | ||
|
|
3bdf3cbb5c | ||
|
|
45d9c9a5d8 | ||
|
|
6a23e6ce78 | ||
|
|
4e53215104 | ||
|
|
2899b6d416 | ||
|
|
b263cc615e | ||
|
|
97b0028919 | ||
|
|
fd1727a443 | ||
|
|
597cb9bfae | ||
|
|
c2430e5bd3 | ||
|
|
68df8efd10 | ||
|
|
c0d64bc994 | ||
|
|
6237f1a0fe | ||
|
|
30c50d9b78 | ||
|
|
03516ac09e | ||
|
|
5e5a136f1f | ||
|
|
98c50d44a4 | ||
|
|
0e9369816f | ||
|
|
be63a59e9c | ||
|
|
dbb84aba23 | ||
|
|
9819d2e91c | ||
|
|
4c24ba5a8b | ||
|
|
e67cab1e07 | ||
|
|
132b8f7529 | ||
|
|
d651e9d8d6 | ||
|
|
92f14508aa | ||
|
|
842b059fac | ||
|
|
49f9ecc168 | ||
|
|
e02fd889c2 | ||
|
|
52a821d3bb | ||
|
|
becd79f1e3 | ||
|
|
883ad2a04b | ||
|
|
bf93cdf0c4 | ||
|
|
c0ea1c736a | ||
|
|
8b448b9481 | ||
|
|
12f2b9f2b3 | ||
|
|
017ff3ca0a | ||
|
|
bcec178bbe | ||
|
|
e3347c7b9c | ||
|
|
6529446281 | ||
|
|
379551c40e | ||
|
|
7465017600 | ||
|
|
874c5a36de | ||
|
|
03436103d1 | ||
|
|
cb544e0011 | ||
|
|
df23c9e6ab | ||
|
|
52cc82fb3f | ||
|
|
d9571bfb8d | ||
|
|
07d800b589 | ||
|
|
ec042de69c | ||
|
|
585ae32c32 | ||
|
|
a89ba04109 | ||
|
|
05a3b95d75 | ||
|
|
0e269ca15d | ||
|
|
fd03cb4afa | ||
|
|
d6c5c93fe5 | ||
|
|
1abf219230 | ||
|
|
3a2ba6dbfe | ||
|
|
8fa8ba0a16 | ||
|
|
285f526e0c | ||
|
|
bd68b497ac | ||
|
|
06b047cfcb | ||
|
|
c585cee12f | ||
|
|
241fd0b252 | ||
|
|
164acc1b4e | ||
|
|
78e5ddb4a8 | ||
|
|
43904cdb02 | ||
|
|
7ea1383e10 | ||
|
|
425e38811f | ||
|
|
f6bda66ed4 | ||
|
|
0df7e4a33d | ||
|
|
41ad717b8e | ||
|
|
fec5f88d91 | ||
|
|
724858d215 | ||
|
|
2b93afbd43 | ||
|
|
ca0f3ecedf | ||
|
|
ee0d0c6c59 | ||
|
|
ac38e85f3c | ||
|
|
ca3286a374 | ||
|
|
0898578c11 | ||
|
|
07593f8704 | ||
|
|
3f8a8db7a5 | ||
|
|
13eead3855 | ||
|
|
cb910feae9 | ||
|
|
c75f9a29cb | ||
|
|
3c5e453b01 | ||
|
|
63e0ffac42 | ||
|
|
d0155f28c8 | ||
|
|
27ca08d98a | ||
|
|
df99950475 | ||
|
|
6a85073d94 | ||
|
|
7b73ff34f1 | ||
|
|
8419b12f3f | ||
|
|
f1a5bcd17a | ||
|
|
28d8a4cc9e | ||
|
|
7108cdd2ca | ||
|
|
e7bfb19203 | ||
|
|
beac823472 | ||
|
|
c7fac3d9e6 | ||
|
|
3689eb969d | ||
|
|
5e330b7691 | ||
|
|
5ec5fe82e6 | ||
|
|
ee13bf9a8f | ||
|
|
219af28afc | ||
|
|
b64025b134 | ||
|
|
51e4e8489a | ||
|
|
bb70d04b88 | ||
|
|
32f6c6d6eb | ||
|
|
b6688e630e | ||
|
|
073f6d5793 | ||
|
|
9153b06f09 | ||
|
|
6cb2af8757 | ||
|
|
ca3b013a7b | ||
|
|
abde1ba40a | ||
|
|
b04659fb56 | ||
|
|
74ee30d5db | ||
|
|
a300466ca9 | ||
|
|
9311f2e62a | ||
|
|
67245158ea | ||
|
|
520d9a945c | ||
|
|
fa3ead0e8d | ||
|
|
253ab94646 | ||
|
|
fbb3f697e1 | ||
|
|
1a1517dffb | ||
|
|
690cf1f281 | ||
|
|
6f55da46ac | ||
|
|
57453966ac | ||
|
|
298acc9f89 | ||
|
|
f4390bc82f | ||
|
|
62af2031f6 | ||
|
|
0ddd672e0e | ||
|
|
7ef525effa | ||
|
|
2303dcd133 | ||
|
|
cc4f39a6ab | ||
|
|
d4076ad0ce | ||
|
|
3bd8626d48 | ||
|
|
ff5915dd20 | ||
|
|
8500f71565 | ||
|
|
81bab1d8ab | ||
|
|
24a6633322 | ||
|
|
f073f6ecc3 | ||
|
|
2870ddb223 | ||
|
|
1578d02e70 | ||
|
|
bb710ada1a | ||
|
|
33ae860059 | ||
|
|
3de6d58af3 | ||
|
|
c8e66a866e | ||
|
|
c25efdc0d8 | ||
|
|
bde82492ae | ||
|
|
67f18021c3 | ||
|
|
6704293cb1 | ||
|
|
8f1740c0f5 | ||
|
|
62019d5916 | ||
|
|
e66283b1d6 | ||
|
|
a0d6d76626 | ||
|
|
c2f5c07038 | ||
|
|
419abf88dd | ||
|
|
b7596617ed | ||
|
|
26da99e834 | ||
|
|
2b33a0d322 | ||
|
|
c796adbae8 | ||
|
|
18d82b1bb1 | ||
|
|
0c68fcc8c8 | ||
|
|
e4458b8222 | ||
|
|
eb8ebe3ce0 | ||
|
|
0dc70addb6 | ||
|
|
f3f5d05349 | ||
|
|
0c4b833b07 | ||
|
|
029c5ca855 | ||
|
|
1f270edbe1 | ||
|
|
47c188d8f9 | ||
|
|
cca4638b71 | ||
|
|
19c12b7813 | ||
|
|
0261ec2892 | ||
|
|
5e4f5f86cd | ||
|
|
fbab1d323f | ||
|
|
8b19266c9a | ||
|
|
1b9d194dd1 | ||
|
|
74c793b6c6 | ||
|
|
d1222268c3 | ||
|
|
df7a0f8687 | ||
|
|
c7def000df | ||
|
|
e2394244f6 | ||
|
|
007830ec74 | ||
|
|
f721eb7152 | ||
|
|
e56db2362c | ||
|
|
d2c7a9e05d | ||
|
|
acce06b304 | ||
|
|
4ab54270db | ||
|
|
f50520c93f | ||
|
|
cebf57ffd3 | ||
|
|
6020219fda | ||
|
|
8094941385 | ||
|
|
9ce3cfee7d | ||
|
|
6184440441 | ||
|
|
0cff4cf510 | ||
|
|
b152f119c5 | ||
|
|
9f936c6968 | ||
|
|
b8531cf7e8 | ||
|
|
edcc4e789b | ||
|
|
20cc401238 | ||
|
|
70204a2d36 | ||
|
|
e38325c27f | ||
|
|
5e4b422315 | ||
|
|
6c5206daf4 | ||
|
|
ed65f70315 | ||
|
|
f41a42010c | ||
|
|
aa8caeaeb0 | ||
|
|
a0669d4262 | ||
|
|
a4a792c6b1 | ||
|
|
6842e4c7f7 | ||
|
|
6638c35945 | ||
|
|
53f5c2b2bb | ||
|
|
6e13cdd516 | ||
|
|
a48c67d271 | ||
|
|
43fc3de2e1 | ||
|
|
80081b60bf | ||
|
|
cbca9b68e6 | ||
|
|
b9b3695497 | ||
|
|
1b9acb1395 | ||
|
|
01cf81a105 | ||
|
|
6381ecaa37 | ||
|
|
6d267ce0fa | ||
|
|
8b0b565282 | ||
|
|
a046d1232e | ||
|
|
d724e782dd | ||
|
|
a266d85ecd | ||
|
|
a4a111fad0 | ||
|
|
2a98de85a8 | ||
|
|
fb3a8499f3 | ||
|
|
33dd9ae347 | ||
|
|
ac87594b5d | ||
|
|
32656a9662 | ||
|
|
785a4d2c3b | ||
|
|
41a6c7f712 | ||
|
|
7e5d915b60 | ||
|
|
8321c06e16 | ||
|
|
f60c18d31a | ||
|
|
e171b6a049 | ||
|
|
6e4b611662 | ||
|
|
7522e58fee | ||
|
|
317c21ffc0 | ||
|
|
9c5fe44617 | ||
|
|
7f79d9692c | ||
|
|
2d4ffc7514 | ||
|
|
5f3db1f25e | ||
|
|
7115460804 | ||
|
|
0db8808b2a | ||
|
|
cf3ed1dd8f | ||
|
|
da682e3993 | ||
|
|
4a59e901e6 | ||
|
|
8ed2fa07a0 | ||
|
|
385e7f5c1e | ||
|
|
861fff1aae | ||
|
|
09527b3b67 | ||
|
|
d98ff16c8f | ||
|
|
e902e8ea4c | ||
|
|
aeb5bd829f | ||
|
|
a92457b871 | ||
|
|
c24e6207d0 | ||
|
|
6c412cd367 | ||
|
|
89a960629a | ||
|
|
05d96a7d6e | ||
|
|
41144ff1fa | ||
|
|
360cddcb91 | ||
|
|
427832e72e | ||
|
|
27c60658f7 | ||
|
|
fa8ae149d3 | ||
|
|
0c19beb11c | ||
|
|
e34e4a59e9 | ||
|
|
7cc092cd59 | ||
|
|
51cd7156d2 | ||
|
|
1dc843d2d0 | ||
|
|
4040bef4b8 | ||
|
|
e64a850f57 | ||
|
|
555523df38 | ||
|
|
dd882139f3 | ||
|
|
a67b8c6109 | ||
|
|
134208dab6 | ||
|
|
887343d232 | ||
|
|
299b838400 | ||
|
|
c5d0a8be7d | ||
|
|
fe433a84c9 | ||
|
|
543aa7a27b | ||
|
|
36ddf0513b | ||
|
|
c99883e634 | ||
|
|
604f98b08f | ||
|
|
c5009a0333 | ||
|
|
99b05d35a2 | ||
|
|
a3ecc6fe02 | ||
|
|
fc20dd5ad4 | ||
|
|
eb94e4de72 | ||
|
|
0fa5fdd478 | ||
|
|
472342c246 | ||
|
|
71e03c2a13 | ||
|
|
c3403c033c | ||
|
|
2a87d55519 | ||
|
|
2d309f6833 | ||
|
|
7a2a3ef500 | ||
|
|
3ff9658723 | ||
|
|
c587947de6 | ||
|
|
a9403651d4 | ||
|
|
d2f64f10ff | ||
|
|
9fe5b485f8 |
108
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
108
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
name: Feature Request
|
||||||
|
description: Suggest a new feature or enhancement for Automaker
|
||||||
|
title: '[Feature]: '
|
||||||
|
labels: ['enhancement']
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Thanks for taking the time to suggest a feature! Please fill out the form below to help us understand your request.
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: feature-area
|
||||||
|
attributes:
|
||||||
|
label: Feature Area
|
||||||
|
description: Which area of Automaker does this feature relate to?
|
||||||
|
options:
|
||||||
|
- UI/UX (User Interface)
|
||||||
|
- Agent/AI
|
||||||
|
- Kanban Board
|
||||||
|
- Git/Worktree Management
|
||||||
|
- Project Management
|
||||||
|
- Settings/Configuration
|
||||||
|
- Documentation
|
||||||
|
- Performance
|
||||||
|
- Other
|
||||||
|
default: 0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: priority
|
||||||
|
attributes:
|
||||||
|
label: Priority
|
||||||
|
description: How important is this feature to your workflow?
|
||||||
|
options:
|
||||||
|
- Nice to have
|
||||||
|
- Would improve my workflow
|
||||||
|
- Critical for my use case
|
||||||
|
default: 0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: problem-statement
|
||||||
|
attributes:
|
||||||
|
label: Problem Statement
|
||||||
|
description: Is your feature request related to a problem? Please describe the problem you're trying to solve.
|
||||||
|
placeholder: A clear and concise description of what the problem is. Ex. I'm always frustrated when...
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: proposed-solution
|
||||||
|
attributes:
|
||||||
|
label: Proposed Solution
|
||||||
|
description: Describe the solution you'd like to see implemented.
|
||||||
|
placeholder: A clear and concise description of what you want to happen.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: alternatives-considered
|
||||||
|
attributes:
|
||||||
|
label: Alternatives Considered
|
||||||
|
description: Describe any alternative solutions or workarounds you've considered.
|
||||||
|
placeholder: A clear and concise description of any alternative solutions or features you've considered.
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: use-cases
|
||||||
|
attributes:
|
||||||
|
label: Use Cases
|
||||||
|
description: Describe specific scenarios where this feature would be useful.
|
||||||
|
placeholder: |
|
||||||
|
1. When working on...
|
||||||
|
2. As a user who needs to...
|
||||||
|
3. In situations where...
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: mockups
|
||||||
|
attributes:
|
||||||
|
label: Mockups/Screenshots
|
||||||
|
description: If applicable, add mockups, wireframes, or screenshots to help illustrate your feature request.
|
||||||
|
placeholder: Drag and drop images here or paste image URLs
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: additional-context
|
||||||
|
attributes:
|
||||||
|
label: Additional Context
|
||||||
|
description: Add any other context, references, or examples about the feature request here.
|
||||||
|
placeholder: Any additional information that might be helpful...
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: checkboxes
|
||||||
|
id: terms
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
options:
|
||||||
|
- label: I have searched existing issues to ensure this feature hasn't been requested already
|
||||||
|
required: true
|
||||||
|
- label: I have provided a clear description of the problem and proposed solution
|
||||||
|
required: true
|
||||||
3
.github/actions/setup-project/action.yml
vendored
3
.github/actions/setup-project/action.yml
vendored
@@ -41,7 +41,8 @@ runs:
|
|||||||
# Use npm install instead of npm ci to correctly resolve platform-specific
|
# Use npm install instead of npm ci to correctly resolve platform-specific
|
||||||
# optional dependencies (e.g., @tailwindcss/oxide, lightningcss binaries)
|
# optional dependencies (e.g., @tailwindcss/oxide, lightningcss binaries)
|
||||||
# Skip scripts to avoid electron-builder install-app-deps which uses too much memory
|
# Skip scripts to avoid electron-builder install-app-deps which uses too much memory
|
||||||
run: npm install --ignore-scripts
|
# Use --force to allow platform-specific dev dependencies like dmg-license on non-darwin platforms
|
||||||
|
run: npm install --ignore-scripts --force
|
||||||
|
|
||||||
- name: Install Linux native bindings
|
- name: Install Linux native bindings
|
||||||
shell: bash
|
shell: bash
|
||||||
|
|||||||
88
.github/workflows/e2e-tests.yml
vendored
88
.github/workflows/e2e-tests.yml
vendored
@@ -37,7 +37,14 @@ jobs:
|
|||||||
git config --global user.email "ci@example.com"
|
git config --global user.email "ci@example.com"
|
||||||
|
|
||||||
- name: Start backend server
|
- name: Start backend server
|
||||||
run: npm run start --workspace=apps/server &
|
run: |
|
||||||
|
echo "Starting backend server..."
|
||||||
|
# Start server in background and save PID
|
||||||
|
npm run start --workspace=apps/server > backend.log 2>&1 &
|
||||||
|
SERVER_PID=$!
|
||||||
|
echo "Server started with PID: $SERVER_PID"
|
||||||
|
echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV
|
||||||
|
|
||||||
env:
|
env:
|
||||||
PORT: 3008
|
PORT: 3008
|
||||||
NODE_ENV: test
|
NODE_ENV: test
|
||||||
@@ -53,21 +60,70 @@ jobs:
|
|||||||
- name: Wait for backend server
|
- name: Wait for backend server
|
||||||
run: |
|
run: |
|
||||||
echo "Waiting for backend server to be ready..."
|
echo "Waiting for backend server to be ready..."
|
||||||
|
|
||||||
|
# Check if server process is running
|
||||||
|
if [ -z "$SERVER_PID" ]; then
|
||||||
|
echo "ERROR: Server PID not found in environment"
|
||||||
|
cat backend.log 2>/dev/null || echo "No backend log found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if process is actually running
|
||||||
|
if ! kill -0 $SERVER_PID 2>/dev/null; then
|
||||||
|
echo "ERROR: Server process $SERVER_PID is not running!"
|
||||||
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
echo ""
|
||||||
|
echo "=== Recent system logs ==="
|
||||||
|
dmesg 2>/dev/null | tail -20 || echo "No dmesg available"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Wait for health endpoint
|
||||||
for i in {1..60}; do
|
for i in {1..60}; do
|
||||||
if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
|
if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
|
||||||
echo "Backend server is ready!"
|
echo "Backend server is ready!"
|
||||||
curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check response: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
echo ""
|
||||||
|
echo "Health check response:"
|
||||||
|
curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Check if server process is still running
|
||||||
|
if ! kill -0 $SERVER_PID 2>/dev/null; then
|
||||||
|
echo "ERROR: Server process died during wait!"
|
||||||
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Waiting... ($i/60)"
|
echo "Waiting... ($i/60)"
|
||||||
sleep 1
|
sleep 1
|
||||||
done
|
done
|
||||||
echo "Backend server failed to start!"
|
|
||||||
echo "Checking server status..."
|
echo "ERROR: Backend server failed to start within 60 seconds!"
|
||||||
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
echo ""
|
||||||
|
echo "=== Process status ==="
|
||||||
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
|
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
|
||||||
|
echo ""
|
||||||
|
echo "=== Port status ==="
|
||||||
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
|
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
|
||||||
echo "Testing health endpoint..."
|
lsof -i :3008 2>/dev/null || echo "lsof not available or port not in use"
|
||||||
|
echo ""
|
||||||
|
echo "=== Health endpoint test ==="
|
||||||
curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"
|
curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"
|
||||||
|
|
||||||
|
# Kill the server process if it's still hanging
|
||||||
|
if kill -0 $SERVER_PID 2>/dev/null; then
|
||||||
|
echo ""
|
||||||
|
echo "Killing stuck server process..."
|
||||||
|
kill -9 $SERVER_PID 2>/dev/null || true
|
||||||
|
fi
|
||||||
|
|
||||||
exit 1
|
exit 1
|
||||||
|
|
||||||
- name: Run E2E tests
|
- name: Run E2E tests
|
||||||
@@ -81,6 +137,18 @@ jobs:
|
|||||||
# Keep UI-side login/defaults consistent
|
# Keep UI-side login/defaults consistent
|
||||||
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
|
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
|
||||||
|
|
||||||
|
- name: Print backend logs on failure
|
||||||
|
if: failure()
|
||||||
|
run: |
|
||||||
|
echo "=== E2E Tests Failed - Backend Logs ==="
|
||||||
|
cat backend.log 2>/dev/null || echo "No backend log found"
|
||||||
|
echo ""
|
||||||
|
echo "=== Process status at failure ==="
|
||||||
|
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
|
||||||
|
echo ""
|
||||||
|
echo "=== Port status ==="
|
||||||
|
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
|
||||||
|
|
||||||
- name: Upload Playwright report
|
- name: Upload Playwright report
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
if: always()
|
if: always()
|
||||||
@@ -98,3 +166,13 @@ jobs:
|
|||||||
apps/ui/test-results/
|
apps/ui/test-results/
|
||||||
retention-days: 7
|
retention-days: 7
|
||||||
if-no-files-found: ignore
|
if-no-files-found: ignore
|
||||||
|
|
||||||
|
- name: Cleanup - Kill backend server
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
if [ -n "$SERVER_PID" ]; then
|
||||||
|
echo "Cleaning up backend server (PID: $SERVER_PID)..."
|
||||||
|
kill $SERVER_PID 2>/dev/null || true
|
||||||
|
kill -9 $SERVER_PID 2>/dev/null || true
|
||||||
|
echo "Backend server cleanup complete"
|
||||||
|
fi
|
||||||
|
|||||||
2
.github/workflows/format-check.yml
vendored
2
.github/workflows/format-check.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
|||||||
cache-dependency-path: package-lock.json
|
cache-dependency-path: package-lock.json
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: npm install --ignore-scripts
|
run: npm install --ignore-scripts --force
|
||||||
|
|
||||||
- name: Check formatting
|
- name: Check formatting
|
||||||
run: npm run format:check
|
run: npm run format:check
|
||||||
|
|||||||
13
.github/workflows/release.yml
vendored
13
.github/workflows/release.yml
vendored
@@ -35,6 +35,11 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
check-lockfile: 'true'
|
check-lockfile: 'true'
|
||||||
|
|
||||||
|
- name: Install RPM build tools (Linux)
|
||||||
|
if: matrix.os == 'ubuntu-latest'
|
||||||
|
shell: bash
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y rpm
|
||||||
|
|
||||||
- name: Build Electron app (macOS)
|
- name: Build Electron app (macOS)
|
||||||
if: matrix.os == 'macos-latest'
|
if: matrix.os == 'macos-latest'
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -73,7 +78,7 @@ jobs:
|
|||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: linux-builds
|
name: linux-builds
|
||||||
path: apps/ui/release/*.{AppImage,deb}
|
path: apps/ui/release/*.{AppImage,deb,rpm}
|
||||||
retention-days: 30
|
retention-days: 30
|
||||||
|
|
||||||
upload:
|
upload:
|
||||||
@@ -104,8 +109,8 @@ jobs:
|
|||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
files: |
|
files: |
|
||||||
artifacts/macos-builds/*
|
artifacts/macos-builds/*.{dmg,zip,blockmap}
|
||||||
artifacts/windows-builds/*
|
artifacts/windows-builds/*.{exe,blockmap}
|
||||||
artifacts/linux-builds/*
|
artifacts/linux-builds/*.{AppImage,deb,rpm,blockmap}
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|||||||
12
.gitignore
vendored
12
.gitignore
vendored
@@ -73,6 +73,9 @@ blob-report/
|
|||||||
!.env.example
|
!.env.example
|
||||||
!.env.local.example
|
!.env.local.example
|
||||||
|
|
||||||
|
# Codex config (contains API keys)
|
||||||
|
.codex/config.toml
|
||||||
|
|
||||||
# TypeScript
|
# TypeScript
|
||||||
*.tsbuildinfo
|
*.tsbuildinfo
|
||||||
|
|
||||||
@@ -84,4 +87,11 @@ docker-compose.override.yml
|
|||||||
.claude/hans/
|
.claude/hans/
|
||||||
|
|
||||||
pnpm-lock.yaml
|
pnpm-lock.yaml
|
||||||
yarn.lock
|
yarn.lock
|
||||||
|
|
||||||
|
# Fork-specific workflow files (should never be committed)
|
||||||
|
# API key files
|
||||||
|
data/.api-key
|
||||||
|
data/credentials.json
|
||||||
|
data/
|
||||||
|
.codex/
|
||||||
|
|||||||
@@ -31,7 +31,12 @@ fi
|
|||||||
|
|
||||||
# Ensure common system paths are in PATH (for systems without nvm)
|
# Ensure common system paths are in PATH (for systems without nvm)
|
||||||
# This helps find node/npm installed via Homebrew, system packages, etc.
|
# This helps find node/npm installed via Homebrew, system packages, etc.
|
||||||
export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
|
if [ -n "$WINDIR" ]; then
|
||||||
|
export PATH="$PATH:/c/Program Files/nodejs:/c/Program Files (x86)/nodejs"
|
||||||
|
export PATH="$PATH:$APPDATA/npm:$LOCALAPPDATA/Programs/nodejs"
|
||||||
|
else
|
||||||
|
export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
|
||||||
|
fi
|
||||||
|
|
||||||
# Run lint-staged - works with or without nvm
|
# Run lint-staged - works with or without nvm
|
||||||
# Prefer npx, fallback to npm exec, both work with system-installed Node.js
|
# Prefer npx, fallback to npm exec, both work with system-installed Node.js
|
||||||
|
|||||||
@@ -166,7 +166,10 @@ Use `resolveModelString()` from `@automaker/model-resolver` to convert model ali
|
|||||||
## Environment Variables
|
## Environment Variables
|
||||||
|
|
||||||
- `ANTHROPIC_API_KEY` - Anthropic API key (or use Claude Code CLI auth)
|
- `ANTHROPIC_API_KEY` - Anthropic API key (or use Claude Code CLI auth)
|
||||||
|
- `HOST` - Host to bind server to (default: 0.0.0.0)
|
||||||
|
- `HOSTNAME` - Hostname for user-facing URLs (default: localhost)
|
||||||
- `PORT` - Server port (default: 3008)
|
- `PORT` - Server port (default: 3008)
|
||||||
- `DATA_DIR` - Data storage directory (default: ./data)
|
- `DATA_DIR` - Data storage directory (default: ./data)
|
||||||
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
||||||
- `AUTOMAKER_MOCK_AGENT=true` - Enable mock agent mode for CI testing
|
- `AUTOMAKER_MOCK_AGENT=true` - Enable mock agent mode for CI testing
|
||||||
|
- `VITE_HOSTNAME` - Hostname for frontend API URLs (default: localhost)
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ For complete details on contribution terms and rights assignment, please review
|
|||||||
- [Development Setup](#development-setup)
|
- [Development Setup](#development-setup)
|
||||||
- [Project Structure](#project-structure)
|
- [Project Structure](#project-structure)
|
||||||
- [Pull Request Process](#pull-request-process)
|
- [Pull Request Process](#pull-request-process)
|
||||||
|
- [Branching Strategy (RC Branches)](#branching-strategy-rc-branches)
|
||||||
- [Branch Naming Convention](#branch-naming-convention)
|
- [Branch Naming Convention](#branch-naming-convention)
|
||||||
- [Commit Message Format](#commit-message-format)
|
- [Commit Message Format](#commit-message-format)
|
||||||
- [Submitting a Pull Request](#submitting-a-pull-request)
|
- [Submitting a Pull Request](#submitting-a-pull-request)
|
||||||
@@ -186,6 +187,59 @@ automaker/
|
|||||||
|
|
||||||
This section covers everything you need to know about contributing changes through pull requests, from creating your branch to getting your code merged.
|
This section covers everything you need to know about contributing changes through pull requests, from creating your branch to getting your code merged.
|
||||||
|
|
||||||
|
### Branching Strategy (RC Branches)
|
||||||
|
|
||||||
|
Automaker uses **Release Candidate (RC) branches** for all development work. Understanding this workflow is essential before contributing.
|
||||||
|
|
||||||
|
**How it works:**
|
||||||
|
|
||||||
|
1. **All development happens on RC branches** - We maintain version-specific RC branches (e.g., `v0.10.0rc`, `v0.11.0rc`) where all active development occurs
|
||||||
|
2. **RC branches are eventually merged to main** - Once an RC branch is stable and ready for release, it gets merged into `main`
|
||||||
|
3. **Main branch is for releases only** - The `main` branch contains only released, stable code
|
||||||
|
|
||||||
|
**Before creating a PR:**
|
||||||
|
|
||||||
|
1. **Check for the latest RC branch** - Before starting work, check the repository for the current RC branch:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git fetch upstream
|
||||||
|
git branch -r | grep rc
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Base your work on the RC branch** - Create your feature branch from the latest RC branch, not from `main`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Find the latest RC branch (e.g., v0.11.0rc)
|
||||||
|
git checkout upstream/v0.11.0rc
|
||||||
|
git checkout -b feature/your-feature-name
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Target the RC branch in your PR** - When opening your pull request, set the base branch to the current RC branch, not `main`
|
||||||
|
|
||||||
|
**Example workflow:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Fetch latest changes
|
||||||
|
git fetch upstream
|
||||||
|
|
||||||
|
# 2. Check for RC branches
|
||||||
|
git branch -r | grep rc
|
||||||
|
# Output: upstream/v0.11.0rc
|
||||||
|
|
||||||
|
# 3. Create your branch from the RC
|
||||||
|
git checkout -b feature/add-dark-mode upstream/v0.11.0rc
|
||||||
|
|
||||||
|
# 4. Make your changes and commit
|
||||||
|
git commit -m "feat: Add dark mode support"
|
||||||
|
|
||||||
|
# 5. Push to your fork
|
||||||
|
git push origin feature/add-dark-mode
|
||||||
|
|
||||||
|
# 6. Open PR targeting the RC branch (v0.11.0rc), NOT main
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important:** PRs opened directly against `main` will be asked to retarget to the current RC branch.
|
||||||
|
|
||||||
### Branch Naming Convention
|
### Branch Naming Convention
|
||||||
|
|
||||||
We use a consistent branch naming pattern to keep our repository organized:
|
We use a consistent branch naming pattern to keep our repository organized:
|
||||||
@@ -275,14 +329,14 @@ Follow these steps to submit your contribution:
|
|||||||
|
|
||||||
#### 1. Prepare Your Changes
|
#### 1. Prepare Your Changes
|
||||||
|
|
||||||
Ensure you've synced with the latest upstream changes:
|
Ensure you've synced with the latest upstream changes from the RC branch:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Fetch latest changes from upstream
|
# Fetch latest changes from upstream
|
||||||
git fetch upstream
|
git fetch upstream
|
||||||
|
|
||||||
# Rebase your branch on main (if needed)
|
# Rebase your branch on the current RC branch (if needed)
|
||||||
git rebase upstream/main
|
git rebase upstream/v0.11.0rc # Use the current RC branch name
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 2. Run Pre-submission Checks
|
#### 2. Run Pre-submission Checks
|
||||||
@@ -314,18 +368,19 @@ git push origin feature/your-feature-name
|
|||||||
|
|
||||||
1. Go to your fork on GitHub
|
1. Go to your fork on GitHub
|
||||||
2. Click "Compare & pull request" for your branch
|
2. Click "Compare & pull request" for your branch
|
||||||
3. Ensure the base repository is `AutoMaker-Org/automaker` and base branch is `main`
|
3. **Important:** Set the base repository to `AutoMaker-Org/automaker` and the base branch to the **current RC branch** (e.g., `v0.11.0rc`), not `main`
|
||||||
4. Fill out the PR template completely
|
4. Fill out the PR template completely
|
||||||
|
|
||||||
#### PR Requirements Checklist
|
#### PR Requirements Checklist
|
||||||
|
|
||||||
Your PR should include:
|
Your PR should include:
|
||||||
|
|
||||||
|
- [ ] **Targets the current RC branch** (not `main`) - see [Branching Strategy](#branching-strategy-rc-branches)
|
||||||
- [ ] **Clear title** describing the change (use conventional commit format)
|
- [ ] **Clear title** describing the change (use conventional commit format)
|
||||||
- [ ] **Description** explaining what changed and why
|
- [ ] **Description** explaining what changed and why
|
||||||
- [ ] **Link to related issue** (if applicable): `Closes #123` or `Fixes #456`
|
- [ ] **Link to related issue** (if applicable): `Closes #123` or `Fixes #456`
|
||||||
- [ ] **All CI checks passing** (format, lint, build, tests)
|
- [ ] **All CI checks passing** (format, lint, build, tests)
|
||||||
- [ ] **No merge conflicts** with main branch
|
- [ ] **No merge conflicts** with the RC branch
|
||||||
- [ ] **Tests included** for new functionality
|
- [ ] **Tests included** for new functionality
|
||||||
- [ ] **Documentation updated** if adding/changing public APIs
|
- [ ] **Documentation updated** if adding/changing public APIs
|
||||||
|
|
||||||
|
|||||||
253
DEVELOPMENT_WORKFLOW.md
Normal file
253
DEVELOPMENT_WORKFLOW.md
Normal file
@@ -0,0 +1,253 @@
|
|||||||
|
# Development Workflow
|
||||||
|
|
||||||
|
This document defines the standard workflow for keeping a branch in sync with the upstream
|
||||||
|
release candidate (RC) and for shipping feature work. It is paired with `check-sync.sh`.
|
||||||
|
|
||||||
|
## Quick Decision Rule
|
||||||
|
|
||||||
|
1. Ask the user to select a workflow:
|
||||||
|
- **Sync Workflow** → you are maintaining the current RC branch with fixes/improvements
|
||||||
|
and will push the same fixes to both origin and upstream RC when you have local
|
||||||
|
commits to publish.
|
||||||
|
- **PR Workflow** → you are starting new feature work on a new branch; upstream updates
|
||||||
|
happen via PR only.
|
||||||
|
2. After the user selects, run:
|
||||||
|
```bash
|
||||||
|
./check-sync.sh
|
||||||
|
```
|
||||||
|
3. Use the status output to confirm alignment. If it reports **diverged**, default to
|
||||||
|
merging `upstream/<TARGET_RC>` into the current branch and preserving local commits.
|
||||||
|
For Sync Workflow, when the working tree is clean and you are behind upstream RC,
|
||||||
|
proceed with the fetch + merge without asking for additional confirmation.
|
||||||
|
|
||||||
|
## Target RC Resolution
|
||||||
|
|
||||||
|
The target RC is resolved dynamically so the workflow stays current as the RC changes.
|
||||||
|
|
||||||
|
Resolution order:
|
||||||
|
|
||||||
|
1. Latest `upstream/v*rc` branch (auto-detected)
|
||||||
|
2. `upstream/HEAD` (fallback)
|
||||||
|
3. If neither is available, you must pass `--rc <branch>`
|
||||||
|
|
||||||
|
Override for a single run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./check-sync.sh --rc <rc-branch>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pre-Flight Checklist
|
||||||
|
|
||||||
|
1. Confirm a clean working tree:
|
||||||
|
```bash
|
||||||
|
git status
|
||||||
|
```
|
||||||
|
2. Confirm the current branch:
|
||||||
|
```bash
|
||||||
|
git branch --show-current
|
||||||
|
```
|
||||||
|
3. Ensure remotes exist (origin + upstream):
|
||||||
|
```bash
|
||||||
|
git remote -v
|
||||||
|
```
|
||||||
|
|
||||||
|
## Sync Workflow (Upstream Sync)
|
||||||
|
|
||||||
|
Use this flow when you are updating the current branch with fixes or improvements and
|
||||||
|
intend to keep origin and upstream RC in lockstep.
|
||||||
|
|
||||||
|
1. **Check sync status**
|
||||||
|
```bash
|
||||||
|
./check-sync.sh
|
||||||
|
```
|
||||||
|
2. **Update from upstream RC before editing (no pulls)**
|
||||||
|
- **Behind upstream RC** → fetch and merge RC into your branch:
|
||||||
|
```bash
|
||||||
|
git fetch upstream
|
||||||
|
git merge upstream/<TARGET_RC> --no-edit
|
||||||
|
```
|
||||||
|
When the working tree is clean and the user selected Sync Workflow, proceed without
|
||||||
|
an extra confirmation prompt.
|
||||||
|
- **Diverged** → stop and resolve manually.
|
||||||
|
3. **Resolve conflicts if needed**
|
||||||
|
- Handle conflicts intelligently: preserve upstream behavior and your local intent.
|
||||||
|
4. **Make changes and commit (if you are delivering fixes)**
|
||||||
|
```bash
|
||||||
|
git add -A
|
||||||
|
git commit -m "type: description"
|
||||||
|
```
|
||||||
|
5. **Build to verify**
|
||||||
|
```bash
|
||||||
|
npm run build:packages
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
6. **Push after a successful merge to keep remotes aligned**
|
||||||
|
- If you only merged upstream RC changes, push **origin only** to sync your fork:
|
||||||
|
```bash
|
||||||
|
git push origin <branch>
|
||||||
|
```
|
||||||
|
- If you have local fixes to publish, push **origin + upstream**:
|
||||||
|
```bash
|
||||||
|
git push origin <branch>
|
||||||
|
git push upstream <branch>:<TARGET_RC>
|
||||||
|
```
|
||||||
|
- Always ask the user which push to perform.
|
||||||
|
- Origin (origin-only sync):
|
||||||
|
```bash
|
||||||
|
git push origin <branch>
|
||||||
|
```
|
||||||
|
- Upstream RC (publish the same fixes when you have local commits):
|
||||||
|
```bash
|
||||||
|
git push upstream <branch>:<TARGET_RC>
|
||||||
|
```
|
||||||
|
7. **Re-check sync**
|
||||||
|
```bash
|
||||||
|
./check-sync.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## PR Workflow (Feature Work)
|
||||||
|
|
||||||
|
Use this flow only for new feature work on a new branch. Do not push to upstream RC.
|
||||||
|
|
||||||
|
1. **Create or switch to a feature branch**
|
||||||
|
```bash
|
||||||
|
git checkout -b <branch>
|
||||||
|
```
|
||||||
|
2. **Make changes and commit**
|
||||||
|
```bash
|
||||||
|
git add -A
|
||||||
|
git commit -m "type: description"
|
||||||
|
```
|
||||||
|
3. **Merge upstream RC before shipping**
|
||||||
|
```bash
|
||||||
|
git merge upstream/<TARGET_RC> --no-edit
|
||||||
|
```
|
||||||
|
4. **Build and/or test**
|
||||||
|
```bash
|
||||||
|
npm run build:packages
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
5. **Push to origin**
|
||||||
|
```bash
|
||||||
|
git push -u origin <branch>
|
||||||
|
```
|
||||||
|
6. **Create or update the PR**
|
||||||
|
- Use `gh pr create` or the GitHub UI.
|
||||||
|
7. **Review and follow-up**
|
||||||
|
|
||||||
|
- Apply feedback, commit changes, and push again.
|
||||||
|
- Re-run `./check-sync.sh` if additional upstream sync is needed.
|
||||||
|
|
||||||
|
## Conflict Resolution Checklist
|
||||||
|
|
||||||
|
1. Identify which changes are from upstream vs. local.
|
||||||
|
2. Preserve both behaviors where possible; avoid dropping either side.
|
||||||
|
3. Prefer minimal, safe integrations over refactors.
|
||||||
|
4. Re-run build commands after resolving conflicts.
|
||||||
|
5. Re-run `./check-sync.sh` to confirm status.
|
||||||
|
|
||||||
|
## Build/Test Matrix
|
||||||
|
|
||||||
|
- **Sync Workflow**: `npm run build:packages` and `npm run build`.
|
||||||
|
- **PR Workflow**: `npm run build:packages` and `npm run build` (plus relevant tests).
|
||||||
|
|
||||||
|
## Post-Sync Verification
|
||||||
|
|
||||||
|
1. `git status` should be clean.
|
||||||
|
2. `./check-sync.sh` should show expected alignment.
|
||||||
|
3. Verify recent commits with:
|
||||||
|
```bash
|
||||||
|
git log --oneline -5
|
||||||
|
```
|
||||||
|
|
||||||
|
## check-sync.sh Usage
|
||||||
|
|
||||||
|
- Uses dynamic Target RC resolution (see above).
|
||||||
|
- Override target RC:
|
||||||
|
```bash
|
||||||
|
./check-sync.sh --rc <rc-branch>
|
||||||
|
```
|
||||||
|
- Optional preview limit:
|
||||||
|
```bash
|
||||||
|
./check-sync.sh --preview 10
|
||||||
|
```
|
||||||
|
- The script prints sync status for both origin and upstream and previews recent commits
|
||||||
|
when you are behind.
|
||||||
|
|
||||||
|
## Stop Conditions
|
||||||
|
|
||||||
|
Stop and ask for guidance if any of the following are true:
|
||||||
|
|
||||||
|
- The working tree is dirty and you are about to merge or push.
|
||||||
|
- `./check-sync.sh` reports **diverged** during PR Workflow, or a merge cannot be completed.
|
||||||
|
- The script cannot resolve a target RC and requests `--rc`.
|
||||||
|
- A build fails after sync or conflict resolution.
|
||||||
|
|
||||||
|
## AI Agent Guardrails
|
||||||
|
|
||||||
|
- Always run `./check-sync.sh` before merges or pushes.
|
||||||
|
- Always ask for explicit user approval before any push command.
|
||||||
|
- Do not ask for additional confirmation before a Sync Workflow fetch + merge when the
|
||||||
|
working tree is clean and the user has already selected the Sync Workflow.
|
||||||
|
- Choose Sync vs PR workflow based on intent (RC maintenance vs new feature work), not
|
||||||
|
on the script's workflow hint.
|
||||||
|
- Only use force push when the user explicitly requests a history rewrite.
|
||||||
|
- Ask for explicit approval before dependency installs, branch deletion, or destructive operations.
|
||||||
|
- When resolving merge conflicts, preserve both upstream changes and local intent where possible.
|
||||||
|
- Do not create or switch to new branches unless the user explicitly requests it.
|
||||||
|
|
||||||
|
## AI Agent Decision Guidance
|
||||||
|
|
||||||
|
Agents should provide concrete, task-specific suggestions instead of repeatedly asking
|
||||||
|
open-ended questions. Use the user's stated goal and the `./check-sync.sh` status to
|
||||||
|
propose a default path plus one or two alternatives, and only ask for confirmation when
|
||||||
|
an action requires explicit approval.
|
||||||
|
|
||||||
|
Default behavior:
|
||||||
|
|
||||||
|
- If the intent is RC maintenance, recommend the Sync Workflow and proceed with
|
||||||
|
safe preparation steps (status checks, previews). If the branch is behind upstream RC,
|
||||||
|
fetch and merge without additional confirmation when the working tree is clean, then
|
||||||
|
push to origin to keep the fork aligned. Push upstream only when there are local fixes
|
||||||
|
to publish.
|
||||||
|
- If the intent is new feature work, recommend the PR Workflow and proceed with safe
|
||||||
|
preparation steps (status checks, identifying scope). Ask for approval before merges,
|
||||||
|
pushes, or dependency installs.
|
||||||
|
- If `./check-sync.sh` reports **diverged** during Sync Workflow, merge
|
||||||
|
`upstream/<TARGET_RC>` into the current branch and preserve local commits.
|
||||||
|
- If `./check-sync.sh` reports **diverged** during PR Workflow, stop and ask for guidance
|
||||||
|
with a short explanation of the divergence and the minimal options to resolve it.
|
||||||
|
If the user's intent is RC maintenance, prefer the Sync Workflow regardless of the
|
||||||
|
script hint. When the intent is new feature work, use the PR Workflow and avoid upstream
|
||||||
|
RC pushes.
|
||||||
|
|
||||||
|
Suggestion format (keep it short):
|
||||||
|
|
||||||
|
- **Recommended**: one sentence with the default path and why it fits the task.
|
||||||
|
- **Alternatives**: one or two options with the tradeoff or prerequisite.
|
||||||
|
- **Approval points**: mention any upcoming actions that need explicit approval (exclude sync
|
||||||
|
workflow pushes and merges).
|
||||||
|
|
||||||
|
## Failure Modes and How to Avoid Them
|
||||||
|
|
||||||
|
Sync Workflow:
|
||||||
|
|
||||||
|
- Wrong RC target: verify the auto-detected RC in `./check-sync.sh` output before merging.
|
||||||
|
- Diverged from upstream RC: stop and resolve manually before any merge or push.
|
||||||
|
- Dirty working tree: commit or stash before syncing to avoid accidental merges.
|
||||||
|
- Missing remotes: ensure both `origin` and `upstream` are configured before syncing.
|
||||||
|
- Build breaks after sync: run `npm run build:packages` and `npm run build` before pushing.
|
||||||
|
|
||||||
|
PR Workflow:
|
||||||
|
|
||||||
|
- Branch not synced to current RC: re-run `./check-sync.sh` and merge RC before shipping.
|
||||||
|
- Pushing the wrong branch: confirm `git branch --show-current` before pushing.
|
||||||
|
- Unreviewed changes: always commit and push to origin before opening or updating a PR.
|
||||||
|
- Skipped tests/builds: run the build commands before declaring the PR ready.
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Avoid merging with uncommitted changes; commit or stash first.
|
||||||
|
- Prefer merge over rebase for PR branches; rebases rewrite history and often require a force push,
|
||||||
|
which should only be done with an explicit user request.
|
||||||
|
- Use clear, conventional commit messages and split unrelated changes into separate commits.
|
||||||
25
Dockerfile
25
Dockerfile
@@ -59,9 +59,22 @@ FROM node:22-slim AS server
|
|||||||
ARG GIT_COMMIT_SHA=unknown
|
ARG GIT_COMMIT_SHA=unknown
|
||||||
LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
|
LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
|
||||||
|
|
||||||
|
# Build arguments for user ID matching (allows matching host user for mounted volumes)
|
||||||
|
# Override at build time: docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) ...
|
||||||
|
ARG UID=1001
|
||||||
|
ARG GID=1001
|
||||||
|
|
||||||
# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
|
# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
|
||||||
|
# Also install Playwright/Chromium system dependencies (aligns with playwright install-deps on Debian/Ubuntu)
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
git curl bash gosu ca-certificates openssh-client \
|
git curl bash gosu ca-certificates openssh-client \
|
||||||
|
# Playwright/Chromium dependencies
|
||||||
|
libglib2.0-0 libnss3 libnspr4 libdbus-1-3 libatk1.0-0 libatk-bridge2.0-0 \
|
||||||
|
libcups2 libdrm2 libxkbcommon0 libatspi2.0-0 libxcomposite1 libxdamage1 \
|
||||||
|
libxfixes3 libxrandr2 libgbm1 libasound2 libpango-1.0-0 libcairo2 \
|
||||||
|
libx11-6 libx11-xcb1 libxcb1 libxext6 libxrender1 libxss1 libxtst6 \
|
||||||
|
libxshmfence1 libgtk-3-0 libexpat1 libfontconfig1 fonts-liberation \
|
||||||
|
xdg-utils libpangocairo-1.0-0 libpangoft2-1.0-0 libu2f-udev libvulkan1 \
|
||||||
&& GH_VERSION="2.63.2" \
|
&& GH_VERSION="2.63.2" \
|
||||||
&& ARCH=$(uname -m) \
|
&& ARCH=$(uname -m) \
|
||||||
&& case "$ARCH" in \
|
&& case "$ARCH" in \
|
||||||
@@ -79,8 +92,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
RUN npm install -g @anthropic-ai/claude-code
|
RUN npm install -g @anthropic-ai/claude-code
|
||||||
|
|
||||||
# Create non-root user with home directory BEFORE installing Cursor CLI
|
# Create non-root user with home directory BEFORE installing Cursor CLI
|
||||||
RUN groupadd -g 1001 automaker && \
|
# Uses UID/GID build args to match host user for mounted volume permissions
|
||||||
useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
|
||||||
|
RUN groupadd -o -g ${GID} automaker && \
|
||||||
|
useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
||||||
mkdir -p /home/automaker/.local/bin && \
|
mkdir -p /home/automaker/.local/bin && \
|
||||||
mkdir -p /home/automaker/.cursor && \
|
mkdir -p /home/automaker/.cursor && \
|
||||||
chown -R automaker:automaker /home/automaker && \
|
chown -R automaker:automaker /home/automaker && \
|
||||||
@@ -95,6 +110,12 @@ RUN curl https://cursor.com/install -fsS | bash && \
|
|||||||
ls -la /home/automaker/.local/bin/ && \
|
ls -la /home/automaker/.local/bin/ && \
|
||||||
echo "=== PATH is: $PATH ===" && \
|
echo "=== PATH is: $PATH ===" && \
|
||||||
(which cursor-agent && cursor-agent --version) || echo "cursor-agent installed (may need auth setup)"
|
(which cursor-agent && cursor-agent --version) || echo "cursor-agent installed (may need auth setup)"
|
||||||
|
|
||||||
|
# Install OpenCode CLI (for multi-provider AI model access)
|
||||||
|
RUN curl -fsSL https://opencode.ai/install | bash && \
|
||||||
|
echo "=== Checking OpenCode CLI installation ===" && \
|
||||||
|
ls -la /home/automaker/.local/bin/ && \
|
||||||
|
(which opencode && opencode --version) || echo "opencode installed (may need auth setup)"
|
||||||
USER root
|
USER root
|
||||||
|
|
||||||
# Add PATH to profile so it's available in all interactive shells (for login shells)
|
# Add PATH to profile so it's available in all interactive shells (for login shells)
|
||||||
|
|||||||
@@ -8,9 +8,17 @@
|
|||||||
FROM node:22-slim
|
FROM node:22-slim
|
||||||
|
|
||||||
# Install build dependencies for native modules (node-pty) and runtime tools
|
# Install build dependencies for native modules (node-pty) and runtime tools
|
||||||
|
# Also install Playwright/Chromium system dependencies (aligns with playwright install-deps on Debian/Ubuntu)
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
python3 make g++ \
|
python3 make g++ \
|
||||||
git curl bash gosu ca-certificates openssh-client \
|
git curl bash gosu ca-certificates openssh-client \
|
||||||
|
# Playwright/Chromium dependencies
|
||||||
|
libglib2.0-0 libnss3 libnspr4 libdbus-1-3 libatk1.0-0 libatk-bridge2.0-0 \
|
||||||
|
libcups2 libdrm2 libxkbcommon0 libatspi2.0-0 libxcomposite1 libxdamage1 \
|
||||||
|
libxfixes3 libxrandr2 libgbm1 libasound2 libpango-1.0-0 libcairo2 \
|
||||||
|
libx11-6 libx11-xcb1 libxcb1 libxext6 libxrender1 libxss1 libxtst6 \
|
||||||
|
libxshmfence1 libgtk-3-0 libexpat1 libfontconfig1 fonts-liberation \
|
||||||
|
xdg-utils libpangocairo-1.0-0 libpangoft2-1.0-0 libu2f-udev libvulkan1 \
|
||||||
&& GH_VERSION="2.63.2" \
|
&& GH_VERSION="2.63.2" \
|
||||||
&& ARCH=$(uname -m) \
|
&& ARCH=$(uname -m) \
|
||||||
&& case "$ARCH" in \
|
&& case "$ARCH" in \
|
||||||
@@ -27,9 +35,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
# Install Claude CLI globally
|
# Install Claude CLI globally
|
||||||
RUN npm install -g @anthropic-ai/claude-code
|
RUN npm install -g @anthropic-ai/claude-code
|
||||||
|
|
||||||
# Create non-root user
|
# Build arguments for user ID matching (allows matching host user for mounted volumes)
|
||||||
RUN groupadd -g 1001 automaker && \
|
# Override at build time: docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g)
|
||||||
useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
ARG UID=1001
|
||||||
|
ARG GID=1001
|
||||||
|
|
||||||
|
# Create non-root user with configurable UID/GID
|
||||||
|
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
|
||||||
|
RUN groupadd -o -g ${GID} automaker && \
|
||||||
|
useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
||||||
mkdir -p /home/automaker/.local/bin && \
|
mkdir -p /home/automaker/.local/bin && \
|
||||||
mkdir -p /home/automaker/.cursor && \
|
mkdir -p /home/automaker/.cursor && \
|
||||||
chown -R automaker:automaker /home/automaker && \
|
chown -R automaker:automaker /home/automaker && \
|
||||||
|
|||||||
159
README.md
159
README.md
@@ -28,6 +28,7 @@
|
|||||||
- [Quick Start](#quick-start)
|
- [Quick Start](#quick-start)
|
||||||
- [How to Run](#how-to-run)
|
- [How to Run](#how-to-run)
|
||||||
- [Development Mode](#development-mode)
|
- [Development Mode](#development-mode)
|
||||||
|
- [Interactive TUI Launcher](#interactive-tui-launcher-recommended-for-new-users)
|
||||||
- [Building for Production](#building-for-production)
|
- [Building for Production](#building-for-production)
|
||||||
- [Testing](#testing)
|
- [Testing](#testing)
|
||||||
- [Linting](#linting)
|
- [Linting](#linting)
|
||||||
@@ -101,11 +102,9 @@ In the Discord, you can:
|
|||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
|
|
||||||
- **Node.js 18+** (tested with Node.js 22)
|
- **Node.js 22+** (required: >=22.0.0 <23.0.0)
|
||||||
- **npm** (comes with Node.js)
|
- **npm** (comes with Node.js)
|
||||||
- **Authentication** (choose one):
|
- **[Claude Code CLI](https://code.claude.com/docs/en/overview)** - Install and authenticate with your Anthropic subscription. Automaker integrates with your authenticated Claude Code CLI to access Claude models.
|
||||||
- **[Claude Code CLI](https://code.claude.com/docs/en/overview)** (recommended) - Install and authenticate, credentials used automatically
|
|
||||||
- **Anthropic API Key** - Direct API key for Claude Agent SDK ([get one here](https://console.anthropic.com/))
|
|
||||||
|
|
||||||
### Quick Start
|
### Quick Start
|
||||||
|
|
||||||
@@ -117,30 +116,14 @@ cd automaker
|
|||||||
# 2. Install dependencies
|
# 2. Install dependencies
|
||||||
npm install
|
npm install
|
||||||
|
|
||||||
# 3. Build shared packages (can be skipped - npm run dev does it automatically)
|
# 3. Start Automaker
|
||||||
npm run build:packages
|
|
||||||
|
|
||||||
# 4. Start Automaker
|
|
||||||
npm run dev
|
npm run dev
|
||||||
# Choose between:
|
# Choose between:
|
||||||
# 1. Web Application (browser at localhost:3007)
|
# 1. Web Application (browser at localhost:3007)
|
||||||
# 2. Desktop Application (Electron - recommended)
|
# 2. Desktop Application (Electron - recommended)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Authentication Setup:** On first run, Automaker will automatically show a setup wizard where you can configure authentication. You can choose to:
|
**Authentication:** Automaker integrates with your authenticated Claude Code CLI. Make sure you have [installed and authenticated](https://code.claude.com/docs/en/quickstart) the Claude Code CLI before running Automaker. Your CLI credentials will be detected automatically.
|
||||||
|
|
||||||
- Use **Claude Code CLI** (recommended) - Automaker will detect your CLI credentials automatically
|
|
||||||
- Enter an **API key** directly in the wizard
|
|
||||||
|
|
||||||
If you prefer to set up authentication before running (e.g., for headless deployments or CI/CD), you can set it manually:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Option A: Environment variable
|
|
||||||
export ANTHROPIC_API_KEY="sk-ant-..."
|
|
||||||
|
|
||||||
# Option B: Create .env file in project root
|
|
||||||
echo "ANTHROPIC_API_KEY=sk-ant-..." > .env
|
|
||||||
```
|
|
||||||
|
|
||||||
**For Development:** `npm run dev` starts the development server with Vite live reload and hot module replacement for fast refresh and instant updates as you make changes.
|
**For Development:** `npm run dev` starts the development server with Vite live reload and hot module replacement for fast refresh and instant updates as you make changes.
|
||||||
|
|
||||||
@@ -179,6 +162,40 @@ npm run dev:electron:wsl:gpu
|
|||||||
npm run dev:web
|
npm run dev:web
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Interactive TUI Launcher (Recommended for New Users)
|
||||||
|
|
||||||
|
For a user-friendly interactive menu, use the built-in TUI launcher script:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Show interactive menu with all launch options
|
||||||
|
./start-automaker.sh
|
||||||
|
|
||||||
|
# Or launch directly without menu
|
||||||
|
./start-automaker.sh web # Web browser
|
||||||
|
./start-automaker.sh electron # Desktop app
|
||||||
|
./start-automaker.sh electron-debug # Desktop + DevTools
|
||||||
|
|
||||||
|
# Additional options
|
||||||
|
./start-automaker.sh --help # Show all available options
|
||||||
|
./start-automaker.sh --version # Show version information
|
||||||
|
./start-automaker.sh --check-deps # Verify project dependencies
|
||||||
|
./start-automaker.sh --no-colors # Disable colored output
|
||||||
|
./start-automaker.sh --no-history # Don't remember last choice
|
||||||
|
```
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
|
||||||
|
- 🎨 Beautiful terminal UI with gradient colors and ASCII art
|
||||||
|
- ⌨️ Interactive menu (press 1-3 to select, Q to exit)
|
||||||
|
- 💾 Remembers your last choice
|
||||||
|
- ✅ Pre-flight checks (validates Node.js, npm, dependencies)
|
||||||
|
- 📏 Responsive layout (adapts to terminal size)
|
||||||
|
- ⏱️ 30-second timeout for hands-free selection
|
||||||
|
- 🌐 Cross-shell compatible (bash/zsh)
|
||||||
|
|
||||||
|
**History File:**
|
||||||
|
Your last selected mode is saved in `~/.automaker_launcher_history` for quick re-runs.
|
||||||
|
|
||||||
### Building for Production
|
### Building for Production
|
||||||
|
|
||||||
#### Web Application
|
#### Web Application
|
||||||
@@ -197,11 +214,30 @@ npm run build:electron
|
|||||||
# Platform-specific builds
|
# Platform-specific builds
|
||||||
npm run build:electron:mac # macOS (DMG + ZIP, x64 + arm64)
|
npm run build:electron:mac # macOS (DMG + ZIP, x64 + arm64)
|
||||||
npm run build:electron:win # Windows (NSIS installer, x64)
|
npm run build:electron:win # Windows (NSIS installer, x64)
|
||||||
npm run build:electron:linux # Linux (AppImage + DEB, x64)
|
npm run build:electron:linux # Linux (AppImage + DEB + RPM, x64)
|
||||||
|
|
||||||
# Output directory: apps/ui/release/
|
# Output directory: apps/ui/release/
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Linux Distribution Packages:**
|
||||||
|
|
||||||
|
- **AppImage**: Universal format, works on any Linux distribution
|
||||||
|
- **DEB**: Ubuntu, Debian, Linux Mint, Pop!\_OS
|
||||||
|
- **RPM**: Fedora, RHEL, Rocky Linux, AlmaLinux, openSUSE
|
||||||
|
|
||||||
|
**Installing on Fedora/RHEL:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Download the RPM package
|
||||||
|
wget https://github.com/AutoMaker-Org/automaker/releases/latest/download/Automaker-<version>-x86_64.rpm
|
||||||
|
|
||||||
|
# Install with dnf (Fedora)
|
||||||
|
sudo dnf install ./Automaker-<version>-x86_64.rpm
|
||||||
|
|
||||||
|
# Or with yum (RHEL/CentOS)
|
||||||
|
sudo yum localinstall ./Automaker-<version>-x86_64.rpm
|
||||||
|
```
|
||||||
|
|
||||||
#### Docker Deployment
|
#### Docker Deployment
|
||||||
|
|
||||||
Docker provides the most secure way to run Automaker by isolating it from your host filesystem.
|
Docker provides the most secure way to run Automaker by isolating it from your host filesystem.
|
||||||
@@ -220,16 +256,9 @@ docker-compose logs -f
|
|||||||
docker-compose down
|
docker-compose down
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Configuration
|
##### Authentication
|
||||||
|
|
||||||
Create a `.env` file in the project root if using API key authentication:
|
Automaker integrates with your authenticated Claude Code CLI. To use CLI authentication in Docker, mount your Claude CLI config directory (see [Claude CLI Authentication](#claude-cli-authentication) below).
|
||||||
|
|
||||||
```bash
|
|
||||||
# Optional: Anthropic API key (not needed if using Claude CLI authentication)
|
|
||||||
ANTHROPIC_API_KEY=sk-ant-...
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** Most users authenticate via Claude CLI instead of API keys. See [Claude CLI Authentication](#claude-cli-authentication-optional) below.
|
|
||||||
|
|
||||||
##### Working with Projects (Host Directory Access)
|
##### Working with Projects (Host Directory Access)
|
||||||
|
|
||||||
@@ -243,9 +272,9 @@ services:
|
|||||||
- /path/to/your/project:/projects/your-project
|
- /path/to/your/project:/projects/your-project
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Claude CLI Authentication (Optional)
|
##### Claude CLI Authentication
|
||||||
|
|
||||||
To use Claude Code CLI authentication instead of an API key, mount your Claude CLI config directory:
|
Mount your Claude CLI config directory to use your authenticated CLI credentials:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
services:
|
services:
|
||||||
@@ -343,10 +372,6 @@ npm run lint
|
|||||||
|
|
||||||
### Environment Configuration
|
### Environment Configuration
|
||||||
|
|
||||||
#### Authentication (if not using Claude Code CLI)
|
|
||||||
|
|
||||||
- `ANTHROPIC_API_KEY` - Your Anthropic API key for Claude Agent SDK (not needed if using Claude Code CLI)
|
|
||||||
|
|
||||||
#### Optional - Server
|
#### Optional - Server
|
||||||
|
|
||||||
- `PORT` - Server port (default: 3008)
|
- `PORT` - Server port (default: 3008)
|
||||||
@@ -357,49 +382,22 @@ npm run lint
|
|||||||
|
|
||||||
- `AUTOMAKER_API_KEY` - Optional API authentication for the server
|
- `AUTOMAKER_API_KEY` - Optional API authentication for the server
|
||||||
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
||||||
- `CORS_ORIGIN` - CORS policy (default: \*)
|
- `CORS_ORIGIN` - CORS allowed origins (comma-separated list; defaults to localhost only)
|
||||||
|
|
||||||
#### Optional - Development
|
#### Optional - Development
|
||||||
|
|
||||||
- `VITE_SKIP_ELECTRON` - Skip Electron in dev mode
|
- `VITE_SKIP_ELECTRON` - Skip Electron in dev mode
|
||||||
- `OPEN_DEVTOOLS` - Auto-open DevTools in Electron
|
- `OPEN_DEVTOOLS` - Auto-open DevTools in Electron
|
||||||
|
- `AUTOMAKER_SKIP_SANDBOX_WARNING` - Skip sandbox warning dialog (useful for dev/CI)
|
||||||
|
|
||||||
### Authentication Setup
|
### Authentication Setup
|
||||||
|
|
||||||
#### Option 1: Claude Code CLI (Recommended)
|
Automaker integrates with your authenticated Claude Code CLI and uses your Anthropic subscription.
|
||||||
|
|
||||||
Install and authenticate the Claude Code CLI following the [official quickstart guide](https://code.claude.com/docs/en/quickstart).
|
Install and authenticate the Claude Code CLI following the [official quickstart guide](https://code.claude.com/docs/en/quickstart).
|
||||||
|
|
||||||
Once authenticated, Automaker will automatically detect and use your CLI credentials. No additional configuration needed!
|
Once authenticated, Automaker will automatically detect and use your CLI credentials. No additional configuration needed!
|
||||||
|
|
||||||
#### Option 2: Direct API Key
|
|
||||||
|
|
||||||
If you prefer not to use the CLI, you can provide an Anthropic API key directly using one of these methods:
|
|
||||||
|
|
||||||
##### 2a. Shell Configuration
|
|
||||||
|
|
||||||
Add to your `~/.bashrc` or `~/.zshrc`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
export ANTHROPIC_API_KEY="sk-ant-..."
|
|
||||||
```
|
|
||||||
|
|
||||||
Then restart your terminal or run `source ~/.bashrc` (or `source ~/.zshrc`).
|
|
||||||
|
|
||||||
##### 2b. .env File
|
|
||||||
|
|
||||||
Create a `.env` file in the project root (gitignored):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ANTHROPIC_API_KEY=sk-ant-...
|
|
||||||
PORT=3008
|
|
||||||
DATA_DIR=./data
|
|
||||||
```
|
|
||||||
|
|
||||||
##### 2c. In-App Storage
|
|
||||||
|
|
||||||
The application can store your API key securely in the settings UI. The key is persisted in the `DATA_DIR` directory.
|
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
### Core Workflow
|
### Core Workflow
|
||||||
@@ -508,20 +506,24 @@ Automaker provides several specialized views accessible via the sidebar or keybo
|
|||||||
| **Agent** | `A` | Interactive chat sessions with AI agents for exploratory work and questions |
|
| **Agent** | `A` | Interactive chat sessions with AI agents for exploratory work and questions |
|
||||||
| **Spec** | `D` | Project specification editor with AI-powered generation and feature suggestions |
|
| **Spec** | `D` | Project specification editor with AI-powered generation and feature suggestions |
|
||||||
| **Context** | `C` | Manage context files (markdown, images) that AI agents automatically reference |
|
| **Context** | `C` | Manage context files (markdown, images) that AI agents automatically reference |
|
||||||
| **Profiles** | `M` | Create and manage AI agent profiles with custom prompts and configurations |
|
|
||||||
| **Settings** | `S` | Configure themes, shortcuts, defaults, authentication, and more |
|
| **Settings** | `S` | Configure themes, shortcuts, defaults, authentication, and more |
|
||||||
| **Terminal** | `T` | Integrated terminal with tabs, splits, and persistent sessions |
|
| **Terminal** | `T` | Integrated terminal with tabs, splits, and persistent sessions |
|
||||||
| **GitHub Issues** | - | Import and validate GitHub issues, convert to tasks |
|
| **Graph** | `H` | Visualize feature dependencies with interactive graph visualization |
|
||||||
|
| **Ideation** | `I` | Brainstorm and generate ideas with AI assistance |
|
||||||
|
| **Memory** | `Y` | View and manage agent memory and conversation history |
|
||||||
|
| **GitHub Issues** | `G` | Import and validate GitHub issues, convert to tasks |
|
||||||
|
| **GitHub PRs** | `R` | View and manage GitHub pull requests |
|
||||||
| **Running Agents** | - | View all active agents across projects with status and progress |
|
| **Running Agents** | - | View all active agents across projects with status and progress |
|
||||||
|
|
||||||
### Keyboard Navigation
|
### Keyboard Navigation
|
||||||
|
|
||||||
All shortcuts are customizable in Settings. Default shortcuts:
|
All shortcuts are customizable in Settings. Default shortcuts:
|
||||||
|
|
||||||
- **Navigation:** `K` (Board), `A` (Agent), `D` (Spec), `C` (Context), `S` (Settings), `M` (Profiles), `T` (Terminal)
|
- **Navigation:** `K` (Board), `A` (Agent), `D` (Spec), `C` (Context), `S` (Settings), `T` (Terminal), `H` (Graph), `I` (Ideation), `Y` (Memory), `G` (GitHub Issues), `R` (GitHub PRs)
|
||||||
- **UI:** `` ` `` (Toggle sidebar)
|
- **UI:** `` ` `` (Toggle sidebar)
|
||||||
- **Actions:** `N` (New item in current view), `G` (Start next features), `O` (Open project), `P` (Project picker)
|
- **Actions:** `N` (New item in current view), `O` (Open project), `P` (Project picker)
|
||||||
- **Projects:** `Q`/`E` (Cycle previous/next project)
|
- **Projects:** `Q`/`E` (Cycle previous/next project)
|
||||||
|
- **Terminal:** `Alt+D` (Split right), `Alt+S` (Split down), `Alt+W` (Close), `Alt+T` (New tab)
|
||||||
|
|
||||||
## Architecture
|
## Architecture
|
||||||
|
|
||||||
@@ -586,10 +588,16 @@ Stored in `{projectPath}/.automaker/`:
|
|||||||
│ ├── agent-output.md # AI agent output log
|
│ ├── agent-output.md # AI agent output log
|
||||||
│ └── images/ # Attached images
|
│ └── images/ # Attached images
|
||||||
├── context/ # Context files for AI agents
|
├── context/ # Context files for AI agents
|
||||||
|
├── worktrees/ # Git worktree metadata
|
||||||
|
├── validations/ # GitHub issue validation results
|
||||||
|
├── ideation/ # Brainstorming and analysis data
|
||||||
|
│ └── analysis.json # Project structure analysis
|
||||||
|
├── board/ # Board-related data
|
||||||
|
├── images/ # Project-level images
|
||||||
├── settings.json # Project-specific settings
|
├── settings.json # Project-specific settings
|
||||||
├── spec.md # Project specification
|
├── app_spec.txt # Project specification (XML format)
|
||||||
├── analysis.json # Project structure analysis
|
├── active-branches.json # Active git branches tracking
|
||||||
└── feature-suggestions.json # AI-generated suggestions
|
└── execution-state.json # Auto-mode execution state
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Global Data
|
#### Global Data
|
||||||
@@ -627,7 +635,6 @@ data/
|
|||||||
|
|
||||||
- [Contributing Guide](./CONTRIBUTING.md) - How to contribute to Automaker
|
- [Contributing Guide](./CONTRIBUTING.md) - How to contribute to Automaker
|
||||||
- [Project Documentation](./docs/) - Architecture guides, patterns, and developer docs
|
- [Project Documentation](./docs/) - Architecture guides, patterns, and developer docs
|
||||||
- [Docker Isolation Guide](./docs/docker-isolation.md) - Security-focused Docker deployment
|
|
||||||
- [Shared Packages Guide](./docs/llm-shared-packages.md) - Using monorepo packages
|
- [Shared Packages Guide](./docs/llm-shared-packages.md) - Using monorepo packages
|
||||||
|
|
||||||
### Community
|
### Community
|
||||||
|
|||||||
@@ -44,6 +44,11 @@ CORS_ORIGIN=http://localhost:3007
|
|||||||
# OPTIONAL - Server
|
# OPTIONAL - Server
|
||||||
# ============================================
|
# ============================================
|
||||||
|
|
||||||
|
# Host to bind the server to (default: 0.0.0.0)
|
||||||
|
# Use 0.0.0.0 to listen on all interfaces (recommended for Docker/remote access)
|
||||||
|
# Use 127.0.0.1 or localhost to restrict to local connections only
|
||||||
|
HOST=0.0.0.0
|
||||||
|
|
||||||
# Port to run the server on
|
# Port to run the server on
|
||||||
PORT=3008
|
PORT=3008
|
||||||
|
|
||||||
@@ -63,6 +68,14 @@ TERMINAL_PASSWORD=
|
|||||||
|
|
||||||
ENABLE_REQUEST_LOGGING=false
|
ENABLE_REQUEST_LOGGING=false
|
||||||
|
|
||||||
|
# ============================================
|
||||||
|
# OPTIONAL - UI Behavior
|
||||||
|
# ============================================
|
||||||
|
|
||||||
|
# Skip the sandbox warning dialog on startup (default: false)
|
||||||
|
# Set to "true" to disable the warning entirely (useful for dev/CI environments)
|
||||||
|
AUTOMAKER_SKIP_SANDBOX_WARNING=false
|
||||||
|
|
||||||
# ============================================
|
# ============================================
|
||||||
# OPTIONAL - Debugging
|
# OPTIONAL - Debugging
|
||||||
# ============================================
|
# ============================================
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "@automaker/server",
|
"name": "@automaker/server",
|
||||||
"version": "0.9.0",
|
"version": "0.12.0",
|
||||||
"description": "Backend server for Automaker - provides API for both web and Electron modes",
|
"description": "Backend server for Automaker - provides API for both web and Electron modes",
|
||||||
"author": "AutoMaker Team",
|
"author": "AutoMaker Team",
|
||||||
"license": "SEE LICENSE IN LICENSE",
|
"license": "SEE LICENSE IN LICENSE",
|
||||||
@@ -32,7 +32,7 @@
|
|||||||
"@automaker/prompts": "1.0.0",
|
"@automaker/prompts": "1.0.0",
|
||||||
"@automaker/types": "1.0.0",
|
"@automaker/types": "1.0.0",
|
||||||
"@automaker/utils": "1.0.0",
|
"@automaker/utils": "1.0.0",
|
||||||
"@modelcontextprotocol/sdk": "1.25.1",
|
"@modelcontextprotocol/sdk": "1.25.2",
|
||||||
"@openai/codex-sdk": "^0.77.0",
|
"@openai/codex-sdk": "^0.77.0",
|
||||||
"cookie-parser": "1.4.7",
|
"cookie-parser": "1.4.7",
|
||||||
"cors": "2.8.5",
|
"cors": "2.8.5",
|
||||||
|
|||||||
@@ -17,9 +17,19 @@ import dotenv from 'dotenv';
|
|||||||
|
|
||||||
import { createEventEmitter, type EventEmitter } from './lib/events.js';
|
import { createEventEmitter, type EventEmitter } from './lib/events.js';
|
||||||
import { initAllowedPaths } from '@automaker/platform';
|
import { initAllowedPaths } from '@automaker/platform';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger, setLogLevel, LogLevel } from '@automaker/utils';
|
||||||
|
|
||||||
const logger = createLogger('Server');
|
const logger = createLogger('Server');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Map server log level string to LogLevel enum
|
||||||
|
*/
|
||||||
|
const LOG_LEVEL_MAP: Record<string, LogLevel> = {
|
||||||
|
error: LogLevel.ERROR,
|
||||||
|
warn: LogLevel.WARN,
|
||||||
|
info: LogLevel.INFO,
|
||||||
|
debug: LogLevel.DEBUG,
|
||||||
|
};
|
||||||
import { authMiddleware, validateWsConnectionToken, checkRawAuthentication } from './lib/auth.js';
|
import { authMiddleware, validateWsConnectionToken, checkRawAuthentication } from './lib/auth.js';
|
||||||
import { requireJsonContentType } from './middleware/require-json-content-type.js';
|
import { requireJsonContentType } from './middleware/require-json-content-type.js';
|
||||||
import { createAuthRoutes } from './routes/auth/index.js';
|
import { createAuthRoutes } from './routes/auth/index.js';
|
||||||
@@ -55,6 +65,8 @@ import { createClaudeRoutes } from './routes/claude/index.js';
|
|||||||
import { ClaudeUsageService } from './services/claude-usage-service.js';
|
import { ClaudeUsageService } from './services/claude-usage-service.js';
|
||||||
import { createCodexRoutes } from './routes/codex/index.js';
|
import { createCodexRoutes } from './routes/codex/index.js';
|
||||||
import { CodexUsageService } from './services/codex-usage-service.js';
|
import { CodexUsageService } from './services/codex-usage-service.js';
|
||||||
|
import { CodexAppServerService } from './services/codex-app-server-service.js';
|
||||||
|
import { CodexModelCacheService } from './services/codex-model-cache-service.js';
|
||||||
import { createGitHubRoutes } from './routes/github/index.js';
|
import { createGitHubRoutes } from './routes/github/index.js';
|
||||||
import { createContextRoutes } from './routes/context/index.js';
|
import { createContextRoutes } from './routes/context/index.js';
|
||||||
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
|
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
|
||||||
@@ -65,13 +77,40 @@ import { createPipelineRoutes } from './routes/pipeline/index.js';
|
|||||||
import { pipelineService } from './services/pipeline-service.js';
|
import { pipelineService } from './services/pipeline-service.js';
|
||||||
import { createIdeationRoutes } from './routes/ideation/index.js';
|
import { createIdeationRoutes } from './routes/ideation/index.js';
|
||||||
import { IdeationService } from './services/ideation-service.js';
|
import { IdeationService } from './services/ideation-service.js';
|
||||||
|
import { getDevServerService } from './services/dev-server-service.js';
|
||||||
|
import { eventHookService } from './services/event-hook-service.js';
|
||||||
|
import { createNotificationsRoutes } from './routes/notifications/index.js';
|
||||||
|
import { getNotificationService } from './services/notification-service.js';
|
||||||
|
import { createEventHistoryRoutes } from './routes/event-history/index.js';
|
||||||
|
import { getEventHistoryService } from './services/event-history-service.js';
|
||||||
|
import { createCodeReviewRoutes } from './routes/code-review/index.js';
|
||||||
|
import { CodeReviewService } from './services/code-review-service.js';
|
||||||
|
|
||||||
// Load environment variables
|
// Load environment variables
|
||||||
dotenv.config();
|
dotenv.config();
|
||||||
|
|
||||||
const PORT = parseInt(process.env.PORT || '3008', 10);
|
const PORT = parseInt(process.env.PORT || '3008', 10);
|
||||||
|
const HOST = process.env.HOST || '0.0.0.0';
|
||||||
|
const HOSTNAME = process.env.HOSTNAME || 'localhost';
|
||||||
const DATA_DIR = process.env.DATA_DIR || './data';
|
const DATA_DIR = process.env.DATA_DIR || './data';
|
||||||
const ENABLE_REQUEST_LOGGING = process.env.ENABLE_REQUEST_LOGGING !== 'false'; // Default to true
|
const ENABLE_REQUEST_LOGGING_DEFAULT = process.env.ENABLE_REQUEST_LOGGING !== 'false'; // Default to true
|
||||||
|
|
||||||
|
// Runtime-configurable request logging flag (can be changed via settings)
|
||||||
|
let requestLoggingEnabled = ENABLE_REQUEST_LOGGING_DEFAULT;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enable or disable HTTP request logging at runtime
|
||||||
|
*/
|
||||||
|
export function setRequestLoggingEnabled(enabled: boolean): void {
|
||||||
|
requestLoggingEnabled = enabled;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current request logging state
|
||||||
|
*/
|
||||||
|
export function isRequestLoggingEnabled(): boolean {
|
||||||
|
return requestLoggingEnabled;
|
||||||
|
}
|
||||||
|
|
||||||
// Check for required environment variables
|
// Check for required environment variables
|
||||||
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
|
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
|
||||||
@@ -100,22 +139,21 @@ initAllowedPaths();
|
|||||||
const app = express();
|
const app = express();
|
||||||
|
|
||||||
// Middleware
|
// Middleware
|
||||||
// Custom colored logger showing only endpoint and status code (configurable via ENABLE_REQUEST_LOGGING env var)
|
// Custom colored logger showing only endpoint and status code (dynamically configurable)
|
||||||
if (ENABLE_REQUEST_LOGGING) {
|
morgan.token('status-colored', (_req, res) => {
|
||||||
morgan.token('status-colored', (_req, res) => {
|
const status = res.statusCode;
|
||||||
const status = res.statusCode;
|
if (status >= 500) return `\x1b[31m${status}\x1b[0m`; // Red for server errors
|
||||||
if (status >= 500) return `\x1b[31m${status}\x1b[0m`; // Red for server errors
|
if (status >= 400) return `\x1b[33m${status}\x1b[0m`; // Yellow for client errors
|
||||||
if (status >= 400) return `\x1b[33m${status}\x1b[0m`; // Yellow for client errors
|
if (status >= 300) return `\x1b[36m${status}\x1b[0m`; // Cyan for redirects
|
||||||
if (status >= 300) return `\x1b[36m${status}\x1b[0m`; // Cyan for redirects
|
return `\x1b[32m${status}\x1b[0m`; // Green for success
|
||||||
return `\x1b[32m${status}\x1b[0m`; // Green for success
|
});
|
||||||
});
|
|
||||||
|
|
||||||
app.use(
|
app.use(
|
||||||
morgan(':method :url :status-colored', {
|
morgan(':method :url :status-colored', {
|
||||||
skip: (req) => req.url === '/api/health', // Skip health check logs
|
// Skip when request logging is disabled or for health check endpoints
|
||||||
})
|
skip: (req) => !requestLoggingEnabled || req.url === '/api/health',
|
||||||
);
|
})
|
||||||
}
|
);
|
||||||
// CORS configuration
|
// CORS configuration
|
||||||
// When using credentials (cookies), origin cannot be '*'
|
// When using credentials (cookies), origin cannot be '*'
|
||||||
// We dynamically allow the requesting origin for local development
|
// We dynamically allow the requesting origin for local development
|
||||||
@@ -168,14 +206,51 @@ const agentService = new AgentService(DATA_DIR, events, settingsService);
|
|||||||
const featureLoader = new FeatureLoader();
|
const featureLoader = new FeatureLoader();
|
||||||
const autoModeService = new AutoModeService(events, settingsService);
|
const autoModeService = new AutoModeService(events, settingsService);
|
||||||
const claudeUsageService = new ClaudeUsageService();
|
const claudeUsageService = new ClaudeUsageService();
|
||||||
const codexUsageService = new CodexUsageService();
|
const codexAppServerService = new CodexAppServerService();
|
||||||
|
const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
|
||||||
|
const codexUsageService = new CodexUsageService(codexAppServerService);
|
||||||
const mcpTestService = new MCPTestService(settingsService);
|
const mcpTestService = new MCPTestService(settingsService);
|
||||||
const ideationService = new IdeationService(events, settingsService, featureLoader);
|
const ideationService = new IdeationService(events, settingsService, featureLoader);
|
||||||
|
const codeReviewService = new CodeReviewService(events, settingsService);
|
||||||
|
|
||||||
|
// Initialize DevServerService with event emitter for real-time log streaming
|
||||||
|
const devServerService = getDevServerService();
|
||||||
|
devServerService.setEventEmitter(events);
|
||||||
|
|
||||||
|
// Initialize Notification Service with event emitter for real-time updates
|
||||||
|
const notificationService = getNotificationService();
|
||||||
|
notificationService.setEventEmitter(events);
|
||||||
|
|
||||||
|
// Initialize Event History Service
|
||||||
|
const eventHistoryService = getEventHistoryService();
|
||||||
|
|
||||||
|
// Initialize Event Hook Service for custom event triggers (with history storage)
|
||||||
|
eventHookService.initialize(events, settingsService, eventHistoryService);
|
||||||
|
|
||||||
// Initialize services
|
// Initialize services
|
||||||
(async () => {
|
(async () => {
|
||||||
|
// Apply logging settings from saved settings
|
||||||
|
try {
|
||||||
|
const settings = await settingsService.getGlobalSettings();
|
||||||
|
if (settings.serverLogLevel && LOG_LEVEL_MAP[settings.serverLogLevel] !== undefined) {
|
||||||
|
setLogLevel(LOG_LEVEL_MAP[settings.serverLogLevel]);
|
||||||
|
logger.info(`Server log level set to: ${settings.serverLogLevel}`);
|
||||||
|
}
|
||||||
|
// Apply request logging setting (default true if not set)
|
||||||
|
const enableRequestLog = settings.enableRequestLogging ?? true;
|
||||||
|
setRequestLoggingEnabled(enableRequestLog);
|
||||||
|
logger.info(`HTTP request logging: ${enableRequestLog ? 'enabled' : 'disabled'}`);
|
||||||
|
} catch (err) {
|
||||||
|
logger.warn('Failed to load logging settings, using defaults');
|
||||||
|
}
|
||||||
|
|
||||||
await agentService.initialize();
|
await agentService.initialize();
|
||||||
logger.info('Agent service initialized');
|
logger.info('Agent service initialized');
|
||||||
|
|
||||||
|
// Bootstrap Codex model cache in background (don't block server startup)
|
||||||
|
void codexModelCacheService.getModels().catch((err) => {
|
||||||
|
logger.error('Failed to bootstrap Codex model cache:', err);
|
||||||
|
});
|
||||||
})();
|
})();
|
||||||
|
|
||||||
// Run stale validation cleanup every hour to prevent memory leaks from crashed validations
|
// Run stale validation cleanup every hour to prevent memory leaks from crashed validations
|
||||||
@@ -205,10 +280,10 @@ app.get('/api/health/detailed', createDetailedHandler());
|
|||||||
app.use('/api/fs', createFsRoutes(events));
|
app.use('/api/fs', createFsRoutes(events));
|
||||||
app.use('/api/agent', createAgentRoutes(agentService, events));
|
app.use('/api/agent', createAgentRoutes(agentService, events));
|
||||||
app.use('/api/sessions', createSessionsRoutes(agentService));
|
app.use('/api/sessions', createSessionsRoutes(agentService));
|
||||||
app.use('/api/features', createFeaturesRoutes(featureLoader));
|
app.use('/api/features', createFeaturesRoutes(featureLoader, settingsService, events));
|
||||||
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
|
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
|
||||||
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
|
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
|
||||||
app.use('/api/worktree', createWorktreeRoutes());
|
app.use('/api/worktree', createWorktreeRoutes(events, settingsService));
|
||||||
app.use('/api/git', createGitRoutes());
|
app.use('/api/git', createGitRoutes());
|
||||||
app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
|
app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
|
||||||
app.use('/api/models', createModelsRoutes());
|
app.use('/api/models', createModelsRoutes());
|
||||||
@@ -219,13 +294,16 @@ app.use('/api/templates', createTemplatesRoutes());
|
|||||||
app.use('/api/terminal', createTerminalRoutes());
|
app.use('/api/terminal', createTerminalRoutes());
|
||||||
app.use('/api/settings', createSettingsRoutes(settingsService));
|
app.use('/api/settings', createSettingsRoutes(settingsService));
|
||||||
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
|
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
|
||||||
app.use('/api/codex', createCodexRoutes(codexUsageService));
|
app.use('/api/codex', createCodexRoutes(codexUsageService, codexModelCacheService));
|
||||||
app.use('/api/github', createGitHubRoutes(events, settingsService));
|
app.use('/api/github', createGitHubRoutes(events, settingsService));
|
||||||
app.use('/api/context', createContextRoutes(settingsService));
|
app.use('/api/context', createContextRoutes(settingsService));
|
||||||
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
|
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
|
||||||
app.use('/api/mcp', createMCPRoutes(mcpTestService));
|
app.use('/api/mcp', createMCPRoutes(mcpTestService));
|
||||||
app.use('/api/pipeline', createPipelineRoutes(pipelineService));
|
app.use('/api/pipeline', createPipelineRoutes(pipelineService));
|
||||||
app.use('/api/ideation', createIdeationRoutes(events, ideationService, featureLoader));
|
app.use('/api/ideation', createIdeationRoutes(events, ideationService, featureLoader));
|
||||||
|
app.use('/api/notifications', createNotificationsRoutes(notificationService));
|
||||||
|
app.use('/api/event-history', createEventHistoryRoutes(eventHistoryService, settingsService));
|
||||||
|
app.use('/api/code-review', createCodeReviewRoutes(codeReviewService));
|
||||||
|
|
||||||
// Create HTTP server
|
// Create HTTP server
|
||||||
const server = createServer(app);
|
const server = createServer(app);
|
||||||
@@ -537,8 +615,8 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
|
|||||||
});
|
});
|
||||||
|
|
||||||
// Start server with error handling for port conflicts
|
// Start server with error handling for port conflicts
|
||||||
const startServer = (port: number) => {
|
const startServer = (port: number, host: string) => {
|
||||||
server.listen(port, () => {
|
server.listen(port, host, () => {
|
||||||
const terminalStatus = isTerminalEnabled()
|
const terminalStatus = isTerminalEnabled()
|
||||||
? isTerminalPasswordRequired()
|
? isTerminalPasswordRequired()
|
||||||
? 'enabled (password protected)'
|
? 'enabled (password protected)'
|
||||||
@@ -549,10 +627,11 @@ const startServer = (port: number) => {
|
|||||||
╔═══════════════════════════════════════════════════════╗
|
╔═══════════════════════════════════════════════════════╗
|
||||||
║ Automaker Backend Server ║
|
║ Automaker Backend Server ║
|
||||||
╠═══════════════════════════════════════════════════════╣
|
╠═══════════════════════════════════════════════════════╣
|
||||||
║ HTTP API: http://localhost:${portStr} ║
|
║ Listening: ${host}:${port}${' '.repeat(Math.max(0, 34 - host.length - port.toString().length))}║
|
||||||
║ WebSocket: ws://localhost:${portStr}/api/events ║
|
║ HTTP API: http://${HOSTNAME}:${portStr} ║
|
||||||
║ Terminal: ws://localhost:${portStr}/api/terminal/ws ║
|
║ WebSocket: ws://${HOSTNAME}:${portStr}/api/events ║
|
||||||
║ Health: http://localhost:${portStr}/api/health ║
|
║ Terminal: ws://${HOSTNAME}:${portStr}/api/terminal/ws ║
|
||||||
|
║ Health: http://${HOSTNAME}:${portStr}/api/health ║
|
||||||
║ Terminal: ${terminalStatus.padEnd(37)}║
|
║ Terminal: ${terminalStatus.padEnd(37)}║
|
||||||
╚═══════════════════════════════════════════════════════╝
|
╚═══════════════════════════════════════════════════════╝
|
||||||
`);
|
`);
|
||||||
@@ -586,7 +665,27 @@ const startServer = (port: number) => {
|
|||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
startServer(PORT);
|
startServer(PORT, HOST);
|
||||||
|
|
||||||
|
// Global error handlers to prevent crashes from uncaught errors
|
||||||
|
process.on('unhandledRejection', (reason: unknown, _promise: Promise<unknown>) => {
|
||||||
|
logger.error('Unhandled Promise Rejection:', {
|
||||||
|
reason: reason instanceof Error ? reason.message : String(reason),
|
||||||
|
stack: reason instanceof Error ? reason.stack : undefined,
|
||||||
|
});
|
||||||
|
// Don't exit - log the error and continue running
|
||||||
|
// This prevents the server from crashing due to unhandled rejections
|
||||||
|
});
|
||||||
|
|
||||||
|
process.on('uncaughtException', (error: Error) => {
|
||||||
|
logger.error('Uncaught Exception:', {
|
||||||
|
message: error.message,
|
||||||
|
stack: error.stack,
|
||||||
|
});
|
||||||
|
// Exit on uncaught exceptions to prevent undefined behavior
|
||||||
|
// The process is in an unknown state after an uncaught exception
|
||||||
|
process.exit(1);
|
||||||
|
});
|
||||||
|
|
||||||
// Graceful shutdown
|
// Graceful shutdown
|
||||||
process.on('SIGTERM', () => {
|
process.on('SIGTERM', () => {
|
||||||
|
|||||||
@@ -11,8 +11,12 @@ export { specOutputSchema } from '@automaker/types';
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Escape special XML characters
|
* Escape special XML characters
|
||||||
|
* Handles undefined/null values by converting them to empty strings
|
||||||
*/
|
*/
|
||||||
function escapeXml(str: string): string {
|
export function escapeXml(str: string | undefined | null): string {
|
||||||
|
if (str == null) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
return str
|
return str
|
||||||
.replace(/&/g, '&')
|
.replace(/&/g, '&')
|
||||||
.replace(/</g, '<')
|
.replace(/</g, '<')
|
||||||
|
|||||||
@@ -142,6 +142,8 @@ if (process.env.AUTOMAKER_HIDE_API_KEY !== 'true') {
|
|||||||
║ ${API_KEY}
|
║ ${API_KEY}
|
||||||
║ ║
|
║ ║
|
||||||
║ In Electron mode, authentication is handled automatically. ║
|
║ In Electron mode, authentication is handled automatically. ║
|
||||||
|
║ ║
|
||||||
|
║ 💡 Tip: Set AUTOMAKER_API_KEY env var to use a fixed key for dev ║
|
||||||
╚═══════════════════════════════════════════════════════════════════════╝
|
╚═══════════════════════════════════════════════════════════════════════╝
|
||||||
`);
|
`);
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -40,6 +40,7 @@ export interface UnifiedCliDetection {
|
|||||||
claude?: CliDetectionResult;
|
claude?: CliDetectionResult;
|
||||||
codex?: CliDetectionResult;
|
codex?: CliDetectionResult;
|
||||||
cursor?: CliDetectionResult;
|
cursor?: CliDetectionResult;
|
||||||
|
coderabbit?: CliDetectionResult;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -76,6 +77,16 @@ const CLI_CONFIGS = {
|
|||||||
win32: 'iwr https://cursor.sh/install.ps1 -UseBasicParsing | iex',
|
win32: 'iwr https://cursor.sh/install.ps1 -UseBasicParsing | iex',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
coderabbit: {
|
||||||
|
name: 'CodeRabbit CLI',
|
||||||
|
commands: ['coderabbit', 'cr'],
|
||||||
|
versionArgs: ['--version'],
|
||||||
|
installCommands: {
|
||||||
|
darwin: 'npm install -g coderabbit',
|
||||||
|
linux: 'npm install -g coderabbit',
|
||||||
|
win32: 'npm install -g coderabbit',
|
||||||
|
},
|
||||||
|
},
|
||||||
} as const;
|
} as const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -230,6 +241,8 @@ export async function checkCliAuth(
|
|||||||
return await checkCodexAuth(command);
|
return await checkCodexAuth(command);
|
||||||
case 'cursor':
|
case 'cursor':
|
||||||
return await checkCursorAuth(command);
|
return await checkCursorAuth(command);
|
||||||
|
case 'coderabbit':
|
||||||
|
return await checkCodeRabbitAuth(command);
|
||||||
default:
|
default:
|
||||||
return 'none';
|
return 'none';
|
||||||
}
|
}
|
||||||
@@ -355,6 +368,64 @@ async function checkCursorAuth(command: string): Promise<'cli' | 'api_key' | 'no
|
|||||||
return 'none';
|
return 'none';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check CodeRabbit CLI authentication
|
||||||
|
*
|
||||||
|
* Expected output when authenticated:
|
||||||
|
* ```
|
||||||
|
* CodeRabbit CLI Status
|
||||||
|
* ✅ Authentication: Logged in
|
||||||
|
* User Information:
|
||||||
|
* 👤 Name: ...
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
async function checkCodeRabbitAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
|
||||||
|
// Check for environment variable
|
||||||
|
if (process.env.CODERABBIT_API_KEY) {
|
||||||
|
return 'api_key';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try running auth status command
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const child = spawn(command, ['auth', 'status'], {
|
||||||
|
stdio: 'pipe',
|
||||||
|
timeout: 10000, // Increased timeout for slower systems
|
||||||
|
});
|
||||||
|
|
||||||
|
let stdout = '';
|
||||||
|
let stderr = '';
|
||||||
|
|
||||||
|
child.stdout?.on('data', (data) => {
|
||||||
|
stdout += data.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
child.stderr?.on('data', (data) => {
|
||||||
|
stderr += data.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('close', (code) => {
|
||||||
|
const output = stdout + stderr;
|
||||||
|
|
||||||
|
// Check for positive authentication indicators in output
|
||||||
|
const isAuthenticated =
|
||||||
|
code === 0 &&
|
||||||
|
(output.includes('Logged in') || output.includes('logged in')) &&
|
||||||
|
!output.toLowerCase().includes('not logged in') &&
|
||||||
|
!output.toLowerCase().includes('not authenticated');
|
||||||
|
|
||||||
|
if (isAuthenticated) {
|
||||||
|
resolve('cli');
|
||||||
|
} else {
|
||||||
|
resolve('none');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('error', () => {
|
||||||
|
resolve('none');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get installation instructions for a provider
|
* Get installation instructions for a provider
|
||||||
*/
|
*/
|
||||||
|
|||||||
@@ -5,9 +5,11 @@
|
|||||||
* Never assumes authenticated - only returns true if CLI confirms.
|
* Never assumes authenticated - only returns true if CLI confirms.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { spawnProcess, getCodexAuthPath } from '@automaker/platform';
|
import { spawnProcess } from '@automaker/platform';
|
||||||
import { findCodexCliPath } from '@automaker/platform';
|
import { findCodexCliPath } from '@automaker/platform';
|
||||||
import * as fs from 'fs';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
|
const logger = createLogger('CodexAuth');
|
||||||
|
|
||||||
const CODEX_COMMAND = 'codex';
|
const CODEX_COMMAND = 'codex';
|
||||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||||
@@ -26,36 +28,16 @@ export interface CodexAuthCheckResult {
|
|||||||
export async function checkCodexAuthentication(
|
export async function checkCodexAuthentication(
|
||||||
cliPath?: string | null
|
cliPath?: string | null
|
||||||
): Promise<CodexAuthCheckResult> {
|
): Promise<CodexAuthCheckResult> {
|
||||||
console.log('[CodexAuth] checkCodexAuthentication called with cliPath:', cliPath);
|
|
||||||
|
|
||||||
const resolvedCliPath = cliPath || (await findCodexCliPath());
|
const resolvedCliPath = cliPath || (await findCodexCliPath());
|
||||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
||||||
|
|
||||||
console.log('[CodexAuth] resolvedCliPath:', resolvedCliPath);
|
|
||||||
console.log('[CodexAuth] hasApiKey:', hasApiKey);
|
|
||||||
|
|
||||||
// Debug: Check auth file
|
|
||||||
const authFilePath = getCodexAuthPath();
|
|
||||||
console.log('[CodexAuth] Auth file path:', authFilePath);
|
|
||||||
try {
|
|
||||||
const authFileExists = fs.existsSync(authFilePath);
|
|
||||||
console.log('[CodexAuth] Auth file exists:', authFileExists);
|
|
||||||
if (authFileExists) {
|
|
||||||
const authContent = fs.readFileSync(authFilePath, 'utf-8');
|
|
||||||
console.log('[CodexAuth] Auth file content:', authContent.substring(0, 500)); // First 500 chars
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.log('[CodexAuth] Error reading auth file:', error);
|
|
||||||
}
|
|
||||||
|
|
||||||
// If CLI is not installed, cannot be authenticated
|
// If CLI is not installed, cannot be authenticated
|
||||||
if (!resolvedCliPath) {
|
if (!resolvedCliPath) {
|
||||||
console.log('[CodexAuth] No CLI path found, returning not authenticated');
|
logger.info('CLI not found');
|
||||||
return { authenticated: false, method: 'none' };
|
return { authenticated: false, method: 'none' };
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
console.log('[CodexAuth] Running: ' + resolvedCliPath + ' login status');
|
|
||||||
const result = await spawnProcess({
|
const result = await spawnProcess({
|
||||||
command: resolvedCliPath || CODEX_COMMAND,
|
command: resolvedCliPath || CODEX_COMMAND,
|
||||||
args: ['login', 'status'],
|
args: ['login', 'status'],
|
||||||
@@ -66,33 +48,21 @@ export async function checkCodexAuthentication(
|
|||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
console.log('[CodexAuth] Command result:');
|
|
||||||
console.log('[CodexAuth] exitCode:', result.exitCode);
|
|
||||||
console.log('[CodexAuth] stdout:', JSON.stringify(result.stdout));
|
|
||||||
console.log('[CodexAuth] stderr:', JSON.stringify(result.stderr));
|
|
||||||
|
|
||||||
// Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
|
// Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
|
||||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||||
const isLoggedIn = combinedOutput.includes('logged in');
|
const isLoggedIn = combinedOutput.includes('logged in');
|
||||||
console.log('[CodexAuth] isLoggedIn (contains "logged in" in stdout or stderr):', isLoggedIn);
|
|
||||||
|
|
||||||
if (result.exitCode === 0 && isLoggedIn) {
|
if (result.exitCode === 0 && isLoggedIn) {
|
||||||
// Determine auth method based on what we know
|
// Determine auth method based on what we know
|
||||||
const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
|
const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
|
||||||
console.log('[CodexAuth] Authenticated! method:', method);
|
logger.info(`✓ Authenticated (${method})`);
|
||||||
return { authenticated: true, method };
|
return { authenticated: true, method };
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log(
|
logger.info('Not authenticated');
|
||||||
'[CodexAuth] Not authenticated. exitCode:',
|
return { authenticated: false, method: 'none' };
|
||||||
result.exitCode,
|
|
||||||
'isLoggedIn:',
|
|
||||||
isLoggedIn
|
|
||||||
);
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.log('[CodexAuth] Error running command:', error);
|
logger.error('Failed to check authentication:', error);
|
||||||
|
return { authenticated: false, method: 'none' };
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log('[CodexAuth] Returning not authenticated');
|
|
||||||
return { authenticated: false, method: 'none' };
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -129,10 +129,30 @@ export const TOOL_PRESETS = {
|
|||||||
specGeneration: ['Read', 'Glob', 'Grep'] as const,
|
specGeneration: ['Read', 'Glob', 'Grep'] as const,
|
||||||
|
|
||||||
/** Full tool access for feature implementation */
|
/** Full tool access for feature implementation */
|
||||||
fullAccess: ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'] as const,
|
fullAccess: [
|
||||||
|
'Read',
|
||||||
|
'Write',
|
||||||
|
'Edit',
|
||||||
|
'Glob',
|
||||||
|
'Grep',
|
||||||
|
'Bash',
|
||||||
|
'WebSearch',
|
||||||
|
'WebFetch',
|
||||||
|
'TodoWrite',
|
||||||
|
] as const,
|
||||||
|
|
||||||
/** Tools for chat/interactive mode */
|
/** Tools for chat/interactive mode */
|
||||||
chat: ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'] as const,
|
chat: [
|
||||||
|
'Read',
|
||||||
|
'Write',
|
||||||
|
'Edit',
|
||||||
|
'Glob',
|
||||||
|
'Grep',
|
||||||
|
'Bash',
|
||||||
|
'WebSearch',
|
||||||
|
'WebFetch',
|
||||||
|
'TodoWrite',
|
||||||
|
] as const,
|
||||||
} as const;
|
} as const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -11,6 +11,14 @@ import {
|
|||||||
mergeAgentPrompts,
|
mergeAgentPrompts,
|
||||||
mergeBacklogPlanPrompts,
|
mergeBacklogPlanPrompts,
|
||||||
mergeEnhancementPrompts,
|
mergeEnhancementPrompts,
|
||||||
|
mergeCommitMessagePrompts,
|
||||||
|
mergeTitleGenerationPrompts,
|
||||||
|
mergeIssueValidationPrompts,
|
||||||
|
mergeIdeationPrompts,
|
||||||
|
mergeAppSpecPrompts,
|
||||||
|
mergeContextDescriptionPrompts,
|
||||||
|
mergeSuggestionsPrompts,
|
||||||
|
mergeTaskExecutionPrompts,
|
||||||
} from '@automaker/prompts';
|
} from '@automaker/prompts';
|
||||||
|
|
||||||
const logger = createLogger('SettingsHelper');
|
const logger = createLogger('SettingsHelper');
|
||||||
@@ -218,6 +226,14 @@ export async function getPromptCustomization(
|
|||||||
agent: ReturnType<typeof mergeAgentPrompts>;
|
agent: ReturnType<typeof mergeAgentPrompts>;
|
||||||
backlogPlan: ReturnType<typeof mergeBacklogPlanPrompts>;
|
backlogPlan: ReturnType<typeof mergeBacklogPlanPrompts>;
|
||||||
enhancement: ReturnType<typeof mergeEnhancementPrompts>;
|
enhancement: ReturnType<typeof mergeEnhancementPrompts>;
|
||||||
|
commitMessage: ReturnType<typeof mergeCommitMessagePrompts>;
|
||||||
|
titleGeneration: ReturnType<typeof mergeTitleGenerationPrompts>;
|
||||||
|
issueValidation: ReturnType<typeof mergeIssueValidationPrompts>;
|
||||||
|
ideation: ReturnType<typeof mergeIdeationPrompts>;
|
||||||
|
appSpec: ReturnType<typeof mergeAppSpecPrompts>;
|
||||||
|
contextDescription: ReturnType<typeof mergeContextDescriptionPrompts>;
|
||||||
|
suggestions: ReturnType<typeof mergeSuggestionsPrompts>;
|
||||||
|
taskExecution: ReturnType<typeof mergeTaskExecutionPrompts>;
|
||||||
}> {
|
}> {
|
||||||
let customization: PromptCustomization = {};
|
let customization: PromptCustomization = {};
|
||||||
|
|
||||||
@@ -239,6 +255,14 @@ export async function getPromptCustomization(
|
|||||||
agent: mergeAgentPrompts(customization.agent),
|
agent: mergeAgentPrompts(customization.agent),
|
||||||
backlogPlan: mergeBacklogPlanPrompts(customization.backlogPlan),
|
backlogPlan: mergeBacklogPlanPrompts(customization.backlogPlan),
|
||||||
enhancement: mergeEnhancementPrompts(customization.enhancement),
|
enhancement: mergeEnhancementPrompts(customization.enhancement),
|
||||||
|
commitMessage: mergeCommitMessagePrompts(customization.commitMessage),
|
||||||
|
titleGeneration: mergeTitleGenerationPrompts(customization.titleGeneration),
|
||||||
|
issueValidation: mergeIssueValidationPrompts(customization.issueValidation),
|
||||||
|
ideation: mergeIdeationPrompts(customization.ideation),
|
||||||
|
appSpec: mergeAppSpecPrompts(customization.appSpec),
|
||||||
|
contextDescription: mergeContextDescriptionPrompts(customization.contextDescription),
|
||||||
|
suggestions: mergeSuggestionsPrompts(customization.suggestions),
|
||||||
|
taskExecution: mergeTaskExecutionPrompts(customization.taskExecution),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,22 +5,24 @@
|
|||||||
|
|
||||||
import * as secureFs from './secure-fs.js';
|
import * as secureFs from './secure-fs.js';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
|
import type { PRState, WorktreePRInfo } from '@automaker/types';
|
||||||
|
|
||||||
|
// Re-export types for backwards compatibility
|
||||||
|
export type { PRState, WorktreePRInfo };
|
||||||
|
|
||||||
/** Maximum length for sanitized branch names in filesystem paths */
|
/** Maximum length for sanitized branch names in filesystem paths */
|
||||||
const MAX_SANITIZED_BRANCH_PATH_LENGTH = 200;
|
const MAX_SANITIZED_BRANCH_PATH_LENGTH = 200;
|
||||||
|
|
||||||
export interface WorktreePRInfo {
|
|
||||||
number: number;
|
|
||||||
url: string;
|
|
||||||
title: string;
|
|
||||||
state: string;
|
|
||||||
createdAt: string;
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface WorktreeMetadata {
|
export interface WorktreeMetadata {
|
||||||
branch: string;
|
branch: string;
|
||||||
createdAt: string;
|
createdAt: string;
|
||||||
pr?: WorktreePRInfo;
|
pr?: WorktreePRInfo;
|
||||||
|
/** Whether the init script has been executed for this worktree */
|
||||||
|
initScriptRan?: boolean;
|
||||||
|
/** Status of the init script execution */
|
||||||
|
initScriptStatus?: 'running' | 'success' | 'failed';
|
||||||
|
/** Error message if init script failed */
|
||||||
|
initScriptError?: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
611
apps/server/src/lib/xml-extractor.ts
Normal file
611
apps/server/src/lib/xml-extractor.ts
Normal file
@@ -0,0 +1,611 @@
|
|||||||
|
/**
|
||||||
|
* XML Extraction Utilities
|
||||||
|
*
|
||||||
|
* Robust XML parsing utilities for extracting and updating sections
|
||||||
|
* from app_spec.txt XML content. Uses regex-based parsing which is
|
||||||
|
* sufficient for our controlled XML structure.
|
||||||
|
*
|
||||||
|
* Note: If more complex XML parsing is needed in the future, consider
|
||||||
|
* using a library like 'fast-xml-parser' or 'xml2js'.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import type { SpecOutput } from '@automaker/types';
|
||||||
|
|
||||||
|
const logger = createLogger('XmlExtractor');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Represents an implemented feature extracted from XML
|
||||||
|
*/
|
||||||
|
export interface ImplementedFeature {
|
||||||
|
name: string;
|
||||||
|
description: string;
|
||||||
|
file_locations?: string[];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Logger interface for optional custom logging
|
||||||
|
*/
|
||||||
|
export interface XmlExtractorLogger {
|
||||||
|
debug: (message: string, ...args: unknown[]) => void;
|
||||||
|
warn?: (message: string, ...args: unknown[]) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options for XML extraction operations
|
||||||
|
*/
|
||||||
|
export interface ExtractXmlOptions {
|
||||||
|
/** Custom logger (defaults to internal logger) */
|
||||||
|
logger?: XmlExtractorLogger;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Escape special XML characters
|
||||||
|
* Handles undefined/null values by converting them to empty strings
|
||||||
|
*/
|
||||||
|
export function escapeXml(str: string | undefined | null): string {
|
||||||
|
if (str == null) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
return str
|
||||||
|
.replace(/&/g, '&')
|
||||||
|
.replace(/</g, '<')
|
||||||
|
.replace(/>/g, '>')
|
||||||
|
.replace(/"/g, '"')
|
||||||
|
.replace(/'/g, ''');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Unescape XML entities back to regular characters
|
||||||
|
*/
|
||||||
|
export function unescapeXml(str: string): string {
|
||||||
|
return str
|
||||||
|
.replace(/'/g, "'")
|
||||||
|
.replace(/"/g, '"')
|
||||||
|
.replace(/>/g, '>')
|
||||||
|
.replace(/</g, '<')
|
||||||
|
.replace(/&/g, '&');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract the content of a specific XML section
|
||||||
|
*
|
||||||
|
* @param xmlContent - The full XML content
|
||||||
|
* @param tagName - The tag name to extract (e.g., 'implemented_features')
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns The content between the tags, or null if not found
|
||||||
|
*/
|
||||||
|
export function extractXmlSection(
|
||||||
|
xmlContent: string,
|
||||||
|
tagName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string | null {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
const regex = new RegExp(`<${tagName}>([\\s\\S]*?)<\\/${tagName}>`, 'i');
|
||||||
|
const match = xmlContent.match(regex);
|
||||||
|
|
||||||
|
if (match) {
|
||||||
|
log.debug(`Extracted <${tagName}> section`);
|
||||||
|
return match[1];
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Section <${tagName}> not found`);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract all values from repeated XML elements
|
||||||
|
*
|
||||||
|
* @param xmlContent - The XML content to search
|
||||||
|
* @param tagName - The tag name to extract values from
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of extracted values (unescaped)
|
||||||
|
*/
|
||||||
|
export function extractXmlElements(
|
||||||
|
xmlContent: string,
|
||||||
|
tagName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const values: string[] = [];
|
||||||
|
|
||||||
|
const regex = new RegExp(`<${tagName}>([\\s\\S]*?)<\\/${tagName}>`, 'g');
|
||||||
|
const matches = xmlContent.matchAll(regex);
|
||||||
|
|
||||||
|
for (const match of matches) {
|
||||||
|
values.push(unescapeXml(match[1].trim()));
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Extracted ${values.length} <${tagName}> elements`);
|
||||||
|
return values;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract implemented features from app_spec.txt XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content of app_spec.txt
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of implemented features with name, description, and optional file_locations
|
||||||
|
*/
|
||||||
|
export function extractImplementedFeatures(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): ImplementedFeature[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const features: ImplementedFeature[] = [];
|
||||||
|
|
||||||
|
// Match <implemented_features>...</implemented_features> section
|
||||||
|
const implementedSection = extractXmlSection(specContent, 'implemented_features', options);
|
||||||
|
|
||||||
|
if (!implementedSection) {
|
||||||
|
log.debug('No implemented_features section found');
|
||||||
|
return features;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract individual feature blocks
|
||||||
|
const featureRegex = /<feature>([\s\S]*?)<\/feature>/g;
|
||||||
|
const featureMatches = implementedSection.matchAll(featureRegex);
|
||||||
|
|
||||||
|
for (const featureMatch of featureMatches) {
|
||||||
|
const featureContent = featureMatch[1];
|
||||||
|
|
||||||
|
// Extract name
|
||||||
|
const nameMatch = featureContent.match(/<name>([\s\S]*?)<\/name>/);
|
||||||
|
const name = nameMatch ? unescapeXml(nameMatch[1].trim()) : '';
|
||||||
|
|
||||||
|
// Extract description
|
||||||
|
const descMatch = featureContent.match(/<description>([\s\S]*?)<\/description>/);
|
||||||
|
const description = descMatch ? unescapeXml(descMatch[1].trim()) : '';
|
||||||
|
|
||||||
|
// Extract file_locations if present
|
||||||
|
const locationsSection = extractXmlSection(featureContent, 'file_locations', options);
|
||||||
|
const file_locations = locationsSection
|
||||||
|
? extractXmlElements(locationsSection, 'location', options)
|
||||||
|
: undefined;
|
||||||
|
|
||||||
|
if (name) {
|
||||||
|
features.push({
|
||||||
|
name,
|
||||||
|
description,
|
||||||
|
...(file_locations && file_locations.length > 0 ? { file_locations } : {}),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Extracted ${features.length} implemented features`);
|
||||||
|
return features;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract only the feature names from implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content of app_spec.txt
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of feature names
|
||||||
|
*/
|
||||||
|
export function extractImplementedFeatureNames(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string[] {
|
||||||
|
const features = extractImplementedFeatures(specContent, options);
|
||||||
|
return features.map((f) => f.name);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate XML for a single implemented feature
|
||||||
|
*
|
||||||
|
* @param feature - The feature to convert to XML
|
||||||
|
* @param indent - The base indentation level (default: 2 spaces)
|
||||||
|
* @returns XML string for the feature
|
||||||
|
*/
|
||||||
|
export function featureToXml(feature: ImplementedFeature, indent: string = ' '): string {
|
||||||
|
const i2 = indent.repeat(2);
|
||||||
|
const i3 = indent.repeat(3);
|
||||||
|
const i4 = indent.repeat(4);
|
||||||
|
|
||||||
|
let xml = `${i2}<feature>
|
||||||
|
${i3}<name>${escapeXml(feature.name)}</name>
|
||||||
|
${i3}<description>${escapeXml(feature.description)}</description>`;
|
||||||
|
|
||||||
|
if (feature.file_locations && feature.file_locations.length > 0) {
|
||||||
|
xml += `
|
||||||
|
${i3}<file_locations>
|
||||||
|
${feature.file_locations.map((loc) => `${i4}<location>${escapeXml(loc)}</location>`).join('\n')}
|
||||||
|
${i3}</file_locations>`;
|
||||||
|
}
|
||||||
|
|
||||||
|
xml += `
|
||||||
|
${i2}</feature>`;
|
||||||
|
|
||||||
|
return xml;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate XML for an array of implemented features
|
||||||
|
*
|
||||||
|
* @param features - Array of features to convert to XML
|
||||||
|
* @param indent - The base indentation level (default: 2 spaces)
|
||||||
|
* @returns XML string for the implemented_features section content
|
||||||
|
*/
|
||||||
|
export function featuresToXml(features: ImplementedFeature[], indent: string = ' '): string {
|
||||||
|
return features.map((f) => featureToXml(f, indent)).join('\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update the implemented_features section in XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param newFeatures - The new features to set
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the new implemented_features section
|
||||||
|
*/
|
||||||
|
export function updateImplementedFeaturesSection(
|
||||||
|
specContent: string,
|
||||||
|
newFeatures: ImplementedFeature[],
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const indent = ' ';
|
||||||
|
|
||||||
|
// Generate new section content
|
||||||
|
const newSectionContent = featuresToXml(newFeatures, indent);
|
||||||
|
|
||||||
|
// Build the new section
|
||||||
|
const newSection = `<implemented_features>
|
||||||
|
${newSectionContent}
|
||||||
|
${indent}</implemented_features>`;
|
||||||
|
|
||||||
|
// Check if section exists
|
||||||
|
const sectionRegex = /<implemented_features>[\s\S]*?<\/implemented_features>/;
|
||||||
|
|
||||||
|
if (sectionRegex.test(specContent)) {
|
||||||
|
log.debug('Replacing existing implemented_features section');
|
||||||
|
return specContent.replace(sectionRegex, newSection);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If section doesn't exist, try to insert after core_capabilities
|
||||||
|
const coreCapabilitiesEnd = '</core_capabilities>';
|
||||||
|
const insertIndex = specContent.indexOf(coreCapabilitiesEnd);
|
||||||
|
|
||||||
|
if (insertIndex !== -1) {
|
||||||
|
const insertPosition = insertIndex + coreCapabilitiesEnd.length;
|
||||||
|
log.debug('Inserting implemented_features after core_capabilities');
|
||||||
|
return (
|
||||||
|
specContent.slice(0, insertPosition) +
|
||||||
|
'\n\n' +
|
||||||
|
indent +
|
||||||
|
newSection +
|
||||||
|
specContent.slice(insertPosition)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// As a fallback, insert before </project_specification>
|
||||||
|
const projectSpecEnd = '</project_specification>';
|
||||||
|
const fallbackIndex = specContent.indexOf(projectSpecEnd);
|
||||||
|
|
||||||
|
if (fallbackIndex !== -1) {
|
||||||
|
log.debug('Inserting implemented_features before </project_specification>');
|
||||||
|
return (
|
||||||
|
specContent.slice(0, fallbackIndex) +
|
||||||
|
indent +
|
||||||
|
newSection +
|
||||||
|
'\n' +
|
||||||
|
specContent.slice(fallbackIndex)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
log.warn?.('Could not find appropriate insertion point for implemented_features');
|
||||||
|
log.debug('Could not find appropriate insertion point for implemented_features');
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a new feature to the implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param newFeature - The feature to add
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the new feature added
|
||||||
|
*/
|
||||||
|
export function addImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
newFeature: ImplementedFeature,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Extract existing features
|
||||||
|
const existingFeatures = extractImplementedFeatures(specContent, options);
|
||||||
|
|
||||||
|
// Check for duplicates by name
|
||||||
|
const isDuplicate = existingFeatures.some(
|
||||||
|
(f) => f.name.toLowerCase() === newFeature.name.toLowerCase()
|
||||||
|
);
|
||||||
|
|
||||||
|
if (isDuplicate) {
|
||||||
|
log.debug(`Feature "${newFeature.name}" already exists, skipping`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the new feature
|
||||||
|
const updatedFeatures = [...existingFeatures, newFeature];
|
||||||
|
|
||||||
|
log.debug(`Adding feature "${newFeature.name}"`);
|
||||||
|
return updateImplementedFeaturesSection(specContent, updatedFeatures, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove a feature from the implemented_features section by name
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param featureName - The name of the feature to remove
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the feature removed
|
||||||
|
*/
|
||||||
|
export function removeImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
featureName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Extract existing features
|
||||||
|
const existingFeatures = extractImplementedFeatures(specContent, options);
|
||||||
|
|
||||||
|
// Filter out the feature to remove
|
||||||
|
const updatedFeatures = existingFeatures.filter(
|
||||||
|
(f) => f.name.toLowerCase() !== featureName.toLowerCase()
|
||||||
|
);
|
||||||
|
|
||||||
|
if (updatedFeatures.length === existingFeatures.length) {
|
||||||
|
log.debug(`Feature "${featureName}" not found, no changes made`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Removing feature "${featureName}"`);
|
||||||
|
return updateImplementedFeaturesSection(specContent, updatedFeatures, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update an existing feature in the implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param featureName - The name of the feature to update
|
||||||
|
* @param updates - Partial updates to apply to the feature
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the feature modified
|
||||||
|
*/
|
||||||
|
export function updateImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
featureName: string,
|
||||||
|
updates: Partial<ImplementedFeature>,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Extract existing features
|
||||||
|
const existingFeatures = extractImplementedFeatures(specContent, options);
|
||||||
|
|
||||||
|
// Find and update the feature
|
||||||
|
let found = false;
|
||||||
|
const updatedFeatures = existingFeatures.map((f) => {
|
||||||
|
if (f.name.toLowerCase() === featureName.toLowerCase()) {
|
||||||
|
found = true;
|
||||||
|
return {
|
||||||
|
...f,
|
||||||
|
...updates,
|
||||||
|
// Preserve the original name if not explicitly updated
|
||||||
|
name: updates.name ?? f.name,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
return f;
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!found) {
|
||||||
|
log.debug(`Feature "${featureName}" not found, no changes made`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Updating feature "${featureName}"`);
|
||||||
|
return updateImplementedFeaturesSection(specContent, updatedFeatures, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a feature exists in the implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param featureName - The name of the feature to check
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns True if the feature exists
|
||||||
|
*/
|
||||||
|
export function hasImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
featureName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): boolean {
|
||||||
|
const features = extractImplementedFeatures(specContent, options);
|
||||||
|
return features.some((f) => f.name.toLowerCase() === featureName.toLowerCase());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert extracted features to SpecOutput.implemented_features format
|
||||||
|
*
|
||||||
|
* @param features - Array of extracted features
|
||||||
|
* @returns Features in SpecOutput format
|
||||||
|
*/
|
||||||
|
export function toSpecOutputFeatures(
|
||||||
|
features: ImplementedFeature[]
|
||||||
|
): SpecOutput['implemented_features'] {
|
||||||
|
return features.map((f) => ({
|
||||||
|
name: f.name,
|
||||||
|
description: f.description,
|
||||||
|
...(f.file_locations && f.file_locations.length > 0
|
||||||
|
? { file_locations: f.file_locations }
|
||||||
|
: {}),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert SpecOutput.implemented_features to ImplementedFeature format
|
||||||
|
*
|
||||||
|
* @param specFeatures - Features from SpecOutput
|
||||||
|
* @returns Features in ImplementedFeature format
|
||||||
|
*/
|
||||||
|
export function fromSpecOutputFeatures(
|
||||||
|
specFeatures: SpecOutput['implemented_features']
|
||||||
|
): ImplementedFeature[] {
|
||||||
|
return specFeatures.map((f) => ({
|
||||||
|
name: f.name,
|
||||||
|
description: f.description,
|
||||||
|
...(f.file_locations && f.file_locations.length > 0
|
||||||
|
? { file_locations: f.file_locations }
|
||||||
|
: {}),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Represents a roadmap phase extracted from XML
|
||||||
|
*/
|
||||||
|
export interface RoadmapPhase {
|
||||||
|
name: string;
|
||||||
|
status: string;
|
||||||
|
description?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract the technology stack from app_spec.txt XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of technology names
|
||||||
|
*/
|
||||||
|
export function extractTechnologyStack(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
const techSection = extractXmlSection(specContent, 'technology_stack', options);
|
||||||
|
if (!techSection) {
|
||||||
|
log.debug('No technology_stack section found');
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
const technologies = extractXmlElements(techSection, 'technology', options);
|
||||||
|
log.debug(`Extracted ${technologies.length} technologies`);
|
||||||
|
return technologies;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update the technology_stack section in XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param technologies - The new technology list
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content
|
||||||
|
*/
|
||||||
|
export function updateTechnologyStack(
|
||||||
|
specContent: string,
|
||||||
|
technologies: string[],
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const indent = ' ';
|
||||||
|
const i2 = indent.repeat(2);
|
||||||
|
|
||||||
|
// Generate new section content
|
||||||
|
const techXml = technologies
|
||||||
|
.map((t) => `${i2}<technology>${escapeXml(t)}</technology>`)
|
||||||
|
.join('\n');
|
||||||
|
const newSection = `<technology_stack>\n${techXml}\n${indent}</technology_stack>`;
|
||||||
|
|
||||||
|
// Check if section exists
|
||||||
|
const sectionRegex = /<technology_stack>[\s\S]*?<\/technology_stack>/;
|
||||||
|
|
||||||
|
if (sectionRegex.test(specContent)) {
|
||||||
|
log.debug('Replacing existing technology_stack section');
|
||||||
|
return specContent.replace(sectionRegex, newSection);
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug('No technology_stack section found to update');
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract roadmap phases from app_spec.txt XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of roadmap phases
|
||||||
|
*/
|
||||||
|
export function extractRoadmapPhases(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): RoadmapPhase[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const phases: RoadmapPhase[] = [];
|
||||||
|
|
||||||
|
const roadmapSection = extractXmlSection(specContent, 'implementation_roadmap', options);
|
||||||
|
if (!roadmapSection) {
|
||||||
|
log.debug('No implementation_roadmap section found');
|
||||||
|
return phases;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract individual phase blocks
|
||||||
|
const phaseRegex = /<phase>([\s\S]*?)<\/phase>/g;
|
||||||
|
const phaseMatches = roadmapSection.matchAll(phaseRegex);
|
||||||
|
|
||||||
|
for (const phaseMatch of phaseMatches) {
|
||||||
|
const phaseContent = phaseMatch[1];
|
||||||
|
|
||||||
|
const nameMatch = phaseContent.match(/<name>([\s\S]*?)<\/name>/);
|
||||||
|
const name = nameMatch ? unescapeXml(nameMatch[1].trim()) : '';
|
||||||
|
|
||||||
|
const statusMatch = phaseContent.match(/<status>([\s\S]*?)<\/status>/);
|
||||||
|
const status = statusMatch ? unescapeXml(statusMatch[1].trim()) : 'pending';
|
||||||
|
|
||||||
|
const descMatch = phaseContent.match(/<description>([\s\S]*?)<\/description>/);
|
||||||
|
const description = descMatch ? unescapeXml(descMatch[1].trim()) : undefined;
|
||||||
|
|
||||||
|
if (name) {
|
||||||
|
phases.push({ name, status, description });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Extracted ${phases.length} roadmap phases`);
|
||||||
|
return phases;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update a roadmap phase status in XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param phaseName - The name of the phase to update
|
||||||
|
* @param newStatus - The new status value
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content
|
||||||
|
*/
|
||||||
|
export function updateRoadmapPhaseStatus(
|
||||||
|
specContent: string,
|
||||||
|
phaseName: string,
|
||||||
|
newStatus: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Find the phase and update its status
|
||||||
|
// Match the phase block containing the specific name
|
||||||
|
const phaseRegex = new RegExp(
|
||||||
|
`(<phase>\\s*<name>\\s*${escapeXml(phaseName)}\\s*<\\/name>\\s*<status>)[\\s\\S]*?(<\\/status>)`,
|
||||||
|
'i'
|
||||||
|
);
|
||||||
|
|
||||||
|
if (phaseRegex.test(specContent)) {
|
||||||
|
log.debug(`Updating phase "${phaseName}" status to "${newStatus}"`);
|
||||||
|
return specContent.replace(phaseRegex, `$1${escapeXml(newStatus)}$2`);
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Phase "${phaseName}" not found`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
@@ -8,12 +8,28 @@ import type { Request, Response, NextFunction } from 'express';
|
|||||||
import { validatePath, PathNotAllowedError } from '@automaker/platform';
|
import { validatePath, PathNotAllowedError } from '@automaker/platform';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a middleware that validates specified path parameters in req.body
|
* Helper to get parameter value from request (checks body first, then query)
|
||||||
|
*/
|
||||||
|
function getParamValue(req: Request, paramName: string): unknown {
|
||||||
|
// Check body first (for POST/PUT/PATCH requests)
|
||||||
|
if (req.body && req.body[paramName] !== undefined) {
|
||||||
|
return req.body[paramName];
|
||||||
|
}
|
||||||
|
// Fall back to query params (for GET requests)
|
||||||
|
if (req.query && req.query[paramName] !== undefined) {
|
||||||
|
return req.query[paramName];
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a middleware that validates specified path parameters in req.body or req.query
|
||||||
* @param paramNames - Names of parameters to validate (e.g., 'projectPath', 'worktreePath')
|
* @param paramNames - Names of parameters to validate (e.g., 'projectPath', 'worktreePath')
|
||||||
* @example
|
* @example
|
||||||
* router.post('/create', validatePathParams('projectPath'), handler);
|
* router.post('/create', validatePathParams('projectPath'), handler);
|
||||||
* router.post('/delete', validatePathParams('projectPath', 'worktreePath'), handler);
|
* router.post('/delete', validatePathParams('projectPath', 'worktreePath'), handler);
|
||||||
* router.post('/send', validatePathParams('workingDirectory?', 'imagePaths[]'), handler);
|
* router.post('/send', validatePathParams('workingDirectory?', 'imagePaths[]'), handler);
|
||||||
|
* router.get('/logs', validatePathParams('worktreePath'), handler); // Works with query params too
|
||||||
*
|
*
|
||||||
* Special syntax:
|
* Special syntax:
|
||||||
* - 'paramName?' - Optional parameter (only validated if present)
|
* - 'paramName?' - Optional parameter (only validated if present)
|
||||||
@@ -26,8 +42,8 @@ export function validatePathParams(...paramNames: string[]) {
|
|||||||
// Handle optional parameters (paramName?)
|
// Handle optional parameters (paramName?)
|
||||||
if (paramName.endsWith('?')) {
|
if (paramName.endsWith('?')) {
|
||||||
const actualName = paramName.slice(0, -1);
|
const actualName = paramName.slice(0, -1);
|
||||||
const value = req.body[actualName];
|
const value = getParamValue(req, actualName);
|
||||||
if (value) {
|
if (value && typeof value === 'string') {
|
||||||
validatePath(value);
|
validatePath(value);
|
||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
@@ -36,18 +52,20 @@ export function validatePathParams(...paramNames: string[]) {
|
|||||||
// Handle array parameters (paramName[])
|
// Handle array parameters (paramName[])
|
||||||
if (paramName.endsWith('[]')) {
|
if (paramName.endsWith('[]')) {
|
||||||
const actualName = paramName.slice(0, -2);
|
const actualName = paramName.slice(0, -2);
|
||||||
const values = req.body[actualName];
|
const values = getParamValue(req, actualName);
|
||||||
if (Array.isArray(values) && values.length > 0) {
|
if (Array.isArray(values) && values.length > 0) {
|
||||||
for (const value of values) {
|
for (const value of values) {
|
||||||
validatePath(value);
|
if (typeof value === 'string') {
|
||||||
|
validatePath(value);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle regular parameters
|
// Handle regular parameters
|
||||||
const value = req.body[paramName];
|
const value = getParamValue(req, paramName);
|
||||||
if (value) {
|
if (value && typeof value === 'string') {
|
||||||
validatePath(value);
|
validatePath(value);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -22,6 +22,8 @@ import type {
|
|||||||
// Only these vars are passed - nothing else from process.env leaks through.
|
// Only these vars are passed - nothing else from process.env leaks through.
|
||||||
const ALLOWED_ENV_VARS = [
|
const ALLOWED_ENV_VARS = [
|
||||||
'ANTHROPIC_API_KEY',
|
'ANTHROPIC_API_KEY',
|
||||||
|
'ANTHROPIC_BASE_URL',
|
||||||
|
'ANTHROPIC_AUTH_TOKEN',
|
||||||
'PATH',
|
'PATH',
|
||||||
'HOME',
|
'HOME',
|
||||||
'SHELL',
|
'SHELL',
|
||||||
@@ -99,6 +101,8 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
...(maxThinkingTokens && { maxThinkingTokens }),
|
...(maxThinkingTokens && { maxThinkingTokens }),
|
||||||
// Subagents configuration for specialized task delegation
|
// Subagents configuration for specialized task delegation
|
||||||
...(options.agents && { agents: options.agents }),
|
...(options.agents && { agents: options.agents }),
|
||||||
|
// Pass through outputFormat for structured JSON outputs
|
||||||
|
...(options.outputFormat && { outputFormat: options.outputFormat }),
|
||||||
};
|
};
|
||||||
|
|
||||||
// Build prompt payload
|
// Build prompt payload
|
||||||
|
|||||||
@@ -26,22 +26,23 @@
|
|||||||
* ```
|
* ```
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { execSync } from 'child_process';
|
|
||||||
import * as fs from 'fs';
|
|
||||||
import * as path from 'path';
|
|
||||||
import * as os from 'os';
|
|
||||||
import { BaseProvider } from './base-provider.js';
|
|
||||||
import type { ProviderConfig, ExecuteOptions, ProviderMessage } from './types.js';
|
|
||||||
import {
|
import {
|
||||||
spawnJSONLProcess,
|
|
||||||
type SubprocessOptions,
|
|
||||||
isWslAvailable,
|
|
||||||
findCliInWsl,
|
|
||||||
createWslCommand,
|
createWslCommand,
|
||||||
|
findCliInWsl,
|
||||||
|
isWslAvailable,
|
||||||
|
spawnJSONLProcess,
|
||||||
windowsToWslPath,
|
windowsToWslPath,
|
||||||
|
type SubprocessOptions,
|
||||||
type WslCliResult,
|
type WslCliResult,
|
||||||
} from '@automaker/platform';
|
} from '@automaker/platform';
|
||||||
|
import { calculateReasoningTimeout } from '@automaker/types';
|
||||||
import { createLogger, isAbortError } from '@automaker/utils';
|
import { createLogger, isAbortError } from '@automaker/utils';
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as os from 'os';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { BaseProvider } from './base-provider.js';
|
||||||
|
import type { ExecuteOptions, ProviderConfig, ProviderMessage } from './types.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Spawn strategy for CLI tools on Windows
|
* Spawn strategy for CLI tools on Windows
|
||||||
@@ -107,6 +108,15 @@ export interface CliDetectionResult {
|
|||||||
// Create logger for CLI operations
|
// Create logger for CLI operations
|
||||||
const cliLogger = createLogger('CliProvider');
|
const cliLogger = createLogger('CliProvider');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base timeout for CLI operations in milliseconds.
|
||||||
|
* CLI tools have longer startup and processing times compared to direct API calls,
|
||||||
|
* so we use a higher base timeout (120s) than the default provider timeout (30s).
|
||||||
|
* This is multiplied by reasoning effort multipliers when applicable.
|
||||||
|
* @see calculateReasoningTimeout from @automaker/types
|
||||||
|
*/
|
||||||
|
const CLI_BASE_TIMEOUT_MS = 120000;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Abstract base class for CLI-based providers
|
* Abstract base class for CLI-based providers
|
||||||
*
|
*
|
||||||
@@ -450,6 +460,10 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Calculate dynamic timeout based on reasoning effort.
|
||||||
|
// This addresses GitHub issue #530 where reasoning models with 'xhigh' effort would timeout.
|
||||||
|
const timeout = calculateReasoningTimeout(options.reasoningEffort, CLI_BASE_TIMEOUT_MS);
|
||||||
|
|
||||||
// WSL strategy
|
// WSL strategy
|
||||||
if (this.useWsl && this.wslCliPath) {
|
if (this.useWsl && this.wslCliPath) {
|
||||||
const wslCwd = windowsToWslPath(cwd);
|
const wslCwd = windowsToWslPath(cwd);
|
||||||
@@ -473,7 +487,7 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
cwd, // Windows cwd for spawn
|
cwd, // Windows cwd for spawn
|
||||||
env: filteredEnv,
|
env: filteredEnv,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: 120000, // CLI operations may take longer
|
timeout,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -488,7 +502,7 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
cwd,
|
cwd,
|
||||||
env: filteredEnv,
|
env: filteredEnv,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: 120000,
|
timeout,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -501,7 +515,7 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
cwd,
|
cwd,
|
||||||
env: filteredEnv,
|
env: filteredEnv,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: 120000,
|
timeout,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -522,8 +536,13 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
throw new Error(`${this.getCliName()} CLI not found. ${this.getInstallInstructions()}`);
|
throw new Error(`${this.getCliName()} CLI not found. ${this.getInstallInstructions()}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
const cliArgs = this.buildCliArgs(options);
|
// Many CLI-based providers do not support a separate "system" message.
|
||||||
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
|
// If a systemPrompt is provided, embed it into the prompt so downstream models
|
||||||
|
// still receive critical formatting/schema instructions (e.g., JSON-only outputs).
|
||||||
|
const effectiveOptions = this.embedSystemPromptIntoPrompt(options);
|
||||||
|
|
||||||
|
const cliArgs = this.buildCliArgs(effectiveOptions);
|
||||||
|
const subprocessOptions = this.buildSubprocessOptions(effectiveOptions, cliArgs);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
|
for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
|
||||||
@@ -555,4 +574,52 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
throw error;
|
throw error;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Embed system prompt text into the user prompt for CLI providers.
|
||||||
|
*
|
||||||
|
* Most CLI providers we integrate with only accept a single prompt via stdin/args.
|
||||||
|
* When upstream code supplies `options.systemPrompt`, we prepend it to the prompt
|
||||||
|
* content and clear `systemPrompt` to avoid any accidental double-injection by
|
||||||
|
* subclasses.
|
||||||
|
*/
|
||||||
|
protected embedSystemPromptIntoPrompt(options: ExecuteOptions): ExecuteOptions {
|
||||||
|
if (!options.systemPrompt) {
|
||||||
|
return options;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only string system prompts can be reliably embedded for CLI providers.
|
||||||
|
// Presets are provider-specific (e.g., Claude SDK) and cannot be represented
|
||||||
|
// universally. If a preset is provided, we only embed its optional `append`.
|
||||||
|
const systemText =
|
||||||
|
typeof options.systemPrompt === 'string'
|
||||||
|
? options.systemPrompt
|
||||||
|
: options.systemPrompt.append
|
||||||
|
? options.systemPrompt.append
|
||||||
|
: '';
|
||||||
|
|
||||||
|
if (!systemText) {
|
||||||
|
return { ...options, systemPrompt: undefined };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Preserve original prompt structure.
|
||||||
|
if (typeof options.prompt === 'string') {
|
||||||
|
return {
|
||||||
|
...options,
|
||||||
|
prompt: `${systemText}\n\n---\n\n${options.prompt}`,
|
||||||
|
systemPrompt: undefined,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Array.isArray(options.prompt)) {
|
||||||
|
return {
|
||||||
|
...options,
|
||||||
|
prompt: [{ type: 'text', text: systemText }, ...options.prompt],
|
||||||
|
systemPrompt: undefined,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should be unreachable due to ExecuteOptions typing, but keep safe.
|
||||||
|
return { ...options, systemPrompt: undefined };
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ import {
|
|||||||
extractTextFromContent,
|
extractTextFromContent,
|
||||||
classifyError,
|
classifyError,
|
||||||
getUserFriendlyErrorMessage,
|
getUserFriendlyErrorMessage,
|
||||||
|
createLogger,
|
||||||
} from '@automaker/utils';
|
} from '@automaker/utils';
|
||||||
import type {
|
import type {
|
||||||
ExecuteOptions,
|
ExecuteOptions,
|
||||||
@@ -32,6 +33,8 @@ import {
|
|||||||
CODEX_MODEL_MAP,
|
CODEX_MODEL_MAP,
|
||||||
supportsReasoningEffort,
|
supportsReasoningEffort,
|
||||||
validateBareModelId,
|
validateBareModelId,
|
||||||
|
calculateReasoningTimeout,
|
||||||
|
DEFAULT_TIMEOUT_MS,
|
||||||
type CodexApprovalPolicy,
|
type CodexApprovalPolicy,
|
||||||
type CodexSandboxMode,
|
type CodexSandboxMode,
|
||||||
type CodexAuthStatus,
|
type CodexAuthStatus,
|
||||||
@@ -44,6 +47,7 @@ import {
|
|||||||
getCodexTodoToolName,
|
getCodexTodoToolName,
|
||||||
} from './codex-tool-mapping.js';
|
} from './codex-tool-mapping.js';
|
||||||
import { SettingsService } from '../services/settings-service.js';
|
import { SettingsService } from '../services/settings-service.js';
|
||||||
|
import { createTempEnvOverride } from '../lib/auth-utils.js';
|
||||||
import { checkSandboxCompatibility } from '../lib/sdk-options.js';
|
import { checkSandboxCompatibility } from '../lib/sdk-options.js';
|
||||||
import { CODEX_MODELS } from './codex-models.js';
|
import { CODEX_MODELS } from './codex-models.js';
|
||||||
|
|
||||||
@@ -89,7 +93,14 @@ const CODEX_ITEM_TYPES = {
|
|||||||
const SYSTEM_PROMPT_LABEL = 'System instructions';
|
const SYSTEM_PROMPT_LABEL = 'System instructions';
|
||||||
const HISTORY_HEADER = 'Current request:\n';
|
const HISTORY_HEADER = 'Current request:\n';
|
||||||
const TEXT_ENCODING = 'utf-8';
|
const TEXT_ENCODING = 'utf-8';
|
||||||
const DEFAULT_TIMEOUT_MS = 30000;
|
/**
|
||||||
|
* Default timeout for Codex CLI operations in milliseconds.
|
||||||
|
* This is the "no output" timeout - if the CLI doesn't produce any JSONL output
|
||||||
|
* for this duration, the process is killed. For reasoning models with high
|
||||||
|
* reasoning effort, this timeout is dynamically extended via calculateReasoningTimeout().
|
||||||
|
* @see calculateReasoningTimeout from @automaker/types
|
||||||
|
*/
|
||||||
|
const CODEX_CLI_TIMEOUT_MS = DEFAULT_TIMEOUT_MS;
|
||||||
const CONTEXT_WINDOW_256K = 256000;
|
const CONTEXT_WINDOW_256K = 256000;
|
||||||
const MAX_OUTPUT_32K = 32000;
|
const MAX_OUTPUT_32K = 32000;
|
||||||
const MAX_OUTPUT_16K = 16000;
|
const MAX_OUTPUT_16K = 16000;
|
||||||
@@ -141,6 +152,7 @@ type CodexExecutionMode = typeof CODEX_EXECUTION_MODE_CLI | typeof CODEX_EXECUTI
|
|||||||
type CodexExecutionPlan = {
|
type CodexExecutionPlan = {
|
||||||
mode: CodexExecutionMode;
|
mode: CodexExecutionMode;
|
||||||
cliPath: string | null;
|
cliPath: string | null;
|
||||||
|
openAiApiKey?: string | null;
|
||||||
};
|
};
|
||||||
|
|
||||||
const ALLOWED_ENV_VARS = [
|
const ALLOWED_ENV_VARS = [
|
||||||
@@ -165,6 +177,22 @@ function buildEnv(): Record<string, string> {
|
|||||||
return env;
|
return env;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async function resolveOpenAiApiKey(): Promise<string | null> {
|
||||||
|
const envKey = process.env[OPENAI_API_KEY_ENV];
|
||||||
|
if (envKey) {
|
||||||
|
return envKey;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const settingsService = new SettingsService(getCodexSettingsDir());
|
||||||
|
const credentials = await settingsService.getCredentials();
|
||||||
|
const storedKey = credentials.apiKeys.openai?.trim();
|
||||||
|
return storedKey ? storedKey : null;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function hasMcpServersConfigured(options: ExecuteOptions): boolean {
|
function hasMcpServersConfigured(options: ExecuteOptions): boolean {
|
||||||
return Boolean(options.mcpServers && Object.keys(options.mcpServers).length > 0);
|
return Boolean(options.mcpServers && Object.keys(options.mcpServers).length > 0);
|
||||||
}
|
}
|
||||||
@@ -180,18 +208,21 @@ function isSdkEligible(options: ExecuteOptions): boolean {
|
|||||||
async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
|
async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
const authIndicators = await getCodexAuthIndicators();
|
||||||
const hasApiKey = Boolean(process.env[OPENAI_API_KEY_ENV]);
|
const openAiApiKey = await resolveOpenAiApiKey();
|
||||||
|
const hasApiKey = Boolean(openAiApiKey);
|
||||||
const cliAuthenticated = authIndicators.hasOAuthToken || authIndicators.hasApiKey || hasApiKey;
|
const cliAuthenticated = authIndicators.hasOAuthToken || authIndicators.hasApiKey || hasApiKey;
|
||||||
const sdkEligible = isSdkEligible(options);
|
const sdkEligible = isSdkEligible(options);
|
||||||
const cliAvailable = Boolean(cliPath);
|
const cliAvailable = Boolean(cliPath);
|
||||||
|
|
||||||
|
if (hasApiKey) {
|
||||||
|
return {
|
||||||
|
mode: CODEX_EXECUTION_MODE_SDK,
|
||||||
|
cliPath,
|
||||||
|
openAiApiKey,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
if (sdkEligible) {
|
if (sdkEligible) {
|
||||||
if (hasApiKey) {
|
|
||||||
return {
|
|
||||||
mode: CODEX_EXECUTION_MODE_SDK,
|
|
||||||
cliPath,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if (!cliAvailable) {
|
if (!cliAvailable) {
|
||||||
throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
|
throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
|
||||||
}
|
}
|
||||||
@@ -208,6 +239,7 @@ async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<Codex
|
|||||||
return {
|
return {
|
||||||
mode: CODEX_EXECUTION_MODE_CLI,
|
mode: CODEX_EXECUTION_MODE_CLI,
|
||||||
cliPath,
|
cliPath,
|
||||||
|
openAiApiKey,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -658,6 +690,8 @@ async function loadCodexInstructions(cwd: string, enabled: boolean): Promise<str
|
|||||||
.join('\n\n');
|
.join('\n\n');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const logger = createLogger('CodexProvider');
|
||||||
|
|
||||||
export class CodexProvider extends BaseProvider {
|
export class CodexProvider extends BaseProvider {
|
||||||
getName(): string {
|
getName(): string {
|
||||||
return 'codex';
|
return 'codex';
|
||||||
@@ -698,7 +732,14 @@ export class CodexProvider extends BaseProvider {
|
|||||||
|
|
||||||
const executionPlan = await resolveCodexExecutionPlan(options);
|
const executionPlan = await resolveCodexExecutionPlan(options);
|
||||||
if (executionPlan.mode === CODEX_EXECUTION_MODE_SDK) {
|
if (executionPlan.mode === CODEX_EXECUTION_MODE_SDK) {
|
||||||
yield* executeCodexSdkQuery(options, combinedSystemPrompt);
|
const cleanupEnv = executionPlan.openAiApiKey
|
||||||
|
? createTempEnvOverride({ [OPENAI_API_KEY_ENV]: executionPlan.openAiApiKey })
|
||||||
|
: null;
|
||||||
|
try {
|
||||||
|
yield* executeCodexSdkQuery(options, combinedSystemPrompt);
|
||||||
|
} finally {
|
||||||
|
cleanupEnv?.();
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -777,13 +818,24 @@ export class CodexProvider extends BaseProvider {
|
|||||||
'-', // Read prompt from stdin to avoid shell escaping issues
|
'-', // Read prompt from stdin to avoid shell escaping issues
|
||||||
];
|
];
|
||||||
|
|
||||||
|
const envOverrides = buildEnv();
|
||||||
|
if (executionPlan.openAiApiKey && !envOverrides[OPENAI_API_KEY_ENV]) {
|
||||||
|
envOverrides[OPENAI_API_KEY_ENV] = executionPlan.openAiApiKey;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate dynamic timeout based on reasoning effort.
|
||||||
|
// Higher reasoning effort (e.g., 'xhigh' for "xtra thinking" mode) requires more time
|
||||||
|
// for the model to generate reasoning tokens before producing output.
|
||||||
|
// This fixes GitHub issue #530 where features would get stuck with reasoning models.
|
||||||
|
const timeout = calculateReasoningTimeout(options.reasoningEffort, CODEX_CLI_TIMEOUT_MS);
|
||||||
|
|
||||||
const stream = spawnJSONLProcess({
|
const stream = spawnJSONLProcess({
|
||||||
command: commandPath,
|
command: commandPath,
|
||||||
args,
|
args,
|
||||||
cwd: options.cwd,
|
cwd: options.cwd,
|
||||||
env: buildEnv(),
|
env: envOverrides,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: DEFAULT_TIMEOUT_MS,
|
timeout,
|
||||||
stdinData: promptText, // Pass prompt via stdin
|
stdinData: promptText, // Pass prompt via stdin
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -967,21 +1019,11 @@ export class CodexProvider extends BaseProvider {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async detectInstallation(): Promise<InstallationStatus> {
|
async detectInstallation(): Promise<InstallationStatus> {
|
||||||
console.log('[CodexProvider.detectInstallation] Starting...');
|
|
||||||
|
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
const hasApiKey = Boolean(await resolveOpenAiApiKey());
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
const authIndicators = await getCodexAuthIndicators();
|
||||||
const installed = !!cliPath;
|
const installed = !!cliPath;
|
||||||
|
|
||||||
console.log('[CodexProvider.detectInstallation] cliPath:', cliPath);
|
|
||||||
console.log('[CodexProvider.detectInstallation] hasApiKey:', hasApiKey);
|
|
||||||
console.log(
|
|
||||||
'[CodexProvider.detectInstallation] authIndicators:',
|
|
||||||
JSON.stringify(authIndicators)
|
|
||||||
);
|
|
||||||
console.log('[CodexProvider.detectInstallation] installed:', installed);
|
|
||||||
|
|
||||||
let version = '';
|
let version = '';
|
||||||
if (installed) {
|
if (installed) {
|
||||||
try {
|
try {
|
||||||
@@ -991,20 +1033,16 @@ export class CodexProvider extends BaseProvider {
|
|||||||
cwd: process.cwd(),
|
cwd: process.cwd(),
|
||||||
});
|
});
|
||||||
version = result.stdout.trim();
|
version = result.stdout.trim();
|
||||||
console.log('[CodexProvider.detectInstallation] version:', version);
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.log('[CodexProvider.detectInstallation] Error getting version:', error);
|
|
||||||
version = '';
|
version = '';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine auth status - always verify with CLI, never assume authenticated
|
// Determine auth status - always verify with CLI, never assume authenticated
|
||||||
console.log('[CodexProvider.detectInstallation] Calling checkCodexAuthentication...');
|
|
||||||
const authCheck = await checkCodexAuthentication(cliPath);
|
const authCheck = await checkCodexAuthentication(cliPath);
|
||||||
console.log('[CodexProvider.detectInstallation] authCheck result:', JSON.stringify(authCheck));
|
|
||||||
const authenticated = authCheck.authenticated;
|
const authenticated = authCheck.authenticated;
|
||||||
|
|
||||||
const result = {
|
return {
|
||||||
installed,
|
installed,
|
||||||
path: cliPath || undefined,
|
path: cliPath || undefined,
|
||||||
version: version || undefined,
|
version: version || undefined,
|
||||||
@@ -1012,8 +1050,6 @@ export class CodexProvider extends BaseProvider {
|
|||||||
hasApiKey,
|
hasApiKey,
|
||||||
authenticated,
|
authenticated,
|
||||||
};
|
};
|
||||||
console.log('[CodexProvider.detectInstallation] Final result:', JSON.stringify(result));
|
|
||||||
return result;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
getAvailableModels(): ModelDefinition[] {
|
getAvailableModels(): ModelDefinition[] {
|
||||||
@@ -1025,36 +1061,24 @@ export class CodexProvider extends BaseProvider {
|
|||||||
* Check authentication status for Codex CLI
|
* Check authentication status for Codex CLI
|
||||||
*/
|
*/
|
||||||
async checkAuth(): Promise<CodexAuthStatus> {
|
async checkAuth(): Promise<CodexAuthStatus> {
|
||||||
console.log('[CodexProvider.checkAuth] Starting auth check...');
|
|
||||||
|
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
const hasApiKey = Boolean(await resolveOpenAiApiKey());
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
const authIndicators = await getCodexAuthIndicators();
|
||||||
|
|
||||||
console.log('[CodexProvider.checkAuth] cliPath:', cliPath);
|
|
||||||
console.log('[CodexProvider.checkAuth] hasApiKey:', hasApiKey);
|
|
||||||
console.log('[CodexProvider.checkAuth] authIndicators:', JSON.stringify(authIndicators));
|
|
||||||
|
|
||||||
// Check for API key in environment
|
// Check for API key in environment
|
||||||
if (hasApiKey) {
|
if (hasApiKey) {
|
||||||
console.log('[CodexProvider.checkAuth] Has API key, returning authenticated');
|
|
||||||
return { authenticated: true, method: 'api_key' };
|
return { authenticated: true, method: 'api_key' };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for OAuth/token from Codex CLI
|
// Check for OAuth/token from Codex CLI
|
||||||
if (authIndicators.hasOAuthToken || authIndicators.hasApiKey) {
|
if (authIndicators.hasOAuthToken || authIndicators.hasApiKey) {
|
||||||
console.log(
|
|
||||||
'[CodexProvider.checkAuth] Has OAuth token or API key in auth file, returning authenticated'
|
|
||||||
);
|
|
||||||
return { authenticated: true, method: 'oauth' };
|
return { authenticated: true, method: 'oauth' };
|
||||||
}
|
}
|
||||||
|
|
||||||
// CLI is installed but not authenticated via indicators - try CLI command
|
// CLI is installed but not authenticated via indicators - try CLI command
|
||||||
console.log('[CodexProvider.checkAuth] No indicators found, trying CLI command...');
|
|
||||||
if (cliPath) {
|
if (cliPath) {
|
||||||
try {
|
try {
|
||||||
// Try 'codex login status' first (same as checkCodexAuthentication)
|
// Try 'codex login status' first (same as checkCodexAuthentication)
|
||||||
console.log('[CodexProvider.checkAuth] Running: ' + cliPath + ' login status');
|
|
||||||
const result = await spawnProcess({
|
const result = await spawnProcess({
|
||||||
command: cliPath || CODEX_COMMAND,
|
command: cliPath || CODEX_COMMAND,
|
||||||
args: ['login', 'status'],
|
args: ['login', 'status'],
|
||||||
@@ -1064,26 +1088,19 @@ export class CodexProvider extends BaseProvider {
|
|||||||
TERM: 'dumb',
|
TERM: 'dumb',
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
console.log('[CodexProvider.checkAuth] login status result:');
|
|
||||||
console.log('[CodexProvider.checkAuth] exitCode:', result.exitCode);
|
|
||||||
console.log('[CodexProvider.checkAuth] stdout:', JSON.stringify(result.stdout));
|
|
||||||
console.log('[CodexProvider.checkAuth] stderr:', JSON.stringify(result.stderr));
|
|
||||||
|
|
||||||
// Check both stdout and stderr - Codex CLI outputs to stderr
|
// Check both stdout and stderr - Codex CLI outputs to stderr
|
||||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||||
const isLoggedIn = combinedOutput.includes('logged in');
|
const isLoggedIn = combinedOutput.includes('logged in');
|
||||||
console.log('[CodexProvider.checkAuth] isLoggedIn:', isLoggedIn);
|
|
||||||
|
|
||||||
if (result.exitCode === 0 && isLoggedIn) {
|
if (result.exitCode === 0 && isLoggedIn) {
|
||||||
console.log('[CodexProvider.checkAuth] CLI says logged in, returning authenticated');
|
|
||||||
return { authenticated: true, method: 'oauth' };
|
return { authenticated: true, method: 'oauth' };
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.log('[CodexProvider.checkAuth] Error running login status:', error);
|
logger.warn('Error running login status command during auth check:', error);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log('[CodexProvider.checkAuth] Not authenticated');
|
|
||||||
return { authenticated: false, method: 'none' };
|
return { authenticated: false, method: 'none' };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -30,3 +30,11 @@ export { OpencodeProvider } from './opencode-provider.js';
|
|||||||
|
|
||||||
// Provider factory
|
// Provider factory
|
||||||
export { ProviderFactory } from './provider-factory.js';
|
export { ProviderFactory } from './provider-factory.js';
|
||||||
|
|
||||||
|
// Simple query service - unified interface for basic AI queries
|
||||||
|
export { simpleQuery, streamingQuery } from './simple-query-service.js';
|
||||||
|
export type {
|
||||||
|
SimpleQueryOptions,
|
||||||
|
SimpleQueryResult,
|
||||||
|
StreamingQueryOptions,
|
||||||
|
} from './simple-query-service.js';
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
254
apps/server/src/providers/simple-query-service.ts
Normal file
254
apps/server/src/providers/simple-query-service.ts
Normal file
@@ -0,0 +1,254 @@
|
|||||||
|
/**
|
||||||
|
* Simple Query Service - Simplified interface for basic AI queries
|
||||||
|
*
|
||||||
|
* Use this for routes that need simple text responses without
|
||||||
|
* complex event handling. This service abstracts away the provider
|
||||||
|
* selection and streaming details, providing a clean interface
|
||||||
|
* for common query patterns.
|
||||||
|
*
|
||||||
|
* Benefits:
|
||||||
|
* - No direct SDK imports needed in route files
|
||||||
|
* - Consistent provider routing based on model
|
||||||
|
* - Automatic text extraction from streaming responses
|
||||||
|
* - Structured output support for JSON schema responses
|
||||||
|
* - Eliminates duplicate extractTextFromStream() functions
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { ProviderFactory } from './provider-factory.js';
|
||||||
|
import type {
|
||||||
|
ProviderMessage,
|
||||||
|
ContentBlock,
|
||||||
|
ThinkingLevel,
|
||||||
|
ReasoningEffort,
|
||||||
|
} from '@automaker/types';
|
||||||
|
import { stripProviderPrefix } from '@automaker/types';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options for simple query execution
|
||||||
|
*/
|
||||||
|
export interface SimpleQueryOptions {
|
||||||
|
/** The prompt to send to the AI (can be text or multi-part content) */
|
||||||
|
prompt: string | Array<{ type: string; text?: string; source?: object }>;
|
||||||
|
/** Model to use (with or without provider prefix) */
|
||||||
|
model?: string;
|
||||||
|
/** Working directory for the query */
|
||||||
|
cwd: string;
|
||||||
|
/** System prompt (combined with user prompt for some providers) */
|
||||||
|
systemPrompt?: string;
|
||||||
|
/** Maximum turns for agentic operations (default: 1) */
|
||||||
|
maxTurns?: number;
|
||||||
|
/** Tools to allow (default: [] for simple queries) */
|
||||||
|
allowedTools?: string[];
|
||||||
|
/** Abort controller for cancellation */
|
||||||
|
abortController?: AbortController;
|
||||||
|
/** Structured output format for JSON responses */
|
||||||
|
outputFormat?: {
|
||||||
|
type: 'json_schema';
|
||||||
|
schema: Record<string, unknown>;
|
||||||
|
};
|
||||||
|
/** Thinking level for Claude models */
|
||||||
|
thinkingLevel?: ThinkingLevel;
|
||||||
|
/** Reasoning effort for Codex/OpenAI models */
|
||||||
|
reasoningEffort?: ReasoningEffort;
|
||||||
|
/** If true, runs in read-only mode (no file writes) */
|
||||||
|
readOnly?: boolean;
|
||||||
|
/** Setting sources for CLAUDE.md loading */
|
||||||
|
settingSources?: Array<'user' | 'project' | 'local'>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Result from a simple query
|
||||||
|
*/
|
||||||
|
export interface SimpleQueryResult {
|
||||||
|
/** The accumulated text response */
|
||||||
|
text: string;
|
||||||
|
/** Structured output if outputFormat was specified and provider supports it */
|
||||||
|
structured_output?: Record<string, unknown>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options for streaming query execution
|
||||||
|
*/
|
||||||
|
export interface StreamingQueryOptions extends SimpleQueryOptions {
|
||||||
|
/** Callback for each text chunk received */
|
||||||
|
onText?: (text: string) => void;
|
||||||
|
/** Callback for tool use events */
|
||||||
|
onToolUse?: (tool: string, input: unknown) => void;
|
||||||
|
/** Callback for thinking blocks (if available) */
|
||||||
|
onThinking?: (thinking: string) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default model to use when none specified
|
||||||
|
*/
|
||||||
|
const DEFAULT_MODEL = 'claude-sonnet-4-20250514';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a simple query and return the text result
|
||||||
|
*
|
||||||
|
* Use this for simple, non-streaming queries where you just need
|
||||||
|
* the final text response. For more complex use cases with progress
|
||||||
|
* callbacks, use streamingQuery() instead.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* const result = await simpleQuery({
|
||||||
|
* prompt: 'Generate a title for: user authentication',
|
||||||
|
* cwd: process.cwd(),
|
||||||
|
* systemPrompt: 'You are a title generator...',
|
||||||
|
* maxTurns: 1,
|
||||||
|
* allowedTools: [],
|
||||||
|
* });
|
||||||
|
* console.log(result.text); // "Add user authentication"
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
export async function simpleQuery(options: SimpleQueryOptions): Promise<SimpleQueryResult> {
|
||||||
|
const model = options.model || DEFAULT_MODEL;
|
||||||
|
const provider = ProviderFactory.getProviderForModel(model);
|
||||||
|
const bareModel = stripProviderPrefix(model);
|
||||||
|
|
||||||
|
let responseText = '';
|
||||||
|
let structuredOutput: Record<string, unknown> | undefined;
|
||||||
|
|
||||||
|
// Build provider options
|
||||||
|
const providerOptions = {
|
||||||
|
prompt: options.prompt,
|
||||||
|
model: bareModel,
|
||||||
|
originalModel: model,
|
||||||
|
cwd: options.cwd,
|
||||||
|
systemPrompt: options.systemPrompt,
|
||||||
|
maxTurns: options.maxTurns ?? 1,
|
||||||
|
allowedTools: options.allowedTools ?? [],
|
||||||
|
abortController: options.abortController,
|
||||||
|
outputFormat: options.outputFormat,
|
||||||
|
thinkingLevel: options.thinkingLevel,
|
||||||
|
reasoningEffort: options.reasoningEffort,
|
||||||
|
readOnly: options.readOnly,
|
||||||
|
settingSources: options.settingSources,
|
||||||
|
};
|
||||||
|
|
||||||
|
for await (const msg of provider.executeQuery(providerOptions)) {
|
||||||
|
// Handle error messages
|
||||||
|
if (msg.type === 'error') {
|
||||||
|
const errorMessage = msg.error || 'Provider returned an error';
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract text from assistant messages
|
||||||
|
if (msg.type === 'assistant' && msg.message?.content) {
|
||||||
|
for (const block of msg.message.content) {
|
||||||
|
if (block.type === 'text' && block.text) {
|
||||||
|
responseText += block.text;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle result messages
|
||||||
|
if (msg.type === 'result') {
|
||||||
|
if (msg.subtype === 'success') {
|
||||||
|
// Use result text if longer than accumulated text
|
||||||
|
if (msg.result && msg.result.length > responseText.length) {
|
||||||
|
responseText = msg.result;
|
||||||
|
}
|
||||||
|
// Capture structured output if present
|
||||||
|
if (msg.structured_output) {
|
||||||
|
structuredOutput = msg.structured_output;
|
||||||
|
}
|
||||||
|
} else if (msg.subtype === 'error_max_turns') {
|
||||||
|
// Max turns reached - return what we have
|
||||||
|
break;
|
||||||
|
} else if (msg.subtype === 'error_max_structured_output_retries') {
|
||||||
|
throw new Error('Could not produce valid structured output after retries');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { text: responseText, structured_output: structuredOutput };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a streaming query with event callbacks
|
||||||
|
*
|
||||||
|
* Use this for queries where you need real-time progress updates,
|
||||||
|
* such as when displaying streaming output to a user.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* const result = await streamingQuery({
|
||||||
|
* prompt: 'Analyze this project and suggest improvements',
|
||||||
|
* cwd: '/path/to/project',
|
||||||
|
* maxTurns: 250,
|
||||||
|
* allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
|
* onText: (text) => emitProgress(text),
|
||||||
|
* onToolUse: (tool, input) => emitToolUse(tool, input),
|
||||||
|
* });
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
export async function streamingQuery(options: StreamingQueryOptions): Promise<SimpleQueryResult> {
|
||||||
|
const model = options.model || DEFAULT_MODEL;
|
||||||
|
const provider = ProviderFactory.getProviderForModel(model);
|
||||||
|
const bareModel = stripProviderPrefix(model);
|
||||||
|
|
||||||
|
let responseText = '';
|
||||||
|
let structuredOutput: Record<string, unknown> | undefined;
|
||||||
|
|
||||||
|
// Build provider options
|
||||||
|
const providerOptions = {
|
||||||
|
prompt: options.prompt,
|
||||||
|
model: bareModel,
|
||||||
|
originalModel: model,
|
||||||
|
cwd: options.cwd,
|
||||||
|
systemPrompt: options.systemPrompt,
|
||||||
|
maxTurns: options.maxTurns ?? 250,
|
||||||
|
allowedTools: options.allowedTools ?? ['Read', 'Glob', 'Grep'],
|
||||||
|
abortController: options.abortController,
|
||||||
|
outputFormat: options.outputFormat,
|
||||||
|
thinkingLevel: options.thinkingLevel,
|
||||||
|
reasoningEffort: options.reasoningEffort,
|
||||||
|
readOnly: options.readOnly,
|
||||||
|
settingSources: options.settingSources,
|
||||||
|
};
|
||||||
|
|
||||||
|
for await (const msg of provider.executeQuery(providerOptions)) {
|
||||||
|
// Handle error messages
|
||||||
|
if (msg.type === 'error') {
|
||||||
|
const errorMessage = msg.error || 'Provider returned an error';
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract content from assistant messages
|
||||||
|
if (msg.type === 'assistant' && msg.message?.content) {
|
||||||
|
for (const block of msg.message.content) {
|
||||||
|
if (block.type === 'text' && block.text) {
|
||||||
|
responseText += block.text;
|
||||||
|
options.onText?.(block.text);
|
||||||
|
} else if (block.type === 'tool_use' && block.name) {
|
||||||
|
options.onToolUse?.(block.name, block.input);
|
||||||
|
} else if (block.type === 'thinking' && block.thinking) {
|
||||||
|
options.onThinking?.(block.thinking);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle result messages
|
||||||
|
if (msg.type === 'result') {
|
||||||
|
if (msg.subtype === 'success') {
|
||||||
|
// Use result text if longer than accumulated text
|
||||||
|
if (msg.result && msg.result.length > responseText.length) {
|
||||||
|
responseText = msg.result;
|
||||||
|
}
|
||||||
|
// Capture structured output if present
|
||||||
|
if (msg.structured_output) {
|
||||||
|
structuredOutput = msg.structured_output;
|
||||||
|
}
|
||||||
|
} else if (msg.subtype === 'error_max_turns') {
|
||||||
|
// Max turns reached - return what we have
|
||||||
|
break;
|
||||||
|
} else if (msg.subtype === 'error_max_structured_output_retries') {
|
||||||
|
throw new Error('Could not produce valid structured output after retries');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { text: responseText, structured_output: structuredOutput };
|
||||||
|
}
|
||||||
@@ -6,26 +6,103 @@ import { createLogger } from '@automaker/utils';
|
|||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
// Types describing an in-flight spec/feature generation run.
export type GenerationType = 'spec_regeneration' | 'feature_generation' | 'sync';

// Metadata recorded for each project while a generation is running.
interface RunningGeneration {
  // True while the generation is in progress.
  isRunning: boolean;
  // Which kind of generation is running for this project.
  type: GenerationType;
  // ISO-8601 timestamp of when the generation started.
  startedAt: string;
}

// Shared state for tracking generation status - scoped by project path.
const runningProjects = new Map<string, RunningGeneration>();
// Abort controllers for cancelling a project's in-flight generation.
const abortControllers = new Map<string, AbortController>();
|
||||||
|
|
||||||
/**
 * Get the running state for a specific project.
 *
 * When projectPath is provided, returns that project's generation status,
 * including its abort controller, generation type and start time. Without
 * a projectPath it falls back to reporting whether ANY project is running
 * (for callers that predate per-project scoping); in that fallback no
 * abort controller is returned.
 */
export function getSpecRegenerationStatus(projectPath?: string): {
  isRunning: boolean;
  currentAbortController: AbortController | null;
  projectPath?: string;
  type?: GenerationType;
  startedAt?: string;
} {
  if (projectPath) {
    const generation = runningProjects.get(projectPath);
    return {
      isRunning: generation?.isRunning || false,
      currentAbortController: abortControllers.get(projectPath) || null,
      projectPath,
      type: generation?.type,
      startedAt: generation?.startedAt,
    };
  }
  // Fallback: check if any project is running (for backward compatibility)
  const isAnyRunning = Array.from(runningProjects.values()).some((g) => g.isRunning);
  return { isRunning: isAnyRunning, currentAbortController: null };
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set the running state and abort controller
|
* Get the project path that is currently running (if any)
|
||||||
*/
|
*/
|
||||||
export function setRunningState(running: boolean, controller: AbortController | null = null): void {
|
export function getRunningProjectPath(): string | null {
|
||||||
isRunning = running;
|
for (const [path, running] of runningProjects.entries()) {
|
||||||
currentAbortController = controller;
|
if (running) return path;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Set the running state and abort controller for a specific project.
 *
 * Starting a run records its type and start timestamp; stopping removes
 * both the run entry and its abort controller.
 *
 * @param projectPath Project the generation belongs to.
 * @param running True to mark a generation as started, false to clear it.
 * @param controller Optional abort controller for cancelling the run.
 * @param type Kind of generation being tracked (defaults to spec regeneration).
 */
export function setRunningState(
  projectPath: string,
  running: boolean,
  controller: AbortController | null = null,
  type: GenerationType = 'spec_regeneration'
): void {
  if (running) {
    runningProjects.set(projectPath, {
      isRunning: true,
      type,
      startedAt: new Date().toISOString(),
    });
    // NOTE(review): when controller is null, a controller registered by a
    // previous start for this path is left in place - confirm intended.
    if (controller) {
      abortControllers.set(projectPath, controller);
    }
  } else {
    runningProjects.delete(projectPath);
    abortControllers.delete(projectPath);
  }
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all running spec/feature generations for the running agents view
|
||||||
|
*/
|
||||||
|
export function getAllRunningGenerations(): Array<{
|
||||||
|
projectPath: string;
|
||||||
|
type: GenerationType;
|
||||||
|
startedAt: string;
|
||||||
|
}> {
|
||||||
|
const results: Array<{
|
||||||
|
projectPath: string;
|
||||||
|
type: GenerationType;
|
||||||
|
startedAt: string;
|
||||||
|
}> = [];
|
||||||
|
|
||||||
|
for (const [projectPath, generation] of runningProjects.entries()) {
|
||||||
|
if (generation.isRunning) {
|
||||||
|
results.push({
|
||||||
|
projectPath,
|
||||||
|
type: generation.type,
|
||||||
|
startedAt: generation.startedAt,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return results;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -5,19 +5,17 @@
|
|||||||
* (defaults to Sonnet for balanced speed and quality).
|
* (defaults to Sonnet for balanced speed and quality).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import * as secureFs from '../../lib/secure-fs.js';
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createFeatureGenerationOptions } from '../../lib/sdk-options.js';
|
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
|
||||||
import { logAuthStatus } from './common.js';
|
|
||||||
import { parseAndCreateFeatures } from './parse-and-create-features.js';
|
import { parseAndCreateFeatures } from './parse-and-create-features.js';
|
||||||
import { getAppSpecPath } from '@automaker/platform';
|
import { getAppSpecPath } from '@automaker/platform';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
|
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
|
||||||
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
@@ -56,38 +54,48 @@ export async function generateFeaturesFromSpec(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[FeatureGeneration]');
|
||||||
|
|
||||||
|
// Load existing features to prevent duplicates
|
||||||
|
const featureLoader = new FeatureLoader();
|
||||||
|
const existingFeatures = await featureLoader.getAll(projectPath);
|
||||||
|
|
||||||
|
logger.info(`Found ${existingFeatures.length} existing features to exclude from generation`);
|
||||||
|
|
||||||
|
// Build existing features context for the prompt
|
||||||
|
let existingFeaturesContext = '';
|
||||||
|
if (existingFeatures.length > 0) {
|
||||||
|
const featuresList = existingFeatures
|
||||||
|
.map(
|
||||||
|
(f) =>
|
||||||
|
`- "${f.title}" (ID: ${f.id}): ${f.description?.substring(0, 100) || 'No description'}`
|
||||||
|
)
|
||||||
|
.join('\n');
|
||||||
|
existingFeaturesContext = `
|
||||||
|
|
||||||
|
## EXISTING FEATURES (DO NOT REGENERATE THESE)
|
||||||
|
|
||||||
|
The following ${existingFeatures.length} features already exist in the project. You MUST NOT generate features that duplicate or overlap with these:
|
||||||
|
|
||||||
|
${featuresList}
|
||||||
|
|
||||||
|
CRITICAL INSTRUCTIONS:
|
||||||
|
- DO NOT generate any features with the same or similar titles as the existing features listed above
|
||||||
|
- DO NOT generate features that cover the same functionality as existing features
|
||||||
|
- ONLY generate NEW features that are not yet in the system
|
||||||
|
- If a feature from the roadmap already exists, skip it entirely
|
||||||
|
- Generate unique feature IDs that do not conflict with existing IDs: ${existingFeatures.map((f) => f.id).join(', ')}
|
||||||
|
`;
|
||||||
|
}
|
||||||
|
|
||||||
const prompt = `Based on this project specification:
|
const prompt = `Based on this project specification:
|
||||||
|
|
||||||
${spec}
|
${spec}
|
||||||
|
${existingFeaturesContext}
|
||||||
|
${prompts.appSpec.generateFeaturesFromSpecPrompt}
|
||||||
|
|
||||||
Generate a prioritized list of implementable features. For each feature provide:
|
Generate ${featureCount} NEW features that build on each other logically. Remember: ONLY generate features that DO NOT already exist.`;
|
||||||
|
|
||||||
1. **id**: A unique lowercase-hyphenated identifier
|
|
||||||
2. **category**: Functional category (e.g., "Core", "UI", "API", "Authentication", "Database")
|
|
||||||
3. **title**: Short descriptive title
|
|
||||||
4. **description**: What this feature does (2-3 sentences)
|
|
||||||
5. **priority**: 1 (high), 2 (medium), or 3 (low)
|
|
||||||
6. **complexity**: "simple", "moderate", or "complex"
|
|
||||||
7. **dependencies**: Array of feature IDs this depends on (can be empty)
|
|
||||||
|
|
||||||
Format as JSON:
|
|
||||||
{
|
|
||||||
"features": [
|
|
||||||
{
|
|
||||||
"id": "feature-id",
|
|
||||||
"category": "Feature Category",
|
|
||||||
"title": "Feature Title",
|
|
||||||
"description": "What it does",
|
|
||||||
"priority": 1,
|
|
||||||
"complexity": "moderate",
|
|
||||||
"dependencies": []
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
Generate ${featureCount} features that build on each other logically.
|
|
||||||
|
|
||||||
IMPORTANT: Do not ask for clarification. The specification is provided above. Generate the JSON immediately.`;
|
|
||||||
|
|
||||||
logger.info('========== PROMPT BEING SENT ==========');
|
logger.info('========== PROMPT BEING SENT ==========');
|
||||||
logger.info(`Prompt length: ${prompt.length} chars`);
|
logger.info(`Prompt length: ${prompt.length} chars`);
|
||||||
@@ -115,121 +123,30 @@ IMPORTANT: Do not ask for clarification. The specification is provided above. Ge
|
|||||||
|
|
||||||
logger.info('Using model:', model);
|
logger.info('Using model:', model);
|
||||||
|
|
||||||
let responseText = '';
|
// Use streamingQuery with event callbacks
|
||||||
let messageCount = 0;
|
const result = await streamingQuery({
|
||||||
|
prompt,
|
||||||
|
model,
|
||||||
|
cwd: projectPath,
|
||||||
|
maxTurns: 250,
|
||||||
|
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
|
abortController,
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true, // Feature generation only reads code, doesn't write
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
onText: (text) => {
|
||||||
|
logger.debug(`Feature text block received (${text.length} chars)`);
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: text,
|
||||||
|
projectPath: projectPath,
|
||||||
|
});
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
// Route to appropriate provider based on model type
|
const responseText = result.text;
|
||||||
if (isCursorModel(model)) {
|
|
||||||
// Use Cursor provider for Cursor models
|
|
||||||
logger.info('[FeatureGeneration] Using Cursor provider');
|
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
logger.info(`Feature stream complete.`);
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
|
||||||
const bareModel = stripProviderPrefix(model);
|
|
||||||
|
|
||||||
// Add explicit instructions for Cursor to return JSON in response
|
|
||||||
const cursorPrompt = `${prompt}
|
|
||||||
|
|
||||||
CRITICAL INSTRUCTIONS:
|
|
||||||
1. DO NOT write any files. Return the JSON in your response only.
|
|
||||||
2. Respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
|
|
||||||
3. Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
|
||||||
|
|
||||||
for await (const msg of provider.executeQuery({
|
|
||||||
prompt: cursorPrompt,
|
|
||||||
model: bareModel,
|
|
||||||
cwd: projectPath,
|
|
||||||
maxTurns: 250,
|
|
||||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
|
||||||
abortController,
|
|
||||||
readOnly: true, // Feature generation only reads code, doesn't write
|
|
||||||
})) {
|
|
||||||
messageCount++;
|
|
||||||
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
logger.debug(`Feature text block received (${block.text.length} chars)`);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_regeneration_progress',
|
|
||||||
content: block.text,
|
|
||||||
projectPath: projectPath,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
|
||||||
// Use result if it's a final accumulated message
|
|
||||||
if (msg.result.length > responseText.length) {
|
|
||||||
responseText = msg.result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models
|
|
||||||
logger.info('[FeatureGeneration] Using Claude SDK');
|
|
||||||
|
|
||||||
const options = createFeatureGenerationOptions({
|
|
||||||
cwd: projectPath,
|
|
||||||
abortController,
|
|
||||||
autoLoadClaudeMd,
|
|
||||||
model,
|
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
|
||||||
});
|
|
||||||
|
|
||||||
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
|
|
||||||
logger.info('Calling Claude Agent SDK query() for features...');
|
|
||||||
|
|
||||||
logAuthStatus('Right before SDK query() for features');
|
|
||||||
|
|
||||||
let stream;
|
|
||||||
try {
|
|
||||||
stream = query({ prompt, options });
|
|
||||||
logger.debug('query() returned stream successfully');
|
|
||||||
} catch (queryError) {
|
|
||||||
logger.error('❌ query() threw an exception:');
|
|
||||||
logger.error('Error:', queryError);
|
|
||||||
throw queryError;
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug('Starting to iterate over feature stream...');
|
|
||||||
|
|
||||||
try {
|
|
||||||
for await (const msg of stream) {
|
|
||||||
messageCount++;
|
|
||||||
logger.debug(
|
|
||||||
`Feature stream message #${messageCount}:`,
|
|
||||||
JSON.stringify({ type: msg.type, subtype: (msg as any).subtype }, null, 2)
|
|
||||||
);
|
|
||||||
|
|
||||||
if (msg.type === 'assistant' && msg.message.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text') {
|
|
||||||
responseText += block.text;
|
|
||||||
logger.debug(`Feature text block received (${block.text.length} chars)`);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_regeneration_progress',
|
|
||||||
content: block.text,
|
|
||||||
projectPath: projectPath,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
|
|
||||||
logger.debug('Received success result for features');
|
|
||||||
responseText = (msg as any).result || responseText;
|
|
||||||
} else if ((msg as { type: string }).type === 'error') {
|
|
||||||
logger.error('❌ Received error message from feature stream:');
|
|
||||||
logger.error('Error message:', JSON.stringify(msg, null, 2));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (streamError) {
|
|
||||||
logger.error('❌ Error while iterating feature stream:');
|
|
||||||
logger.error('Stream error:', streamError);
|
|
||||||
throw streamError;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info(`Feature stream complete. Total messages: ${messageCount}`);
|
|
||||||
logger.info(`Feature response length: ${responseText.length} chars`);
|
logger.info(`Feature response length: ${responseText.length} chars`);
|
||||||
logger.info('========== FULL RESPONSE TEXT ==========');
|
logger.info('========== FULL RESPONSE TEXT ==========');
|
||||||
logger.info(responseText);
|
logger.info(responseText);
|
||||||
|
|||||||
@@ -5,27 +5,18 @@
|
|||||||
* (defaults to Opus for high-quality specification generation).
|
* (defaults to Opus for high-quality specification generation).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import path from 'path';
|
|
||||||
import * as secureFs from '../../lib/secure-fs.js';
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import {
|
import { specOutputSchema, specToXml, type SpecOutput } from '../../lib/app-spec-format.js';
|
||||||
specOutputSchema,
|
|
||||||
specToXml,
|
|
||||||
getStructuredSpecPromptInstruction,
|
|
||||||
type SpecOutput,
|
|
||||||
} from '../../lib/app-spec-format.js';
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createSpecGenerationOptions } from '../../lib/sdk-options.js';
|
|
||||||
import { extractJson } from '../../lib/json-extractor.js';
|
import { extractJson } from '../../lib/json-extractor.js';
|
||||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||||
import { logAuthStatus } from './common.js';
|
|
||||||
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
|
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
|
||||||
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
|
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
|
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
@@ -47,6 +38,9 @@ export async function generateSpec(
|
|||||||
logger.info('analyzeProject:', analyzeProject);
|
logger.info('analyzeProject:', analyzeProject);
|
||||||
logger.info('maxFeatures:', maxFeatures);
|
logger.info('maxFeatures:', maxFeatures);
|
||||||
|
|
||||||
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[SpecRegeneration]');
|
||||||
|
|
||||||
// Build the prompt based on whether we should analyze the project
|
// Build the prompt based on whether we should analyze the project
|
||||||
let analysisInstructions = '';
|
let analysisInstructions = '';
|
||||||
let techStackDefaults = '';
|
let techStackDefaults = '';
|
||||||
@@ -70,9 +64,7 @@ export async function generateSpec(
|
|||||||
Use these technologies as the foundation for the specification.`;
|
Use these technologies as the foundation for the specification.`;
|
||||||
}
|
}
|
||||||
|
|
||||||
const prompt = `You are helping to define a software project specification.
|
const prompt = `${prompts.appSpec.generateSpecSystemPrompt}
|
||||||
|
|
||||||
IMPORTANT: Never ask for clarification or additional information. Use the information provided and make reasonable assumptions to create the best possible specification. If details are missing, infer them based on common patterns and best practices.
|
|
||||||
|
|
||||||
Project Overview:
|
Project Overview:
|
||||||
${projectOverview}
|
${projectOverview}
|
||||||
@@ -81,7 +73,7 @@ ${techStackDefaults}
|
|||||||
|
|
||||||
${analysisInstructions}
|
${analysisInstructions}
|
||||||
|
|
||||||
${getStructuredSpecPromptInstruction()}`;
|
${prompts.appSpec.structuredSpecInstructions}`;
|
||||||
|
|
||||||
logger.info('========== PROMPT BEING SENT ==========');
|
logger.info('========== PROMPT BEING SENT ==========');
|
||||||
logger.info(`Prompt length: ${prompt.length} chars`);
|
logger.info(`Prompt length: ${prompt.length} chars`);
|
||||||
@@ -109,21 +101,15 @@ ${getStructuredSpecPromptInstruction()}`;
|
|||||||
logger.info('Using model:', model);
|
logger.info('Using model:', model);
|
||||||
|
|
||||||
let responseText = '';
|
let responseText = '';
|
||||||
let messageCount = 0;
|
|
||||||
let structuredOutput: SpecOutput | null = null;
|
let structuredOutput: SpecOutput | null = null;
|
||||||
|
|
||||||
// Route to appropriate provider based on model type
|
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
|
||||||
if (isCursorModel(model)) {
|
const useStructuredOutput = !isCursorModel(model);
|
||||||
// Use Cursor provider for Cursor models
|
|
||||||
logger.info('[SpecGeneration] Using Cursor provider');
|
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
// Build the final prompt - for Cursor, include JSON schema instructions
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
let finalPrompt = prompt;
|
||||||
const bareModel = stripProviderPrefix(model);
|
if (!useStructuredOutput) {
|
||||||
|
finalPrompt = `${prompt}
|
||||||
// For Cursor, include the JSON schema in the prompt with clear instructions
|
|
||||||
// to return JSON in the response (not write to a file)
|
|
||||||
const cursorPrompt = `${prompt}
|
|
||||||
|
|
||||||
CRITICAL INSTRUCTIONS:
|
CRITICAL INSTRUCTIONS:
|
||||||
1. DO NOT write any files. DO NOT create any files like "project_specification.json".
|
1. DO NOT write any files. DO NOT create any files like "project_specification.json".
|
||||||
@@ -133,153 +119,57 @@ CRITICAL INSTRUCTIONS:
|
|||||||
${JSON.stringify(specOutputSchema, null, 2)}
|
${JSON.stringify(specOutputSchema, null, 2)}
|
||||||
|
|
||||||
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
||||||
|
|
||||||
for await (const msg of provider.executeQuery({
|
|
||||||
prompt: cursorPrompt,
|
|
||||||
model: bareModel,
|
|
||||||
cwd: projectPath,
|
|
||||||
maxTurns: 250,
|
|
||||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
|
||||||
abortController,
|
|
||||||
readOnly: true, // Spec generation only reads code, we write the spec ourselves
|
|
||||||
})) {
|
|
||||||
messageCount++;
|
|
||||||
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
logger.info(
|
|
||||||
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
|
|
||||||
);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_regeneration_progress',
|
|
||||||
content: block.text,
|
|
||||||
projectPath: projectPath,
|
|
||||||
});
|
|
||||||
} else if (block.type === 'tool_use') {
|
|
||||||
logger.info('Tool use:', block.name);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_tool',
|
|
||||||
tool: block.name,
|
|
||||||
input: block.input,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
|
||||||
// Use result if it's a final accumulated message
|
|
||||||
if (msg.result.length > responseText.length) {
|
|
||||||
responseText = msg.result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse JSON from the response text using shared utility
|
|
||||||
if (responseText) {
|
|
||||||
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models
|
|
||||||
logger.info('[SpecGeneration] Using Claude SDK');
|
|
||||||
|
|
||||||
const options = createSpecGenerationOptions({
|
|
||||||
cwd: projectPath,
|
|
||||||
abortController,
|
|
||||||
autoLoadClaudeMd,
|
|
||||||
model,
|
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
|
||||||
outputFormat: {
|
|
||||||
type: 'json_schema',
|
|
||||||
schema: specOutputSchema,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
|
|
||||||
logger.info('Calling Claude Agent SDK query()...');
|
|
||||||
|
|
||||||
// Log auth status right before the SDK call
|
|
||||||
logAuthStatus('Right before SDK query()');
|
|
||||||
|
|
||||||
let stream;
|
|
||||||
try {
|
|
||||||
stream = query({ prompt, options });
|
|
||||||
logger.debug('query() returned stream successfully');
|
|
||||||
} catch (queryError) {
|
|
||||||
logger.error('❌ query() threw an exception:');
|
|
||||||
logger.error('Error:', queryError);
|
|
||||||
throw queryError;
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info('Starting to iterate over stream...');
|
|
||||||
|
|
||||||
try {
|
|
||||||
for await (const msg of stream) {
|
|
||||||
messageCount++;
|
|
||||||
logger.info(
|
|
||||||
`Stream message #${messageCount}: type=${msg.type}, subtype=${(msg as any).subtype}`
|
|
||||||
);
|
|
||||||
|
|
||||||
if (msg.type === 'assistant') {
|
|
||||||
const msgAny = msg as any;
|
|
||||||
if (msgAny.message?.content) {
|
|
||||||
for (const block of msgAny.message.content) {
|
|
||||||
if (block.type === 'text') {
|
|
||||||
responseText += block.text;
|
|
||||||
logger.info(
|
|
||||||
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
|
|
||||||
);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_regeneration_progress',
|
|
||||||
content: block.text,
|
|
||||||
projectPath: projectPath,
|
|
||||||
});
|
|
||||||
} else if (block.type === 'tool_use') {
|
|
||||||
logger.info('Tool use:', block.name);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_tool',
|
|
||||||
tool: block.name,
|
|
||||||
input: block.input,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
|
|
||||||
logger.info('Received success result');
|
|
||||||
// Check for structured output - this is the reliable way to get spec data
|
|
||||||
const resultMsg = msg as any;
|
|
||||||
if (resultMsg.structured_output) {
|
|
||||||
structuredOutput = resultMsg.structured_output as SpecOutput;
|
|
||||||
logger.info('✅ Received structured output');
|
|
||||||
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
|
|
||||||
} else {
|
|
||||||
logger.warn('⚠️ No structured output in result, will fall back to text parsing');
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result') {
|
|
||||||
// Handle error result types
|
|
||||||
const subtype = (msg as any).subtype;
|
|
||||||
logger.info(`Result message: subtype=${subtype}`);
|
|
||||||
if (subtype === 'error_max_turns') {
|
|
||||||
logger.error('❌ Hit max turns limit!');
|
|
||||||
} else if (subtype === 'error_max_structured_output_retries') {
|
|
||||||
logger.error('❌ Failed to produce valid structured output after retries');
|
|
||||||
throw new Error('Could not produce valid spec output');
|
|
||||||
}
|
|
||||||
} else if ((msg as { type: string }).type === 'error') {
|
|
||||||
logger.error('❌ Received error message from stream:');
|
|
||||||
logger.error('Error message:', JSON.stringify(msg, null, 2));
|
|
||||||
} else if (msg.type === 'user') {
|
|
||||||
// Log user messages (tool results)
|
|
||||||
logger.info(`User message (tool result): ${JSON.stringify(msg).substring(0, 500)}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (streamError) {
|
|
||||||
logger.error('❌ Error while iterating stream:');
|
|
||||||
logger.error('Stream error:', streamError);
|
|
||||||
throw streamError;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.info(`Stream iteration complete. Total messages: ${messageCount}`);
|
// Use streamingQuery with event callbacks
|
||||||
|
const result = await streamingQuery({
|
||||||
|
prompt: finalPrompt,
|
||||||
|
model,
|
||||||
|
cwd: projectPath,
|
||||||
|
maxTurns: 250,
|
||||||
|
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
|
abortController,
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true, // Spec generation only reads code, we write the spec ourselves
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
outputFormat: useStructuredOutput
|
||||||
|
? {
|
||||||
|
type: 'json_schema',
|
||||||
|
schema: specOutputSchema,
|
||||||
|
}
|
||||||
|
: undefined,
|
||||||
|
onText: (text) => {
|
||||||
|
responseText += text;
|
||||||
|
logger.info(
|
||||||
|
`Text block received (${text.length} chars), total now: ${responseText.length} chars`
|
||||||
|
);
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: text,
|
||||||
|
projectPath: projectPath,
|
||||||
|
});
|
||||||
|
},
|
||||||
|
onToolUse: (tool, input) => {
|
||||||
|
logger.info('Tool use:', tool);
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_tool',
|
||||||
|
tool,
|
||||||
|
input,
|
||||||
|
});
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Get structured output if available
|
||||||
|
if (result.structured_output) {
|
||||||
|
structuredOutput = result.structured_output as unknown as SpecOutput;
|
||||||
|
logger.info('✅ Received structured output');
|
||||||
|
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
|
||||||
|
} else if (!useStructuredOutput && responseText) {
|
||||||
|
// For non-Claude providers, parse JSON from response text
|
||||||
|
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Stream iteration complete.`);
|
||||||
logger.info(`Response text length: ${responseText.length} chars`);
|
logger.info(`Response text length: ${responseText.length} chars`);
|
||||||
|
|
||||||
// Determine XML content to save
|
// Determine XML content to save
|
||||||
@@ -311,19 +201,33 @@ Your entire response should be valid JSON starting with { and ending with }. No
|
|||||||
xmlContent = responseText.substring(xmlStart, xmlEnd + '</project_specification>'.length);
|
xmlContent = responseText.substring(xmlStart, xmlEnd + '</project_specification>'.length);
|
||||||
logger.info(`Extracted XML content: ${xmlContent.length} chars (from position ${xmlStart})`);
|
logger.info(`Extracted XML content: ${xmlContent.length} chars (from position ${xmlStart})`);
|
||||||
} else {
|
} else {
|
||||||
// No valid XML structure found in the response text
|
// No XML found, try JSON extraction
|
||||||
// This happens when structured output was expected but not received, and the agent
|
logger.warn('⚠️ No XML tags found, attempting JSON extraction...');
|
||||||
// output conversational text instead of XML (e.g., "The project directory appears to be empty...")
|
const extractedJson = extractJson<SpecOutput>(responseText, { logger });
|
||||||
// We should NOT save this conversational text as it's not a valid spec
|
|
||||||
logger.error('❌ Response does not contain valid <project_specification> XML structure');
|
if (
|
||||||
logger.error(
|
extractedJson &&
|
||||||
'This typically happens when structured output failed and the agent produced conversational text instead of XML'
|
typeof extractedJson.project_name === 'string' &&
|
||||||
);
|
typeof extractedJson.overview === 'string' &&
|
||||||
throw new Error(
|
Array.isArray(extractedJson.technology_stack) &&
|
||||||
'Failed to generate spec: No valid XML structure found in response. ' +
|
Array.isArray(extractedJson.core_capabilities) &&
|
||||||
'The response contained conversational text but no <project_specification> tags. ' +
|
Array.isArray(extractedJson.implemented_features)
|
||||||
'Please try again.'
|
) {
|
||||||
);
|
logger.info('✅ Successfully extracted JSON from response text');
|
||||||
|
xmlContent = specToXml(extractedJson);
|
||||||
|
logger.info(`✅ Converted extracted JSON to XML: ${xmlContent.length} chars`);
|
||||||
|
} else {
|
||||||
|
// Neither XML nor valid JSON found
|
||||||
|
logger.error('❌ Response does not contain valid XML or JSON structure');
|
||||||
|
logger.error(
|
||||||
|
'This typically happens when structured output failed and the agent produced conversational text instead of structured output'
|
||||||
|
);
|
||||||
|
throw new Error(
|
||||||
|
'Failed to generate spec: No valid XML or JSON structure found in response. ' +
|
||||||
|
'The response contained conversational text but no <project_specification> tags or valid JSON. ' +
|
||||||
|
'Please try again.'
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import type { EventEmitter } from '../../lib/events.js';
|
|||||||
import { createCreateHandler } from './routes/create.js';
|
import { createCreateHandler } from './routes/create.js';
|
||||||
import { createGenerateHandler } from './routes/generate.js';
|
import { createGenerateHandler } from './routes/generate.js';
|
||||||
import { createGenerateFeaturesHandler } from './routes/generate-features.js';
|
import { createGenerateFeaturesHandler } from './routes/generate-features.js';
|
||||||
|
import { createSyncHandler } from './routes/sync.js';
|
||||||
import { createStopHandler } from './routes/stop.js';
|
import { createStopHandler } from './routes/stop.js';
|
||||||
import { createStatusHandler } from './routes/status.js';
|
import { createStatusHandler } from './routes/status.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
@@ -20,6 +21,7 @@ export function createSpecRegenerationRoutes(
|
|||||||
router.post('/create', createCreateHandler(events));
|
router.post('/create', createCreateHandler(events));
|
||||||
router.post('/generate', createGenerateHandler(events, settingsService));
|
router.post('/generate', createGenerateHandler(events, settingsService));
|
||||||
router.post('/generate-features', createGenerateFeaturesHandler(events, settingsService));
|
router.post('/generate-features', createGenerateFeaturesHandler(events, settingsService));
|
||||||
|
router.post('/sync', createSyncHandler(events, settingsService));
|
||||||
router.post('/stop', createStopHandler());
|
router.post('/stop', createStopHandler());
|
||||||
router.get('/status', createStatusHandler());
|
router.get('/status', createStatusHandler());
|
||||||
|
|
||||||
|
|||||||
@@ -5,9 +5,10 @@
|
|||||||
import path from 'path';
|
import path from 'path';
|
||||||
import * as secureFs from '../../lib/secure-fs.js';
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger, atomicWriteJson, DEFAULT_BACKUP_COUNT } from '@automaker/utils';
|
||||||
import { getFeaturesDir } from '@automaker/platform';
|
import { getFeaturesDir } from '@automaker/platform';
|
||||||
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
||||||
|
import { getNotificationService } from '../../services/notification-service.js';
|
||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
@@ -73,10 +74,10 @@ export async function parseAndCreateFeatures(
|
|||||||
updatedAt: new Date().toISOString(),
|
updatedAt: new Date().toISOString(),
|
||||||
};
|
};
|
||||||
|
|
||||||
await secureFs.writeFile(
|
// Use atomic write with backup support for crash protection
|
||||||
path.join(featureDir, 'feature.json'),
|
await atomicWriteJson(path.join(featureDir, 'feature.json'), featureData, {
|
||||||
JSON.stringify(featureData, null, 2)
|
backupCount: DEFAULT_BACKUP_COUNT,
|
||||||
);
|
});
|
||||||
|
|
||||||
createdFeatures.push({ id: feature.id, title: feature.title });
|
createdFeatures.push({ id: feature.id, title: feature.title });
|
||||||
}
|
}
|
||||||
@@ -88,6 +89,15 @@ export async function parseAndCreateFeatures(
|
|||||||
message: `Spec regeneration complete! Created ${createdFeatures.length} features.`,
|
message: `Spec regeneration complete! Created ${createdFeatures.length} features.`,
|
||||||
projectPath: projectPath,
|
projectPath: projectPath,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Create notification for spec generation completion
|
||||||
|
const notificationService = getNotificationService();
|
||||||
|
await notificationService.createNotification({
|
||||||
|
type: 'spec_regeneration_complete',
|
||||||
|
title: 'Spec Generation Complete',
|
||||||
|
message: `Created ${createdFeatures.length} features from the project specification.`,
|
||||||
|
projectPath: projectPath,
|
||||||
|
});
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('❌ parseAndCreateFeatures() failed:');
|
logger.error('❌ parseAndCreateFeatures() failed:');
|
||||||
logger.error('Error:', error);
|
logger.error('Error:', error);
|
||||||
|
|||||||
@@ -47,17 +47,17 @@ export function createCreateHandler(events: EventEmitter) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
if (isRunning) {
|
if (isRunning) {
|
||||||
logger.warn('Generation already running, rejecting request');
|
logger.warn('Generation already running for project:', projectPath);
|
||||||
res.json({ success: false, error: 'Spec generation already running' });
|
res.json({ success: false, error: 'Spec generation already running for this project' });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
logAuthStatus('Before starting generation');
|
logAuthStatus('Before starting generation');
|
||||||
|
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(projectPath, true, abortController);
|
||||||
logger.info('Starting background generation task...');
|
logger.info('Starting background generation task...');
|
||||||
|
|
||||||
// Start generation in background
|
// Start generation in background
|
||||||
@@ -80,7 +80,7 @@ export function createCreateHandler(events: EventEmitter) {
|
|||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
logger.info('Generation task finished (success or error)');
|
logger.info('Generation task finished (success or error)');
|
||||||
setRunningState(false, null);
|
setRunningState(projectPath, false, null);
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.info('Returning success response (generation running in background)');
|
logger.info('Returning success response (generation running in background)');
|
||||||
|
|||||||
@@ -40,17 +40,17 @@ export function createGenerateFeaturesHandler(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
if (isRunning) {
|
if (isRunning) {
|
||||||
logger.warn('Generation already running, rejecting request');
|
logger.warn('Generation already running for project:', projectPath);
|
||||||
res.json({ success: false, error: 'Generation already running' });
|
res.json({ success: false, error: 'Generation already running for this project' });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
logAuthStatus('Before starting feature generation');
|
logAuthStatus('Before starting feature generation');
|
||||||
|
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(projectPath, true, abortController, 'feature_generation');
|
||||||
logger.info('Starting background feature generation task...');
|
logger.info('Starting background feature generation task...');
|
||||||
|
|
||||||
generateFeaturesFromSpec(projectPath, events, abortController, maxFeatures, settingsService)
|
generateFeaturesFromSpec(projectPath, events, abortController, maxFeatures, settingsService)
|
||||||
@@ -63,7 +63,7 @@ export function createGenerateFeaturesHandler(
|
|||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
logger.info('Feature generation task finished (success or error)');
|
logger.info('Feature generation task finished (success or error)');
|
||||||
setRunningState(false, null);
|
setRunningState(projectPath, false, null);
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.info('Returning success response (generation running in background)');
|
logger.info('Returning success response (generation running in background)');
|
||||||
|
|||||||
@@ -48,17 +48,17 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
if (isRunning) {
|
if (isRunning) {
|
||||||
logger.warn('Generation already running, rejecting request');
|
logger.warn('Generation already running for project:', projectPath);
|
||||||
res.json({ success: false, error: 'Spec generation already running' });
|
res.json({ success: false, error: 'Spec generation already running for this project' });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
logAuthStatus('Before starting generation');
|
logAuthStatus('Before starting generation');
|
||||||
|
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(projectPath, true, abortController);
|
||||||
logger.info('Starting background generation task...');
|
logger.info('Starting background generation task...');
|
||||||
|
|
||||||
generateSpec(
|
generateSpec(
|
||||||
@@ -81,7 +81,7 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
|||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
logger.info('Generation task finished (success or error)');
|
logger.info('Generation task finished (success or error)');
|
||||||
setRunningState(false, null);
|
setRunningState(projectPath, false, null);
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.info('Returning success response (generation running in background)');
|
logger.info('Returning success response (generation running in background)');
|
||||||
|
|||||||
@@ -6,10 +6,11 @@ import type { Request, Response } from 'express';
|
|||||||
import { getSpecRegenerationStatus, getErrorMessage } from '../common.js';
|
import { getSpecRegenerationStatus, getErrorMessage } from '../common.js';
|
||||||
|
|
||||||
export function createStatusHandler() {
|
export function createStatusHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const projectPath = req.query.projectPath as string | undefined;
|
||||||
res.json({ success: true, isRunning });
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
|
res.json({ success: true, isRunning, projectPath });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,13 +6,16 @@ import type { Request, Response } from 'express';
|
|||||||
import { getSpecRegenerationStatus, setRunningState, getErrorMessage } from '../common.js';
|
import { getSpecRegenerationStatus, setRunningState, getErrorMessage } from '../common.js';
|
||||||
|
|
||||||
export function createStopHandler() {
|
export function createStopHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { currentAbortController } = getSpecRegenerationStatus();
|
const { projectPath } = req.body as { projectPath?: string };
|
||||||
|
const { currentAbortController } = getSpecRegenerationStatus(projectPath);
|
||||||
if (currentAbortController) {
|
if (currentAbortController) {
|
||||||
currentAbortController.abort();
|
currentAbortController.abort();
|
||||||
}
|
}
|
||||||
setRunningState(false, null);
|
if (projectPath) {
|
||||||
|
setRunningState(projectPath, false, null);
|
||||||
|
}
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
|||||||
76
apps/server/src/routes/app-spec/routes/sync.ts
Normal file
76
apps/server/src/routes/app-spec/routes/sync.ts
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
/**
|
||||||
|
* POST /sync endpoint - Sync spec with codebase and features
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { EventEmitter } from '../../../lib/events.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import {
|
||||||
|
getSpecRegenerationStatus,
|
||||||
|
setRunningState,
|
||||||
|
logAuthStatus,
|
||||||
|
logError,
|
||||||
|
getErrorMessage,
|
||||||
|
} from '../common.js';
|
||||||
|
import { syncSpec } from '../sync-spec.js';
|
||||||
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
|
|
||||||
|
const logger = createLogger('SpecSync');
|
||||||
|
|
||||||
|
export function createSyncHandler(events: EventEmitter, settingsService?: SettingsService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
logger.info('========== /sync endpoint called ==========');
|
||||||
|
logger.debug('Request body:', JSON.stringify(req.body, null, 2));
|
||||||
|
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
logger.debug('projectPath:', projectPath);
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
logger.error('Missing projectPath parameter');
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
|
if (isRunning) {
|
||||||
|
logger.warn('Generation/sync already running for project:', projectPath);
|
||||||
|
res.json({ success: false, error: 'Operation already running for this project' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logAuthStatus('Before starting spec sync');
|
||||||
|
|
||||||
|
const abortController = new AbortController();
|
||||||
|
setRunningState(projectPath, true, abortController, 'sync');
|
||||||
|
logger.info('Starting background spec sync task...');
|
||||||
|
|
||||||
|
syncSpec(projectPath, events, abortController, settingsService)
|
||||||
|
.then((result) => {
|
||||||
|
logger.info('Spec sync completed successfully');
|
||||||
|
logger.info('Result:', JSON.stringify(result, null, 2));
|
||||||
|
})
|
||||||
|
.catch((error) => {
|
||||||
|
logError(error, 'Spec sync failed with error');
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_error',
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
})
|
||||||
|
.finally(() => {
|
||||||
|
logger.info('Spec sync task finished (success or error)');
|
||||||
|
setRunningState(projectPath, false, null);
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.info('Returning success response (sync running in background)');
|
||||||
|
res.json({ success: true });
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Sync route handler failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
307
apps/server/src/routes/app-spec/sync-spec.ts
Normal file
307
apps/server/src/routes/app-spec/sync-spec.ts
Normal file
@@ -0,0 +1,307 @@
|
|||||||
|
/**
|
||||||
|
* Sync spec with current codebase and feature state
|
||||||
|
*
|
||||||
|
* Updates the spec file based on:
|
||||||
|
* - Completed Automaker features
|
||||||
|
* - Code analysis for tech stack and implementations
|
||||||
|
* - Roadmap phase status updates
|
||||||
|
*/
|
||||||
|
|
||||||
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||||
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
|
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||||
|
import { getAppSpecPath } from '@automaker/platform';
|
||||||
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
|
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
|
||||||
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
|
import {
|
||||||
|
extractImplementedFeatures,
|
||||||
|
extractTechnologyStack,
|
||||||
|
extractRoadmapPhases,
|
||||||
|
updateImplementedFeaturesSection,
|
||||||
|
updateTechnologyStack,
|
||||||
|
updateRoadmapPhaseStatus,
|
||||||
|
type ImplementedFeature,
|
||||||
|
type RoadmapPhase,
|
||||||
|
} from '../../lib/xml-extractor.js';
|
||||||
|
import { getNotificationService } from '../../services/notification-service.js';
|
||||||
|
|
||||||
|
const logger = createLogger('SpecSync');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Result of a sync operation
|
||||||
|
*/
|
||||||
|
export interface SyncResult {
|
||||||
|
techStackUpdates: {
|
||||||
|
added: string[];
|
||||||
|
removed: string[];
|
||||||
|
};
|
||||||
|
implementedFeaturesUpdates: {
|
||||||
|
addedFromFeatures: string[];
|
||||||
|
removed: string[];
|
||||||
|
};
|
||||||
|
roadmapUpdates: Array<{ phaseName: string; newStatus: string }>;
|
||||||
|
summary: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sync the spec with current codebase and feature state
|
||||||
|
*/
|
||||||
|
export async function syncSpec(
|
||||||
|
projectPath: string,
|
||||||
|
events: EventEmitter,
|
||||||
|
abortController: AbortController,
|
||||||
|
settingsService?: SettingsService
|
||||||
|
): Promise<SyncResult> {
|
||||||
|
logger.info('========== syncSpec() started ==========');
|
||||||
|
logger.info('projectPath:', projectPath);
|
||||||
|
|
||||||
|
const result: SyncResult = {
|
||||||
|
techStackUpdates: { added: [], removed: [] },
|
||||||
|
implementedFeaturesUpdates: { addedFromFeatures: [], removed: [] },
|
||||||
|
roadmapUpdates: [],
|
||||||
|
summary: '',
|
||||||
|
};
|
||||||
|
|
||||||
|
// Read existing spec
|
||||||
|
const specPath = getAppSpecPath(projectPath);
|
||||||
|
let specContent: string;
|
||||||
|
|
||||||
|
try {
|
||||||
|
specContent = (await secureFs.readFile(specPath, 'utf-8')) as string;
|
||||||
|
logger.info(`Spec loaded successfully (${specContent.length} chars)`);
|
||||||
|
} catch (readError) {
|
||||||
|
logger.error('Failed to read spec file:', readError);
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_error',
|
||||||
|
error: 'No project spec found. Create or regenerate spec first.',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
throw new Error('No project spec found');
|
||||||
|
}
|
||||||
|
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: '[Phase: sync] Starting spec sync...\n',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Extract current state from spec
|
||||||
|
const currentImplementedFeatures = extractImplementedFeatures(specContent);
|
||||||
|
const currentTechStack = extractTechnologyStack(specContent);
|
||||||
|
const currentRoadmapPhases = extractRoadmapPhases(specContent);
|
||||||
|
|
||||||
|
logger.info(`Current spec has ${currentImplementedFeatures.length} implemented features`);
|
||||||
|
logger.info(`Current spec has ${currentTechStack.length} technologies`);
|
||||||
|
logger.info(`Current spec has ${currentRoadmapPhases.length} roadmap phases`);
|
||||||
|
|
||||||
|
// Load completed Automaker features
|
||||||
|
const featureLoader = new FeatureLoader();
|
||||||
|
const allFeatures = await featureLoader.getAll(projectPath);
|
||||||
|
const completedFeatures = allFeatures.filter(
|
||||||
|
(f) => f.status === 'completed' || f.status === 'verified'
|
||||||
|
);
|
||||||
|
|
||||||
|
logger.info(`Found ${completedFeatures.length} completed/verified features in Automaker`);
|
||||||
|
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: `Found ${completedFeatures.length} completed features to sync...\n`,
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Build new implemented features list from completed Automaker features
|
||||||
|
const newImplementedFeatures: ImplementedFeature[] = [];
|
||||||
|
const existingNames = new Set(currentImplementedFeatures.map((f) => f.name.toLowerCase()));
|
||||||
|
|
||||||
|
for (const feature of completedFeatures) {
|
||||||
|
const name = feature.title || `Feature: ${feature.id}`;
|
||||||
|
if (!existingNames.has(name.toLowerCase())) {
|
||||||
|
newImplementedFeatures.push({
|
||||||
|
name,
|
||||||
|
description: feature.description || '',
|
||||||
|
});
|
||||||
|
result.implementedFeaturesUpdates.addedFromFeatures.push(name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge: keep existing + add new from completed features
|
||||||
|
const mergedFeatures = [...currentImplementedFeatures, ...newImplementedFeatures];
|
||||||
|
|
||||||
|
// Update spec with merged features
|
||||||
|
if (result.implementedFeaturesUpdates.addedFromFeatures.length > 0) {
|
||||||
|
specContent = updateImplementedFeaturesSection(specContent, mergedFeatures);
|
||||||
|
logger.info(
|
||||||
|
`Added ${result.implementedFeaturesUpdates.addedFromFeatures.length} features to spec`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Analyze codebase for tech stack updates using AI
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: 'Analyzing codebase for technology updates...\n',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
||||||
|
projectPath,
|
||||||
|
settingsService,
|
||||||
|
'[SpecSync]'
|
||||||
|
);
|
||||||
|
|
||||||
|
const settings = await settingsService?.getGlobalSettings();
|
||||||
|
const phaseModelEntry =
|
||||||
|
settings?.phaseModels?.specGenerationModel || DEFAULT_PHASE_MODELS.specGenerationModel;
|
||||||
|
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
||||||
|
|
||||||
|
// Use AI to analyze tech stack
|
||||||
|
const techAnalysisPrompt = `Analyze this project and return ONLY a JSON object with the current technology stack.
|
||||||
|
|
||||||
|
Current known technologies: ${currentTechStack.join(', ')}
|
||||||
|
|
||||||
|
Look at package.json, config files, and source code to identify:
|
||||||
|
- Frameworks (React, Vue, Express, etc.)
|
||||||
|
- Languages (TypeScript, JavaScript, Python, etc.)
|
||||||
|
- Build tools (Vite, Webpack, etc.)
|
||||||
|
- Databases (PostgreSQL, MongoDB, etc.)
|
||||||
|
- Key libraries and tools
|
||||||
|
|
||||||
|
Return ONLY this JSON format, no other text:
|
||||||
|
{
|
||||||
|
"technologies": ["Technology 1", "Technology 2", ...]
|
||||||
|
}`;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const techResult = await streamingQuery({
|
||||||
|
prompt: techAnalysisPrompt,
|
||||||
|
model,
|
||||||
|
cwd: projectPath,
|
||||||
|
maxTurns: 10,
|
||||||
|
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
|
abortController,
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true,
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
onText: (text) => {
|
||||||
|
logger.debug(`Tech analysis text: ${text.substring(0, 100)}`);
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Parse tech stack from response
|
||||||
|
const jsonMatch = techResult.text.match(/\{[\s\S]*"technologies"[\s\S]*\}/);
|
||||||
|
if (jsonMatch) {
|
||||||
|
const parsed = JSON.parse(jsonMatch[0]);
|
||||||
|
if (Array.isArray(parsed.technologies)) {
|
||||||
|
const newTechStack = parsed.technologies as string[];
|
||||||
|
|
||||||
|
// Calculate differences
|
||||||
|
const currentSet = new Set(currentTechStack.map((t) => t.toLowerCase()));
|
||||||
|
const newSet = new Set(newTechStack.map((t) => t.toLowerCase()));
|
||||||
|
|
||||||
|
for (const tech of newTechStack) {
|
||||||
|
if (!currentSet.has(tech.toLowerCase())) {
|
||||||
|
result.techStackUpdates.added.push(tech);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const tech of currentTechStack) {
|
||||||
|
if (!newSet.has(tech.toLowerCase())) {
|
||||||
|
result.techStackUpdates.removed.push(tech);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update spec with new tech stack if there are changes
|
||||||
|
if (
|
||||||
|
result.techStackUpdates.added.length > 0 ||
|
||||||
|
result.techStackUpdates.removed.length > 0
|
||||||
|
) {
|
||||||
|
specContent = updateTechnologyStack(specContent, newTechStack);
|
||||||
|
logger.info(
|
||||||
|
`Updated tech stack: +${result.techStackUpdates.added.length}, -${result.techStackUpdates.removed.length}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
logger.warn('Failed to analyze tech stack:', error);
|
||||||
|
// Continue with other sync operations
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update roadmap phase statuses based on completed features
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: 'Checking roadmap phase statuses...\n',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
// For each phase, check if all its features are completed
|
||||||
|
// This is a heuristic - we check if the phase name appears in any feature titles/descriptions
|
||||||
|
for (const phase of currentRoadmapPhases) {
|
||||||
|
if (phase.status === 'completed') continue; // Already completed
|
||||||
|
|
||||||
|
// Check if this phase should be marked as completed
|
||||||
|
// A phase is considered complete if we have completed features that mention it
|
||||||
|
const phaseNameLower = phase.name.toLowerCase();
|
||||||
|
const relatedCompletedFeatures = completedFeatures.filter(
|
||||||
|
(f) =>
|
||||||
|
f.title?.toLowerCase().includes(phaseNameLower) ||
|
||||||
|
f.description?.toLowerCase().includes(phaseNameLower) ||
|
||||||
|
f.category?.toLowerCase().includes(phaseNameLower)
|
||||||
|
);
|
||||||
|
|
||||||
|
// If we have related completed features and the phase is still pending/in_progress,
|
||||||
|
// update it to in_progress or completed based on feature count
|
||||||
|
if (relatedCompletedFeatures.length > 0 && phase.status !== 'completed') {
|
||||||
|
const newStatus = 'in_progress';
|
||||||
|
specContent = updateRoadmapPhaseStatus(specContent, phase.name, newStatus);
|
||||||
|
result.roadmapUpdates.push({ phaseName: phase.name, newStatus });
|
||||||
|
logger.info(`Updated phase "${phase.name}" to ${newStatus}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save updated spec
|
||||||
|
await secureFs.writeFile(specPath, specContent, 'utf-8');
|
||||||
|
logger.info('Spec saved successfully');
|
||||||
|
|
||||||
|
// Build summary
|
||||||
|
const summaryParts: string[] = [];
|
||||||
|
if (result.implementedFeaturesUpdates.addedFromFeatures.length > 0) {
|
||||||
|
summaryParts.push(
|
||||||
|
`Added ${result.implementedFeaturesUpdates.addedFromFeatures.length} implemented features`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if (result.techStackUpdates.added.length > 0) {
|
||||||
|
summaryParts.push(`Added ${result.techStackUpdates.added.length} technologies`);
|
||||||
|
}
|
||||||
|
if (result.techStackUpdates.removed.length > 0) {
|
||||||
|
summaryParts.push(`Removed ${result.techStackUpdates.removed.length} technologies`);
|
||||||
|
}
|
||||||
|
if (result.roadmapUpdates.length > 0) {
|
||||||
|
summaryParts.push(`Updated ${result.roadmapUpdates.length} roadmap phases`);
|
||||||
|
}
|
||||||
|
|
||||||
|
result.summary = summaryParts.length > 0 ? summaryParts.join(', ') : 'Spec is already up to date';
|
||||||
|
|
||||||
|
// Create notification
|
||||||
|
const notificationService = getNotificationService();
|
||||||
|
await notificationService.createNotification({
|
||||||
|
type: 'spec_regeneration_complete',
|
||||||
|
title: 'Spec Sync Complete',
|
||||||
|
message: result.summary,
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_complete',
|
||||||
|
message: `Spec sync complete! ${result.summary}`,
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.info('========== syncSpec() completed ==========');
|
||||||
|
logger.info('Summary:', result.summary);
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
@@ -17,6 +17,7 @@ import { createAnalyzeProjectHandler } from './routes/analyze-project.js';
|
|||||||
import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
|
import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
|
||||||
import { createCommitFeatureHandler } from './routes/commit-feature.js';
|
import { createCommitFeatureHandler } from './routes/commit-feature.js';
|
||||||
import { createApprovePlanHandler } from './routes/approve-plan.js';
|
import { createApprovePlanHandler } from './routes/approve-plan.js';
|
||||||
|
import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
|
||||||
|
|
||||||
export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
@@ -63,6 +64,11 @@ export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
|||||||
validatePathParams('projectPath'),
|
validatePathParams('projectPath'),
|
||||||
createApprovePlanHandler(autoModeService)
|
createApprovePlanHandler(autoModeService)
|
||||||
);
|
);
|
||||||
|
router.post(
|
||||||
|
'/resume-interrupted',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createResumeInterruptedHandler(autoModeService)
|
||||||
|
);
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,42 @@
|
|||||||
|
/**
|
||||||
|
* Resume Interrupted Features Handler
|
||||||
|
*
|
||||||
|
* Checks for features that were interrupted (in pipeline steps or in_progress)
|
||||||
|
* when the server was restarted and resumes them.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
||||||
|
|
||||||
|
const logger = createLogger('ResumeInterrupted');
|
||||||
|
|
||||||
|
interface ResumeInterruptedRequest {
|
||||||
|
projectPath: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createResumeInterruptedHandler(autoModeService: AutoModeService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
const { projectPath } = req.body as ResumeInterruptedRequest;
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ error: 'Project path is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Checking for interrupted features in ${projectPath}`);
|
||||||
|
|
||||||
|
try {
|
||||||
|
await autoModeService.resumeInterruptedFeatures(projectPath);
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'Resume check completed',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error resuming interrupted features:', error);
|
||||||
|
res.status(500).json({
|
||||||
|
error: error instanceof Error ? error.message : 'Unknown error',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -3,12 +3,31 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { ensureAutomakerDir, getAutomakerDir } from '@automaker/platform';
|
||||||
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
|
import path from 'path';
|
||||||
|
import type { BacklogPlanResult } from '@automaker/types';
|
||||||
|
|
||||||
const logger = createLogger('BacklogPlan');
|
const logger = createLogger('BacklogPlan');
|
||||||
|
|
||||||
// State for tracking running generation
|
// State for tracking running generation
|
||||||
let isRunning = false;
|
let isRunning = false;
|
||||||
let currentAbortController: AbortController | null = null;
|
let currentAbortController: AbortController | null = null;
|
||||||
|
let runningDetails: {
|
||||||
|
projectPath: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
startedAt: string;
|
||||||
|
} | null = null;
|
||||||
|
|
||||||
|
const BACKLOG_PLAN_FILENAME = 'backlog-plan.json';
|
||||||
|
|
||||||
|
export interface StoredBacklogPlan {
|
||||||
|
savedAt: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
result: BacklogPlanResult;
|
||||||
|
}
|
||||||
|
|
||||||
export function getBacklogPlanStatus(): { isRunning: boolean } {
|
export function getBacklogPlanStatus(): { isRunning: boolean } {
|
||||||
return { isRunning };
|
return { isRunning };
|
||||||
@@ -16,11 +35,67 @@ export function getBacklogPlanStatus(): { isRunning: boolean } {
|
|||||||
|
|
||||||
export function setRunningState(running: boolean, abortController?: AbortController | null): void {
|
export function setRunningState(running: boolean, abortController?: AbortController | null): void {
|
||||||
isRunning = running;
|
isRunning = running;
|
||||||
|
if (!running) {
|
||||||
|
runningDetails = null;
|
||||||
|
}
|
||||||
if (abortController !== undefined) {
|
if (abortController !== undefined) {
|
||||||
currentAbortController = abortController;
|
currentAbortController = abortController;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function setRunningDetails(
|
||||||
|
details: {
|
||||||
|
projectPath: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
startedAt: string;
|
||||||
|
} | null
|
||||||
|
): void {
|
||||||
|
runningDetails = details;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getRunningDetails(): {
|
||||||
|
projectPath: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
startedAt: string;
|
||||||
|
} | null {
|
||||||
|
return runningDetails;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getBacklogPlanPath(projectPath: string): string {
|
||||||
|
return path.join(getAutomakerDir(projectPath), BACKLOG_PLAN_FILENAME);
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function saveBacklogPlan(projectPath: string, plan: StoredBacklogPlan): Promise<void> {
|
||||||
|
await ensureAutomakerDir(projectPath);
|
||||||
|
const filePath = getBacklogPlanPath(projectPath);
|
||||||
|
await secureFs.writeFile(filePath, JSON.stringify(plan, null, 2), 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function loadBacklogPlan(projectPath: string): Promise<StoredBacklogPlan | null> {
|
||||||
|
try {
|
||||||
|
const filePath = getBacklogPlanPath(projectPath);
|
||||||
|
const raw = await secureFs.readFile(filePath, 'utf-8');
|
||||||
|
const parsed = JSON.parse(raw as string) as StoredBacklogPlan;
|
||||||
|
if (!Array.isArray(parsed?.result?.changes)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return parsed;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function clearBacklogPlan(projectPath: string): Promise<void> {
|
||||||
|
try {
|
||||||
|
const filePath = getBacklogPlanPath(projectPath);
|
||||||
|
await secureFs.unlink(filePath);
|
||||||
|
} catch {
|
||||||
|
// ignore missing file
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
export function getAbortController(): AbortController | null {
|
export function getAbortController(): AbortController | null {
|
||||||
return currentAbortController;
|
return currentAbortController;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,7 +17,13 @@ import { resolvePhaseModel } from '@automaker/model-resolver';
|
|||||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||||
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
||||||
import { logger, setRunningState, getErrorMessage } from './common.js';
|
import {
|
||||||
|
logger,
|
||||||
|
setRunningState,
|
||||||
|
setRunningDetails,
|
||||||
|
getErrorMessage,
|
||||||
|
saveBacklogPlan,
|
||||||
|
} from './common.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
|
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
|
||||||
|
|
||||||
@@ -200,6 +206,13 @@ ${userPrompt}`;
|
|||||||
// Parse the response
|
// Parse the response
|
||||||
const result = parsePlanResponse(responseText);
|
const result = parsePlanResponse(responseText);
|
||||||
|
|
||||||
|
await saveBacklogPlan(projectPath, {
|
||||||
|
savedAt: new Date().toISOString(),
|
||||||
|
prompt,
|
||||||
|
model: effectiveModel,
|
||||||
|
result,
|
||||||
|
});
|
||||||
|
|
||||||
events.emit('backlog-plan:event', {
|
events.emit('backlog-plan:event', {
|
||||||
type: 'backlog_plan_complete',
|
type: 'backlog_plan_complete',
|
||||||
result,
|
result,
|
||||||
@@ -218,5 +231,6 @@ ${userPrompt}`;
|
|||||||
throw error;
|
throw error;
|
||||||
} finally {
|
} finally {
|
||||||
setRunningState(false, null);
|
setRunningState(false, null);
|
||||||
|
setRunningDetails(null);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import { createGenerateHandler } from './routes/generate.js';
|
|||||||
import { createStopHandler } from './routes/stop.js';
|
import { createStopHandler } from './routes/stop.js';
|
||||||
import { createStatusHandler } from './routes/status.js';
|
import { createStatusHandler } from './routes/status.js';
|
||||||
import { createApplyHandler } from './routes/apply.js';
|
import { createApplyHandler } from './routes/apply.js';
|
||||||
|
import { createClearHandler } from './routes/clear.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
|
|
||||||
export function createBacklogPlanRoutes(
|
export function createBacklogPlanRoutes(
|
||||||
@@ -23,8 +24,9 @@ export function createBacklogPlanRoutes(
|
|||||||
createGenerateHandler(events, settingsService)
|
createGenerateHandler(events, settingsService)
|
||||||
);
|
);
|
||||||
router.post('/stop', createStopHandler());
|
router.post('/stop', createStopHandler());
|
||||||
router.get('/status', createStatusHandler());
|
router.get('/status', validatePathParams('projectPath'), createStatusHandler());
|
||||||
router.post('/apply', validatePathParams('projectPath'), createApplyHandler());
|
router.post('/apply', validatePathParams('projectPath'), createApplyHandler());
|
||||||
|
router.post('/clear', validatePathParams('projectPath'), createClearHandler());
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,18 +5,29 @@
|
|||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { BacklogPlanResult, BacklogChange, Feature } from '@automaker/types';
|
import type { BacklogPlanResult, BacklogChange, Feature } from '@automaker/types';
|
||||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
import { getErrorMessage, logError, logger } from '../common.js';
|
import { clearBacklogPlan, getErrorMessage, logError, logger } from '../common.js';
|
||||||
|
|
||||||
const featureLoader = new FeatureLoader();
|
const featureLoader = new FeatureLoader();
|
||||||
|
|
||||||
export function createApplyHandler() {
|
export function createApplyHandler() {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, plan } = req.body as {
|
const {
|
||||||
|
projectPath,
|
||||||
|
plan,
|
||||||
|
branchName: rawBranchName,
|
||||||
|
} = req.body as {
|
||||||
projectPath: string;
|
projectPath: string;
|
||||||
plan: BacklogPlanResult;
|
plan: BacklogPlanResult;
|
||||||
|
branchName?: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Validate branchName: must be undefined or a non-empty trimmed string
|
||||||
|
const branchName =
|
||||||
|
typeof rawBranchName === 'string' && rawBranchName.trim().length > 0
|
||||||
|
? rawBranchName.trim()
|
||||||
|
: undefined;
|
||||||
|
|
||||||
if (!projectPath) {
|
if (!projectPath) {
|
||||||
res.status(400).json({ success: false, error: 'projectPath required' });
|
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||||
return;
|
return;
|
||||||
@@ -82,6 +93,7 @@ export function createApplyHandler() {
|
|||||||
dependencies: change.feature.dependencies,
|
dependencies: change.feature.dependencies,
|
||||||
priority: change.feature.priority,
|
priority: change.feature.priority,
|
||||||
status: 'backlog',
|
status: 'backlog',
|
||||||
|
branchName,
|
||||||
});
|
});
|
||||||
|
|
||||||
appliedChanges.push(`added:${newFeature.id}`);
|
appliedChanges.push(`added:${newFeature.id}`);
|
||||||
@@ -135,6 +147,17 @@ export function createApplyHandler() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Clear the plan before responding
|
||||||
|
try {
|
||||||
|
await clearBacklogPlan(projectPath);
|
||||||
|
} catch (error) {
|
||||||
|
logger.warn(
|
||||||
|
`[BacklogPlan] Failed to clear backlog plan after apply:`,
|
||||||
|
getErrorMessage(error)
|
||||||
|
);
|
||||||
|
// Don't throw - operation succeeded, just cleanup failed
|
||||||
|
}
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
appliedChanges,
|
appliedChanges,
|
||||||
|
|||||||
25
apps/server/src/routes/backlog-plan/routes/clear.ts
Normal file
25
apps/server/src/routes/backlog-plan/routes/clear.ts
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
/**
|
||||||
|
* POST /clear endpoint - Clear saved backlog plan
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { clearBacklogPlan, getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
export function createClearHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body as { projectPath: string };
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
await clearBacklogPlan(projectPath);
|
||||||
|
res.json({ success: true });
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Clear backlog plan failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -4,7 +4,13 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { EventEmitter } from '../../../lib/events.js';
|
import type { EventEmitter } from '../../../lib/events.js';
|
||||||
import { getBacklogPlanStatus, setRunningState, getErrorMessage, logError } from '../common.js';
|
import {
|
||||||
|
getBacklogPlanStatus,
|
||||||
|
setRunningState,
|
||||||
|
setRunningDetails,
|
||||||
|
getErrorMessage,
|
||||||
|
logError,
|
||||||
|
} from '../common.js';
|
||||||
import { generateBacklogPlan } from '../generate-plan.js';
|
import { generateBacklogPlan } from '../generate-plan.js';
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
|
|
||||||
@@ -37,6 +43,12 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
|||||||
}
|
}
|
||||||
|
|
||||||
setRunningState(true);
|
setRunningState(true);
|
||||||
|
setRunningDetails({
|
||||||
|
projectPath,
|
||||||
|
prompt,
|
||||||
|
model,
|
||||||
|
startedAt: new Date().toISOString(),
|
||||||
|
});
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(true, abortController);
|
||||||
|
|
||||||
@@ -51,6 +63,7 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
|||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
setRunningState(false, null);
|
setRunningState(false, null);
|
||||||
|
setRunningDetails(null);
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
|
|||||||
@@ -3,13 +3,15 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { getBacklogPlanStatus, getErrorMessage, logError } from '../common.js';
|
import { getBacklogPlanStatus, loadBacklogPlan, getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createStatusHandler() {
|
export function createStatusHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const status = getBacklogPlanStatus();
|
const status = getBacklogPlanStatus();
|
||||||
res.json({ success: true, ...status });
|
const projectPath = typeof req.query.projectPath === 'string' ? req.query.projectPath : '';
|
||||||
|
const savedPlan = projectPath ? await loadBacklogPlan(projectPath) : null;
|
||||||
|
res.json({ success: true, ...status, savedPlan });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'Get backlog plan status failed');
|
logError(error, 'Get backlog plan status failed');
|
||||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
|||||||
@@ -3,7 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { getAbortController, setRunningState, getErrorMessage, logError } from '../common.js';
|
import {
|
||||||
|
getAbortController,
|
||||||
|
setRunningState,
|
||||||
|
setRunningDetails,
|
||||||
|
getErrorMessage,
|
||||||
|
logError,
|
||||||
|
} from '../common.js';
|
||||||
|
|
||||||
export function createStopHandler() {
|
export function createStopHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
@@ -12,6 +18,7 @@ export function createStopHandler() {
|
|||||||
if (abortController) {
|
if (abortController) {
|
||||||
abortController.abort();
|
abortController.abort();
|
||||||
setRunningState(false, null);
|
setRunningState(false, null);
|
||||||
|
setRunningDetails(null);
|
||||||
}
|
}
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@@ -34,6 +34,13 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
|
|||||||
error: 'Authentication required',
|
error: 'Authentication required',
|
||||||
message: "Please run 'claude login' to authenticate",
|
message: "Please run 'claude login' to authenticate",
|
||||||
});
|
});
|
||||||
|
} else if (message.includes('TRUST_PROMPT_PENDING')) {
|
||||||
|
// Trust prompt appeared but couldn't be auto-approved
|
||||||
|
res.status(200).json({
|
||||||
|
error: 'Trust prompt pending',
|
||||||
|
message:
|
||||||
|
'Claude CLI needs folder permission. Please run "claude" in your terminal and approve access.',
|
||||||
|
});
|
||||||
} else if (message.includes('timed out')) {
|
} else if (message.includes('timed out')) {
|
||||||
res.status(200).json({
|
res.status(200).json({
|
||||||
error: 'Command timed out',
|
error: 'Command timed out',
|
||||||
|
|||||||
78
apps/server/src/routes/code-review/common.ts
Normal file
78
apps/server/src/routes/code-review/common.ts
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
/**
|
||||||
|
* Common utilities for code-review routes
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
|
||||||
|
|
||||||
|
const logger = createLogger('CodeReview');
|
||||||
|
|
||||||
|
// Re-export shared utilities
|
||||||
|
export { getErrorMessageShared as getErrorMessage };
|
||||||
|
export const logError = createLogError(logger);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Review state interface
|
||||||
|
*/
|
||||||
|
interface ReviewState {
|
||||||
|
isRunning: boolean;
|
||||||
|
abortController: AbortController | null;
|
||||||
|
projectPath: string | null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Shared state for code review operations
|
||||||
|
* Using an object to avoid mutable `let` exports which can cause issues in ES modules
|
||||||
|
*/
|
||||||
|
const reviewState: ReviewState = {
|
||||||
|
isRunning: false,
|
||||||
|
abortController: null,
|
||||||
|
projectPath: null,
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a review is currently running
|
||||||
|
*/
|
||||||
|
export function isRunning(): boolean {
|
||||||
|
return reviewState.isRunning;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the current abort controller (for stopping reviews)
|
||||||
|
*/
|
||||||
|
export function getAbortController(): AbortController | null {
|
||||||
|
return reviewState.abortController;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the current project path being reviewed
|
||||||
|
*/
|
||||||
|
export function getCurrentProjectPath(): string | null {
|
||||||
|
return reviewState.projectPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set the running state for code review operations
|
||||||
|
*/
|
||||||
|
export function setRunningState(
|
||||||
|
running: boolean,
|
||||||
|
controller: AbortController | null = null,
|
||||||
|
projectPath: string | null = null
|
||||||
|
): void {
|
||||||
|
reviewState.isRunning = running;
|
||||||
|
reviewState.abortController = controller;
|
||||||
|
reviewState.projectPath = projectPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the current review status
|
||||||
|
*/
|
||||||
|
export function getReviewStatus(): {
|
||||||
|
isRunning: boolean;
|
||||||
|
projectPath: string | null;
|
||||||
|
} {
|
||||||
|
return {
|
||||||
|
isRunning: reviewState.isRunning,
|
||||||
|
projectPath: reviewState.projectPath,
|
||||||
|
};
|
||||||
|
}
|
||||||
40
apps/server/src/routes/code-review/index.ts
Normal file
40
apps/server/src/routes/code-review/index.ts
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
/**
|
||||||
|
* Code Review routes - HTTP API for triggering and managing code reviews
|
||||||
|
*
|
||||||
|
* Provides endpoints for:
|
||||||
|
* - Triggering code reviews on projects
|
||||||
|
* - Checking review status
|
||||||
|
* - Stopping in-progress reviews
|
||||||
|
*
|
||||||
|
* Uses the CodeReviewService for actual review execution with AI providers.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Router } from 'express';
|
||||||
|
import type { CodeReviewService } from '../../services/code-review-service.js';
|
||||||
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
|
import { createTriggerHandler } from './routes/trigger.js';
|
||||||
|
import { createStatusHandler } from './routes/status.js';
|
||||||
|
import { createStopHandler } from './routes/stop.js';
|
||||||
|
import { createProvidersHandler } from './routes/providers.js';
|
||||||
|
|
||||||
|
export function createCodeReviewRoutes(codeReviewService: CodeReviewService): Router {
|
||||||
|
const router = Router();
|
||||||
|
|
||||||
|
// POST /trigger - Start a new code review
|
||||||
|
router.post(
|
||||||
|
'/trigger',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createTriggerHandler(codeReviewService)
|
||||||
|
);
|
||||||
|
|
||||||
|
// GET /status - Get current review status
|
||||||
|
router.get('/status', createStatusHandler());
|
||||||
|
|
||||||
|
// POST /stop - Stop current review
|
||||||
|
router.post('/stop', createStopHandler());
|
||||||
|
|
||||||
|
// GET /providers - Get available providers and their status
|
||||||
|
router.get('/providers', createProvidersHandler(codeReviewService));
|
||||||
|
|
||||||
|
return router;
|
||||||
|
}
|
||||||
38
apps/server/src/routes/code-review/routes/providers.ts
Normal file
38
apps/server/src/routes/code-review/routes/providers.ts
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
/**
|
||||||
|
* GET /providers endpoint - Get available code review providers
|
||||||
|
*
|
||||||
|
* Returns the status of all available AI providers that can be used for code reviews.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { CodeReviewService } from '../../../services/code-review-service.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
const logger = createLogger('CodeReview');
|
||||||
|
|
||||||
|
export function createProvidersHandler(codeReviewService: CodeReviewService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
logger.debug('========== /providers endpoint called ==========');
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Check if refresh is requested
|
||||||
|
const forceRefresh = req.query.refresh === 'true';
|
||||||
|
|
||||||
|
const providers = await codeReviewService.getProviderStatus(forceRefresh);
|
||||||
|
const bestProvider = await codeReviewService.getBestProvider();
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
providers,
|
||||||
|
recommended: bestProvider,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Providers handler exception');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
32
apps/server/src/routes/code-review/routes/status.ts
Normal file
32
apps/server/src/routes/code-review/routes/status.ts
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
/**
|
||||||
|
* GET /status endpoint - Get current code review status
|
||||||
|
*
|
||||||
|
* Returns whether a code review is currently running and which project.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { getReviewStatus, getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
const logger = createLogger('CodeReview');
|
||||||
|
|
||||||
|
export function createStatusHandler() {
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
logger.debug('========== /status endpoint called ==========');
|
||||||
|
|
||||||
|
try {
|
||||||
|
const status = getReviewStatus();
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
...status,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Status handler exception');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
54
apps/server/src/routes/code-review/routes/stop.ts
Normal file
54
apps/server/src/routes/code-review/routes/stop.ts
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
/**
|
||||||
|
* POST /stop endpoint - Stop the current code review
|
||||||
|
*
|
||||||
|
* Aborts any running code review operation.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import {
|
||||||
|
isRunning,
|
||||||
|
getAbortController,
|
||||||
|
setRunningState,
|
||||||
|
getErrorMessage,
|
||||||
|
logError,
|
||||||
|
} from '../common.js';
|
||||||
|
|
||||||
|
const logger = createLogger('CodeReview');
|
||||||
|
|
||||||
|
export function createStopHandler() {
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
logger.info('========== /stop endpoint called ==========');
|
||||||
|
|
||||||
|
try {
|
||||||
|
if (!isRunning()) {
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'No code review is currently running',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Abort the current operation
|
||||||
|
const abortController = getAbortController();
|
||||||
|
if (abortController) {
|
||||||
|
abortController.abort();
|
||||||
|
logger.info('Code review aborted');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset state
|
||||||
|
setRunningState(false, null, null);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'Code review stopped',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Stop handler exception');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
188
apps/server/src/routes/code-review/routes/trigger.ts
Normal file
188
apps/server/src/routes/code-review/routes/trigger.ts
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
/**
|
||||||
|
* POST /trigger endpoint - Trigger a code review
|
||||||
|
*
|
||||||
|
* Starts an asynchronous code review on the specified project.
|
||||||
|
* Progress updates are streamed via WebSocket events.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { CodeReviewService } from '../../../services/code-review-service.js';
|
||||||
|
import type { CodeReviewCategory, ThinkingLevel, ModelId } from '@automaker/types';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { isRunning, setRunningState, getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
const logger = createLogger('CodeReview');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maximum number of files allowed per review request
|
||||||
|
*/
|
||||||
|
const MAX_FILES_PER_REQUEST = 100;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maximum length for baseRef parameter
|
||||||
|
*/
|
||||||
|
const MAX_BASE_REF_LENGTH = 256;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Valid categories for code review
|
||||||
|
*/
|
||||||
|
const VALID_CATEGORIES: CodeReviewCategory[] = [
|
||||||
|
'tech_stack',
|
||||||
|
'security',
|
||||||
|
'code_quality',
|
||||||
|
'implementation',
|
||||||
|
'architecture',
|
||||||
|
'performance',
|
||||||
|
'testing',
|
||||||
|
'documentation',
|
||||||
|
];
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Valid thinking levels
|
||||||
|
*/
|
||||||
|
const VALID_THINKING_LEVELS: ThinkingLevel[] = ['low', 'medium', 'high'];
|
||||||
|
|
||||||
|
interface TriggerRequestBody {
|
||||||
|
projectPath: string;
|
||||||
|
files?: string[];
|
||||||
|
baseRef?: string;
|
||||||
|
categories?: CodeReviewCategory[];
|
||||||
|
autoFix?: boolean;
|
||||||
|
model?: ModelId;
|
||||||
|
thinkingLevel?: ThinkingLevel;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validate and sanitize the request body
|
||||||
|
*/
|
||||||
|
function validateRequestBody(body: TriggerRequestBody): { valid: boolean; error?: string } {
|
||||||
|
const { files, baseRef, categories, autoFix, thinkingLevel } = body;
|
||||||
|
|
||||||
|
// Validate files array
|
||||||
|
if (files !== undefined) {
|
||||||
|
if (!Array.isArray(files)) {
|
||||||
|
return { valid: false, error: 'files must be an array' };
|
||||||
|
}
|
||||||
|
if (files.length > MAX_FILES_PER_REQUEST) {
|
||||||
|
return { valid: false, error: `Maximum ${MAX_FILES_PER_REQUEST} files allowed per request` };
|
||||||
|
}
|
||||||
|
for (const file of files) {
|
||||||
|
if (typeof file !== 'string') {
|
||||||
|
return { valid: false, error: 'Each file must be a string' };
|
||||||
|
}
|
||||||
|
if (file.length > 500) {
|
||||||
|
return { valid: false, error: 'File path too long' };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate baseRef
|
||||||
|
if (baseRef !== undefined) {
|
||||||
|
if (typeof baseRef !== 'string') {
|
||||||
|
return { valid: false, error: 'baseRef must be a string' };
|
||||||
|
}
|
||||||
|
if (baseRef.length > MAX_BASE_REF_LENGTH) {
|
||||||
|
return { valid: false, error: 'baseRef is too long' };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate categories
|
||||||
|
if (categories !== undefined) {
|
||||||
|
if (!Array.isArray(categories)) {
|
||||||
|
return { valid: false, error: 'categories must be an array' };
|
||||||
|
}
|
||||||
|
for (const category of categories) {
|
||||||
|
if (!VALID_CATEGORIES.includes(category)) {
|
||||||
|
return { valid: false, error: `Invalid category: ${category}` };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate autoFix
|
||||||
|
if (autoFix !== undefined && typeof autoFix !== 'boolean') {
|
||||||
|
return { valid: false, error: 'autoFix must be a boolean' };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate thinkingLevel
|
||||||
|
if (thinkingLevel !== undefined) {
|
||||||
|
if (!VALID_THINKING_LEVELS.includes(thinkingLevel)) {
|
||||||
|
return { valid: false, error: `Invalid thinkingLevel: ${thinkingLevel}` };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { valid: true };
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createTriggerHandler(codeReviewService: CodeReviewService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
logger.info('========== /trigger endpoint called ==========');
|
||||||
|
|
||||||
|
try {
|
||||||
|
const body = req.body as TriggerRequestBody;
|
||||||
|
const { projectPath, files, baseRef, categories, autoFix, model, thinkingLevel } = body;
|
||||||
|
|
||||||
|
// Validate required parameters
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'projectPath is required',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// SECURITY: Validate all input parameters
|
||||||
|
const validation = validateRequestBody(body);
|
||||||
|
if (!validation.valid) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: validation.error,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if a review is already running
|
||||||
|
if (isRunning()) {
|
||||||
|
res.status(409).json({
|
||||||
|
success: false,
|
||||||
|
error: 'A code review is already in progress',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set up abort controller for cancellation
|
||||||
|
const abortController = new AbortController();
|
||||||
|
setRunningState(true, abortController, projectPath);
|
||||||
|
|
||||||
|
// Start the review in the background
|
||||||
|
codeReviewService
|
||||||
|
.executeReview({
|
||||||
|
projectPath,
|
||||||
|
files,
|
||||||
|
baseRef,
|
||||||
|
categories,
|
||||||
|
autoFix,
|
||||||
|
model,
|
||||||
|
thinkingLevel,
|
||||||
|
abortController,
|
||||||
|
})
|
||||||
|
.catch((error) => {
|
||||||
|
logError(error, 'Code review failed');
|
||||||
|
})
|
||||||
|
.finally(() => {
|
||||||
|
setRunningState(false, null, null);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Return immediate response
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'Code review started',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Trigger handler exception');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,17 +1,21 @@
|
|||||||
import { Router, Request, Response } from 'express';
|
import { Router, Request, Response } from 'express';
|
||||||
import { CodexUsageService } from '../../services/codex-usage-service.js';
|
import { CodexUsageService } from '../../services/codex-usage-service.js';
|
||||||
|
import { CodexModelCacheService } from '../../services/codex-model-cache-service.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
const logger = createLogger('Codex');
|
const logger = createLogger('Codex');
|
||||||
|
|
||||||
export function createCodexRoutes(service: CodexUsageService): Router {
|
export function createCodexRoutes(
|
||||||
|
usageService: CodexUsageService,
|
||||||
|
modelCacheService: CodexModelCacheService
|
||||||
|
): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
// Get current usage (attempts to fetch from Codex CLI)
|
// Get current usage (attempts to fetch from Codex CLI)
|
||||||
router.get('/usage', async (req: Request, res: Response) => {
|
router.get('/usage', async (_req: Request, res: Response) => {
|
||||||
try {
|
try {
|
||||||
// Check if Codex CLI is available first
|
// Check if Codex CLI is available first
|
||||||
const isAvailable = await service.isAvailable();
|
const isAvailable = await usageService.isAvailable();
|
||||||
if (!isAvailable) {
|
if (!isAvailable) {
|
||||||
// IMPORTANT: This endpoint is behind Automaker session auth already.
|
// IMPORTANT: This endpoint is behind Automaker session auth already.
|
||||||
// Use a 200 + error payload for Codex CLI issues so the UI doesn't
|
// Use a 200 + error payload for Codex CLI issues so the UI doesn't
|
||||||
@@ -23,7 +27,7 @@ export function createCodexRoutes(service: CodexUsageService): Router {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const usage = await service.fetchUsageData();
|
const usage = await usageService.fetchUsageData();
|
||||||
res.json(usage);
|
res.json(usage);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
const message = error instanceof Error ? error.message : 'Unknown error';
|
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||||
@@ -52,5 +56,35 @@ export function createCodexRoutes(service: CodexUsageService): Router {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Get available Codex models (cached)
|
||||||
|
router.get('/models', async (req: Request, res: Response) => {
|
||||||
|
try {
|
||||||
|
const forceRefresh = req.query.refresh === 'true';
|
||||||
|
const { models, cachedAt } = await modelCacheService.getModelsWithMetadata(forceRefresh);
|
||||||
|
|
||||||
|
if (models.length === 0) {
|
||||||
|
res.status(503).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Codex CLI not available or not authenticated',
|
||||||
|
message: "Please install Codex CLI and run 'codex login' to authenticate",
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
models,
|
||||||
|
cachedAt,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error fetching models:', error);
|
||||||
|
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: message,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,17 +11,18 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||||
import { PathNotAllowedError } from '@automaker/platform';
|
import { PathNotAllowedError } from '@automaker/platform';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
|
||||||
import * as secureFs from '../../../lib/secure-fs.js';
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPromptCustomization,
|
||||||
|
} from '../../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const logger = createLogger('DescribeFile');
|
const logger = createLogger('DescribeFile');
|
||||||
|
|
||||||
@@ -49,31 +50,6 @@ interface DescribeFileErrorResponse {
|
|||||||
error: string;
|
error: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract text content from Claude SDK response messages
|
|
||||||
*/
|
|
||||||
async function extractTextFromStream(
|
|
||||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
||||||
stream: AsyncIterable<any>
|
|
||||||
): Promise<string> {
|
|
||||||
let responseText = '';
|
|
||||||
|
|
||||||
for await (const msg of stream) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
|
|
||||||
for (const block of blocks) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
|
||||||
responseText = msg.result || responseText;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return responseText;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create the describe-file request handler
|
* Create the describe-file request handler
|
||||||
*
|
*
|
||||||
@@ -157,18 +133,17 @@ export function createDescribeFileHandler(
|
|||||||
// Get the filename for context
|
// Get the filename for context
|
||||||
const fileName = path.basename(resolvedPath);
|
const fileName = path.basename(resolvedPath);
|
||||||
|
|
||||||
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[DescribeFile]');
|
||||||
|
|
||||||
// Build prompt with file content passed as structured data
|
// Build prompt with file content passed as structured data
|
||||||
// The file content is included directly, not via tool invocation
|
// The file content is included directly, not via tool invocation
|
||||||
const instructionText = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
|
const prompt = `${prompts.contextDescription.describeFilePrompt}
|
||||||
|
|
||||||
Respond with ONLY the description text, no additional formatting, preamble, or explanation.
|
File: ${fileName}${truncated ? ' (truncated)' : ''}
|
||||||
|
|
||||||
File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
--- FILE CONTENT ---
|
||||||
|
${contentToAnalyze}`;
|
||||||
const promptContent = [
|
|
||||||
{ type: 'text' as const, text: instructionText },
|
|
||||||
{ type: 'text' as const, text: `\n\n--- FILE CONTENT ---\n${contentToAnalyze}` },
|
|
||||||
];
|
|
||||||
|
|
||||||
// Use the file's directory as the working directory
|
// Use the file's directory as the working directory
|
||||||
const cwd = path.dirname(resolvedPath);
|
const cwd = path.dirname(resolvedPath);
|
||||||
@@ -190,67 +165,19 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
|||||||
|
|
||||||
logger.info(`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`);
|
logger.info(`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`);
|
||||||
|
|
||||||
let description: string;
|
// Use simpleQuery - provider abstraction handles routing to correct provider
|
||||||
|
const result = await simpleQuery({
|
||||||
|
prompt,
|
||||||
|
model,
|
||||||
|
cwd,
|
||||||
|
maxTurns: 1,
|
||||||
|
allowedTools: [],
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true, // File description only reads, doesn't write
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
});
|
||||||
|
|
||||||
// Route to appropriate provider based on model type
|
const description = result.text;
|
||||||
if (isCursorModel(model)) {
|
|
||||||
// Use Cursor provider for Cursor models
|
|
||||||
logger.info(`Using Cursor provider for model: ${model}`);
|
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
|
||||||
const bareModel = stripProviderPrefix(model);
|
|
||||||
|
|
||||||
// Build a simple text prompt for Cursor (no multi-part content blocks)
|
|
||||||
const cursorPrompt = `${instructionText}\n\n--- FILE CONTENT ---\n${contentToAnalyze}`;
|
|
||||||
|
|
||||||
let responseText = '';
|
|
||||||
for await (const msg of provider.executeQuery({
|
|
||||||
prompt: cursorPrompt,
|
|
||||||
model: bareModel,
|
|
||||||
cwd,
|
|
||||||
maxTurns: 1,
|
|
||||||
allowedTools: [],
|
|
||||||
readOnly: true, // File description only reads, doesn't write
|
|
||||||
})) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
description = responseText;
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models
|
|
||||||
logger.info(`Using Claude SDK for model: ${model}`);
|
|
||||||
|
|
||||||
// Use centralized SDK options with proper cwd validation
|
|
||||||
// No tools needed since we're passing file content directly
|
|
||||||
const sdkOptions = createCustomOptions({
|
|
||||||
cwd,
|
|
||||||
model,
|
|
||||||
maxTurns: 1,
|
|
||||||
allowedTools: [],
|
|
||||||
autoLoadClaudeMd,
|
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
|
||||||
});
|
|
||||||
|
|
||||||
const promptGenerator = (async function* () {
|
|
||||||
yield {
|
|
||||||
type: 'user' as const,
|
|
||||||
session_id: '',
|
|
||||||
message: { role: 'user' as const, content: promptContent },
|
|
||||||
parent_tool_use_id: null,
|
|
||||||
};
|
|
||||||
})();
|
|
||||||
|
|
||||||
const stream = query({ prompt: promptGenerator, options: sdkOptions });
|
|
||||||
|
|
||||||
// Extract the description from the response
|
|
||||||
description = await extractTextFromStream(stream);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!description || description.trim().length === 0) {
|
if (!description || description.trim().length === 0) {
|
||||||
logger.warn('Received empty response from Claude');
|
logger.warn('Received empty response from Claude');
|
||||||
|
|||||||
@@ -12,16 +12,17 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import { createLogger, readImageAsBase64 } from '@automaker/utils';
|
import { createLogger, readImageAsBase64 } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
|
||||||
import * as secureFs from '../../../lib/secure-fs.js';
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPromptCustomization,
|
||||||
|
} from '../../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const logger = createLogger('DescribeImage');
|
const logger = createLogger('DescribeImage');
|
||||||
|
|
||||||
@@ -178,57 +179,10 @@ function mapDescribeImageError(rawMessage: string | undefined): {
|
|||||||
return baseResponse;
|
return baseResponse;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract text content from Claude SDK response messages and log high-signal stream events.
|
|
||||||
*/
|
|
||||||
async function extractTextFromStream(
|
|
||||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
||||||
stream: AsyncIterable<any>,
|
|
||||||
requestId: string
|
|
||||||
): Promise<string> {
|
|
||||||
let responseText = '';
|
|
||||||
let messageCount = 0;
|
|
||||||
|
|
||||||
logger.info(`[${requestId}] [Stream] Begin reading SDK stream...`);
|
|
||||||
|
|
||||||
for await (const msg of stream) {
|
|
||||||
messageCount++;
|
|
||||||
const msgType = msg?.type;
|
|
||||||
const msgSubtype = msg?.subtype;
|
|
||||||
|
|
||||||
// Keep this concise but informative. Full error object is logged in catch blocks.
|
|
||||||
logger.info(
|
|
||||||
`[${requestId}] [Stream] #${messageCount} type=${String(msgType)} subtype=${String(msgSubtype ?? '')}`
|
|
||||||
);
|
|
||||||
|
|
||||||
if (msgType === 'assistant' && msg.message?.content) {
|
|
||||||
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
|
|
||||||
logger.info(`[${requestId}] [Stream] assistant blocks=${blocks.length}`);
|
|
||||||
for (const block of blocks) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (msgType === 'result' && msgSubtype === 'success') {
|
|
||||||
if (typeof msg.result === 'string' && msg.result.length > 0) {
|
|
||||||
responseText = msg.result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info(
|
|
||||||
`[${requestId}] [Stream] End of stream. messages=${messageCount} textLength=${responseText.length}`
|
|
||||||
);
|
|
||||||
|
|
||||||
return responseText;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create the describe-image request handler
|
* Create the describe-image request handler
|
||||||
*
|
*
|
||||||
* Uses Claude SDK query with multi-part content blocks to include the image (base64),
|
* Uses the provider abstraction with multi-part content blocks to include the image (base64),
|
||||||
* matching the agent runner behavior.
|
* matching the agent runner behavior.
|
||||||
*
|
*
|
||||||
* @param settingsService - Optional settings service for loading autoLoadClaudeMd setting
|
* @param settingsService - Optional settings service for loading autoLoadClaudeMd setting
|
||||||
@@ -309,27 +263,6 @@ export function createDescribeImageHandler(
|
|||||||
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
|
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
|
||||||
);
|
);
|
||||||
|
|
||||||
// Build multi-part prompt with image block (no Read tool required)
|
|
||||||
const instructionText =
|
|
||||||
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
|
|
||||||
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
|
|
||||||
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
|
|
||||||
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
|
|
||||||
|
|
||||||
const promptContent = [
|
|
||||||
{ type: 'text' as const, text: instructionText },
|
|
||||||
{
|
|
||||||
type: 'image' as const,
|
|
||||||
source: {
|
|
||||||
type: 'base64' as const,
|
|
||||||
media_type: imageData.mimeType,
|
|
||||||
data: imageData.base64,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
logger.info(`[${requestId}] Built multi-part prompt blocks=${promptContent.length}`);
|
|
||||||
|
|
||||||
const cwd = path.dirname(actualPath);
|
const cwd = path.dirname(actualPath);
|
||||||
logger.info(`[${requestId}] Using cwd=${cwd}`);
|
logger.info(`[${requestId}] Using cwd=${cwd}`);
|
||||||
|
|
||||||
@@ -348,85 +281,58 @@ export function createDescribeImageHandler(
|
|||||||
|
|
||||||
logger.info(`[${requestId}] Using model: ${model}`);
|
logger.info(`[${requestId}] Using model: ${model}`);
|
||||||
|
|
||||||
let description: string;
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[DescribeImage]');
|
||||||
|
|
||||||
|
// Build the instruction text from centralized prompts
|
||||||
|
const instructionText = prompts.contextDescription.describeImagePrompt;
|
||||||
|
|
||||||
|
// Build prompt based on provider capability
|
||||||
|
// Some providers (like Cursor) may not support image content blocks
|
||||||
|
let prompt: string | Array<{ type: string; text?: string; source?: object }>;
|
||||||
|
|
||||||
// Route to appropriate provider based on model type
|
|
||||||
if (isCursorModel(model)) {
|
if (isCursorModel(model)) {
|
||||||
// Use Cursor provider for Cursor models
|
// Cursor may not support base64 image blocks directly
|
||||||
// Note: Cursor may have limited support for image content blocks
|
// Use text prompt with image path reference
|
||||||
logger.info(`[${requestId}] Using Cursor provider for model: ${model}`);
|
logger.info(`[${requestId}] Using text prompt for Cursor model`);
|
||||||
|
prompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
|
||||||
const bareModel = stripProviderPrefix(model);
|
|
||||||
|
|
||||||
// Build prompt with image reference for Cursor
|
|
||||||
// Note: Cursor CLI may not support base64 image blocks directly,
|
|
||||||
// so we include the image path as context
|
|
||||||
const cursorPrompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
|
|
||||||
|
|
||||||
let responseText = '';
|
|
||||||
const queryStart = Date.now();
|
|
||||||
for await (const msg of provider.executeQuery({
|
|
||||||
prompt: cursorPrompt,
|
|
||||||
model: bareModel,
|
|
||||||
cwd,
|
|
||||||
maxTurns: 1,
|
|
||||||
allowedTools: ['Read'], // Allow Read tool so Cursor can read the image if needed
|
|
||||||
readOnly: true, // Image description only reads, doesn't write
|
|
||||||
})) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logger.info(`[${requestId}] Cursor query completed in ${Date.now() - queryStart}ms`);
|
|
||||||
description = responseText;
|
|
||||||
} else {
|
} else {
|
||||||
// Use Claude SDK for Claude models (supports image content blocks)
|
// Claude and other vision-capable models support multi-part prompts with images
|
||||||
logger.info(`[${requestId}] Using Claude SDK for model: ${model}`);
|
logger.info(`[${requestId}] Using multi-part prompt with image block`);
|
||||||
|
prompt = [
|
||||||
// Use the same centralized option builder used across the server (validates cwd)
|
{ type: 'text', text: instructionText },
|
||||||
const sdkOptions = createCustomOptions({
|
{
|
||||||
cwd,
|
type: 'image',
|
||||||
model,
|
source: {
|
||||||
maxTurns: 1,
|
type: 'base64',
|
||||||
allowedTools: [],
|
media_type: imageData.mimeType,
|
||||||
autoLoadClaudeMd,
|
data: imageData.base64,
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
},
|
||||||
});
|
},
|
||||||
|
];
|
||||||
logger.info(
|
|
||||||
`[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
|
|
||||||
sdkOptions.allowedTools
|
|
||||||
)}`
|
|
||||||
);
|
|
||||||
|
|
||||||
const promptGenerator = (async function* () {
|
|
||||||
yield {
|
|
||||||
type: 'user' as const,
|
|
||||||
session_id: '',
|
|
||||||
message: { role: 'user' as const, content: promptContent },
|
|
||||||
parent_tool_use_id: null,
|
|
||||||
};
|
|
||||||
})();
|
|
||||||
|
|
||||||
logger.info(`[${requestId}] Calling query()...`);
|
|
||||||
const queryStart = Date.now();
|
|
||||||
const stream = query({ prompt: promptGenerator, options: sdkOptions });
|
|
||||||
logger.info(`[${requestId}] query() returned stream in ${Date.now() - queryStart}ms`);
|
|
||||||
|
|
||||||
// Extract the description from the response
|
|
||||||
const extractStart = Date.now();
|
|
||||||
description = await extractTextFromStream(stream, requestId);
|
|
||||||
logger.info(`[${requestId}] extractMs=${Date.now() - extractStart}`);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.info(`[${requestId}] Calling simpleQuery...`);
|
||||||
|
const queryStart = Date.now();
|
||||||
|
|
||||||
|
// Use simpleQuery - provider abstraction handles routing
|
||||||
|
const result = await simpleQuery({
|
||||||
|
prompt,
|
||||||
|
model,
|
||||||
|
cwd,
|
||||||
|
maxTurns: 1,
|
||||||
|
allowedTools: isCursorModel(model) ? ['Read'] : [], // Allow Read for Cursor to read image if needed
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true, // Image description only reads, doesn't write
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.info(`[${requestId}] simpleQuery completed in ${Date.now() - queryStart}ms`);
|
||||||
|
|
||||||
|
const description = result.text;
|
||||||
|
|
||||||
if (!description || description.trim().length === 0) {
|
if (!description || description.trim().length === 0) {
|
||||||
logger.warn(`[${requestId}] Received empty response from Claude`);
|
logger.warn(`[${requestId}] Received empty response from AI`);
|
||||||
const response: DescribeImageErrorResponse = {
|
const response: DescribeImageErrorResponse = {
|
||||||
success: false,
|
success: false,
|
||||||
error: 'Failed to generate description - empty response',
|
error: 'Failed to generate description - empty response',
|
||||||
|
|||||||
@@ -1,22 +1,16 @@
|
|||||||
/**
|
/**
|
||||||
* POST /enhance-prompt endpoint - Enhance user input text
|
* POST /enhance-prompt endpoint - Enhance user input text
|
||||||
*
|
*
|
||||||
* Uses Claude AI or Cursor to enhance text based on the specified enhancement mode.
|
* Uses the provider abstraction to enhance text based on the specified
|
||||||
* Supports modes: improve, technical, simplify, acceptance
|
* enhancement mode. Works with any configured provider (Claude, Cursor, etc.).
|
||||||
|
* Supports modes: improve, technical, simplify, acceptance, ux-reviewer
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { resolveModelString } from '@automaker/model-resolver';
|
import { resolveModelString } from '@automaker/model-resolver';
|
||||||
import {
|
import { CLAUDE_MODEL_MAP, type ThinkingLevel } from '@automaker/types';
|
||||||
CLAUDE_MODEL_MAP,
|
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||||
isCursorModel,
|
|
||||||
stripProviderPrefix,
|
|
||||||
ThinkingLevel,
|
|
||||||
getThinkingTokenBudget,
|
|
||||||
} from '@automaker/types';
|
|
||||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
|
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
|
||||||
import {
|
import {
|
||||||
@@ -37,7 +31,7 @@ interface EnhanceRequestBody {
|
|||||||
enhancementMode: string;
|
enhancementMode: string;
|
||||||
/** Optional model override */
|
/** Optional model override */
|
||||||
model?: string;
|
model?: string;
|
||||||
/** Optional thinking level for Claude models (ignored for Cursor models) */
|
/** Optional thinking level for Claude models */
|
||||||
thinkingLevel?: ThinkingLevel;
|
thinkingLevel?: ThinkingLevel;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,76 +51,6 @@ interface EnhanceErrorResponse {
|
|||||||
error: string;
|
error: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract text content from Claude SDK response messages
|
|
||||||
*
|
|
||||||
* @param stream - The async iterable from the query function
|
|
||||||
* @returns The extracted text content
|
|
||||||
*/
|
|
||||||
async function extractTextFromStream(
|
|
||||||
stream: AsyncIterable<{
|
|
||||||
type: string;
|
|
||||||
subtype?: string;
|
|
||||||
result?: string;
|
|
||||||
message?: {
|
|
||||||
content?: Array<{ type: string; text?: string }>;
|
|
||||||
};
|
|
||||||
}>
|
|
||||||
): Promise<string> {
|
|
||||||
let responseText = '';
|
|
||||||
|
|
||||||
for await (const msg of stream) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
|
||||||
responseText = msg.result || responseText;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return responseText;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Execute enhancement using Cursor provider
|
|
||||||
*
|
|
||||||
* @param prompt - The enhancement prompt
|
|
||||||
* @param model - The Cursor model to use
|
|
||||||
* @returns The enhanced text
|
|
||||||
*/
|
|
||||||
async function executeWithCursor(prompt: string, model: string): Promise<string> {
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
|
||||||
const bareModel = stripProviderPrefix(model);
|
|
||||||
|
|
||||||
let responseText = '';
|
|
||||||
|
|
||||||
for await (const msg of provider.executeQuery({
|
|
||||||
prompt,
|
|
||||||
model: bareModel,
|
|
||||||
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
|
|
||||||
readOnly: true, // Prompt enhancement only generates text, doesn't write files
|
|
||||||
})) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
|
||||||
// Use result if it's a final accumulated message
|
|
||||||
if (msg.result.length > responseText.length) {
|
|
||||||
responseText = msg.result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return responseText;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create the enhance request handler
|
* Create the enhance request handler
|
||||||
*
|
*
|
||||||
@@ -188,13 +112,13 @@ export function createEnhanceHandler(
|
|||||||
technical: prompts.enhancement.technicalSystemPrompt,
|
technical: prompts.enhancement.technicalSystemPrompt,
|
||||||
simplify: prompts.enhancement.simplifySystemPrompt,
|
simplify: prompts.enhancement.simplifySystemPrompt,
|
||||||
acceptance: prompts.enhancement.acceptanceSystemPrompt,
|
acceptance: prompts.enhancement.acceptanceSystemPrompt,
|
||||||
|
'ux-reviewer': prompts.enhancement.uxReviewerSystemPrompt,
|
||||||
};
|
};
|
||||||
const systemPrompt = systemPromptMap[validMode];
|
const systemPrompt = systemPromptMap[validMode];
|
||||||
|
|
||||||
logger.debug(`Using ${validMode} system prompt (length: ${systemPrompt.length} chars)`);
|
logger.debug(`Using ${validMode} system prompt (length: ${systemPrompt.length} chars)`);
|
||||||
|
|
||||||
// Build the user prompt with few-shot examples
|
// Build the user prompt with few-shot examples
|
||||||
// This helps the model understand this is text transformation, not a coding task
|
|
||||||
const userPrompt = buildUserPrompt(validMode, trimmedText, true);
|
const userPrompt = buildUserPrompt(validMode, trimmedText, true);
|
||||||
|
|
||||||
// Resolve the model - use the passed model, default to sonnet for quality
|
// Resolve the model - use the passed model, default to sonnet for quality
|
||||||
@@ -202,40 +126,20 @@ export function createEnhanceHandler(
|
|||||||
|
|
||||||
logger.debug(`Using model: ${resolvedModel}`);
|
logger.debug(`Using model: ${resolvedModel}`);
|
||||||
|
|
||||||
let enhancedText: string;
|
// Use simpleQuery - provider abstraction handles routing to correct provider
|
||||||
|
// The system prompt is combined with user prompt since some providers
|
||||||
|
// don't have a separate system prompt concept
|
||||||
|
const result = await simpleQuery({
|
||||||
|
prompt: `${systemPrompt}\n\n${userPrompt}`,
|
||||||
|
model: resolvedModel,
|
||||||
|
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
|
||||||
|
maxTurns: 1,
|
||||||
|
allowedTools: [],
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true, // Prompt enhancement only generates text, doesn't write files
|
||||||
|
});
|
||||||
|
|
||||||
// Route to appropriate provider based on model
|
const enhancedText = result.text;
|
||||||
if (isCursorModel(resolvedModel)) {
|
|
||||||
// Use Cursor provider for Cursor models
|
|
||||||
logger.info(`Using Cursor provider for model: ${resolvedModel}`);
|
|
||||||
|
|
||||||
// Cursor doesn't have a separate system prompt concept, so combine them
|
|
||||||
const combinedPrompt = `${systemPrompt}\n\n${userPrompt}`;
|
|
||||||
enhancedText = await executeWithCursor(combinedPrompt, resolvedModel);
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models
|
|
||||||
logger.info(`Using Claude provider for model: ${resolvedModel}`);
|
|
||||||
|
|
||||||
// Convert thinkingLevel to maxThinkingTokens for SDK
|
|
||||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
|
||||||
const queryOptions: Parameters<typeof query>[0]['options'] = {
|
|
||||||
model: resolvedModel,
|
|
||||||
systemPrompt,
|
|
||||||
maxTurns: 1,
|
|
||||||
allowedTools: [],
|
|
||||||
permissionMode: 'acceptEdits',
|
|
||||||
};
|
|
||||||
if (maxThinkingTokens) {
|
|
||||||
queryOptions.maxThinkingTokens = maxThinkingTokens;
|
|
||||||
}
|
|
||||||
|
|
||||||
const stream = query({
|
|
||||||
prompt: userPrompt,
|
|
||||||
options: queryOptions,
|
|
||||||
});
|
|
||||||
|
|
||||||
enhancedText = await extractTextFromStream(stream);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!enhancedText || enhancedText.trim().length === 0) {
|
if (!enhancedText || enhancedText.trim().length === 0) {
|
||||||
logger.warn('Received empty response from AI');
|
logger.warn('Received empty response from AI');
|
||||||
|
|||||||
19
apps/server/src/routes/event-history/common.ts
Normal file
19
apps/server/src/routes/event-history/common.ts
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
/**
|
||||||
|
* Common utilities for event history routes
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
|
||||||
|
|
||||||
|
/** Logger instance for event history operations */
|
||||||
|
export const logger = createLogger('EventHistory');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract user-friendly error message from error objects
|
||||||
|
*/
|
||||||
|
export { getErrorMessageShared as getErrorMessage };
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Log error with automatic logger binding
|
||||||
|
*/
|
||||||
|
export const logError = createLogError(logger);
|
||||||
68
apps/server/src/routes/event-history/index.ts
Normal file
68
apps/server/src/routes/event-history/index.ts
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
/**
|
||||||
|
* Event History routes - HTTP API for event history management
|
||||||
|
*
|
||||||
|
* Provides endpoints for:
|
||||||
|
* - Listing events with filtering
|
||||||
|
* - Getting individual event details
|
||||||
|
* - Deleting events
|
||||||
|
* - Clearing all events
|
||||||
|
* - Replaying events to test hooks
|
||||||
|
*
|
||||||
|
* Mounted at /api/event-history in the main server.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Router } from 'express';
|
||||||
|
import type { EventHistoryService } from '../../services/event-history-service.js';
|
||||||
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
|
import { createListHandler } from './routes/list.js';
|
||||||
|
import { createGetHandler } from './routes/get.js';
|
||||||
|
import { createDeleteHandler } from './routes/delete.js';
|
||||||
|
import { createClearHandler } from './routes/clear.js';
|
||||||
|
import { createReplayHandler } from './routes/replay.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create event history router with all endpoints
|
||||||
|
*
|
||||||
|
* Endpoints:
|
||||||
|
* - POST /list - List events with optional filtering
|
||||||
|
* - POST /get - Get a single event by ID
|
||||||
|
* - POST /delete - Delete an event by ID
|
||||||
|
* - POST /clear - Clear all events for a project
|
||||||
|
* - POST /replay - Replay an event to trigger hooks
|
||||||
|
*
|
||||||
|
* @param eventHistoryService - Instance of EventHistoryService
|
||||||
|
* @param settingsService - Instance of SettingsService (for replay)
|
||||||
|
* @returns Express Router configured with all event history endpoints
|
||||||
|
*/
|
||||||
|
export function createEventHistoryRoutes(
|
||||||
|
eventHistoryService: EventHistoryService,
|
||||||
|
settingsService: SettingsService
|
||||||
|
): Router {
|
||||||
|
const router = Router();
|
||||||
|
|
||||||
|
// List events with filtering
|
||||||
|
router.post('/list', validatePathParams('projectPath'), createListHandler(eventHistoryService));
|
||||||
|
|
||||||
|
// Get single event
|
||||||
|
router.post('/get', validatePathParams('projectPath'), createGetHandler(eventHistoryService));
|
||||||
|
|
||||||
|
// Delete event
|
||||||
|
router.post(
|
||||||
|
'/delete',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createDeleteHandler(eventHistoryService)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Clear all events
|
||||||
|
router.post('/clear', validatePathParams('projectPath'), createClearHandler(eventHistoryService));
|
||||||
|
|
||||||
|
// Replay event
|
||||||
|
router.post(
|
||||||
|
'/replay',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createReplayHandler(eventHistoryService, settingsService)
|
||||||
|
);
|
||||||
|
|
||||||
|
return router;
|
||||||
|
}
|
||||||
33
apps/server/src/routes/event-history/routes/clear.ts
Normal file
33
apps/server/src/routes/event-history/routes/clear.ts
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/event-history/clear - Clear all events for a project
|
||||||
|
*
|
||||||
|
* Request body: { projectPath: string }
|
||||||
|
* Response: { success: true, cleared: number }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { EventHistoryService } from '../../../services/event-history-service.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
export function createClearHandler(eventHistoryService: EventHistoryService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body as { projectPath: string };
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const cleared = await eventHistoryService.clearEvents(projectPath);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
cleared,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Clear events failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
43
apps/server/src/routes/event-history/routes/delete.ts
Normal file
43
apps/server/src/routes/event-history/routes/delete.ts
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/event-history/delete - Delete an event by ID
|
||||||
|
*
|
||||||
|
* Request body: { projectPath: string, eventId: string }
|
||||||
|
* Response: { success: true } or { success: false, error: string }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { EventHistoryService } from '../../../services/event-history-service.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
export function createDeleteHandler(eventHistoryService: EventHistoryService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, eventId } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
eventId: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!eventId || typeof eventId !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'eventId is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const deleted = await eventHistoryService.deleteEvent(projectPath, eventId);
|
||||||
|
|
||||||
|
if (!deleted) {
|
||||||
|
res.status(404).json({ success: false, error: 'Event not found' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.json({ success: true });
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Delete event failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
46
apps/server/src/routes/event-history/routes/get.ts
Normal file
46
apps/server/src/routes/event-history/routes/get.ts
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/event-history/get - Get a single event by ID
|
||||||
|
*
|
||||||
|
* Request body: { projectPath: string, eventId: string }
|
||||||
|
* Response: { success: true, event: StoredEvent } or { success: false, error: string }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { EventHistoryService } from '../../../services/event-history-service.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
export function createGetHandler(eventHistoryService: EventHistoryService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, eventId } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
eventId: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!eventId || typeof eventId !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'eventId is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const event = await eventHistoryService.getEvent(projectPath, eventId);
|
||||||
|
|
||||||
|
if (!event) {
|
||||||
|
res.status(404).json({ success: false, error: 'Event not found' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
event,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Get event failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
53
apps/server/src/routes/event-history/routes/list.ts
Normal file
53
apps/server/src/routes/event-history/routes/list.ts
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/event-history/list - List events for a project
|
||||||
|
*
|
||||||
|
* Request body: {
|
||||||
|
* projectPath: string,
|
||||||
|
* filter?: {
|
||||||
|
* trigger?: EventHookTrigger,
|
||||||
|
* featureId?: string,
|
||||||
|
* since?: string,
|
||||||
|
* until?: string,
|
||||||
|
* limit?: number,
|
||||||
|
* offset?: number
|
||||||
|
* }
|
||||||
|
* }
|
||||||
|
* Response: { success: true, events: StoredEventSummary[], total: number }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { EventHistoryService } from '../../../services/event-history-service.js';
|
||||||
|
import type { EventHistoryFilter } from '@automaker/types';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
export function createListHandler(eventHistoryService: EventHistoryService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, filter } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
filter?: EventHistoryFilter;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const events = await eventHistoryService.getEvents(projectPath, filter);
|
||||||
|
const total = await eventHistoryService.getEventCount(projectPath, {
|
||||||
|
...filter,
|
||||||
|
limit: undefined,
|
||||||
|
offset: undefined,
|
||||||
|
});
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
events,
|
||||||
|
total,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'List events failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
234
apps/server/src/routes/event-history/routes/replay.ts
Normal file
234
apps/server/src/routes/event-history/routes/replay.ts
Normal file
@@ -0,0 +1,234 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/event-history/replay - Replay an event to trigger hooks
|
||||||
|
*
|
||||||
|
* Request body: {
|
||||||
|
* projectPath: string,
|
||||||
|
* eventId: string,
|
||||||
|
* hookIds?: string[] // Optional: specific hooks to run (if not provided, runs all enabled matching hooks)
|
||||||
|
* }
|
||||||
|
* Response: { success: true, result: EventReplayResult }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { EventHistoryService } from '../../../services/event-history-service.js';
|
||||||
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
|
import type { EventReplayResult, EventReplayHookResult, EventHook } from '@automaker/types';
|
||||||
|
import { exec } from 'child_process';
|
||||||
|
import { promisify } from 'util';
|
||||||
|
import { getErrorMessage, logError, logger } from '../common.js';
|
||||||
|
|
||||||
|
const execAsync = promisify(exec);
|
||||||
|
|
||||||
|
/** Default timeout for shell commands (30 seconds) */
|
||||||
|
const DEFAULT_SHELL_TIMEOUT = 30000;
|
||||||
|
|
||||||
|
/** Default timeout for HTTP requests (10 seconds) */
|
||||||
|
const DEFAULT_HTTP_TIMEOUT = 10000;
|
||||||
|
|
||||||
|
interface HookContext {
|
||||||
|
featureId?: string;
|
||||||
|
featureName?: string;
|
||||||
|
projectPath?: string;
|
||||||
|
projectName?: string;
|
||||||
|
error?: string;
|
||||||
|
errorType?: string;
|
||||||
|
timestamp: string;
|
||||||
|
eventType: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Substitute {{variable}} placeholders in a string
|
||||||
|
*/
|
||||||
|
function substituteVariables(template: string, context: HookContext): string {
|
||||||
|
return template.replace(/\{\{(\w+)\}\}/g, (match, variable) => {
|
||||||
|
const value = context[variable as keyof HookContext];
|
||||||
|
if (value === undefined || value === null) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
return String(value);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a single hook and return the result
|
||||||
|
*/
|
||||||
|
async function executeHook(hook: EventHook, context: HookContext): Promise<EventReplayHookResult> {
|
||||||
|
const hookName = hook.name || hook.id;
|
||||||
|
const startTime = Date.now();
|
||||||
|
|
||||||
|
try {
|
||||||
|
if (hook.action.type === 'shell') {
|
||||||
|
const command = substituteVariables(hook.action.command, context);
|
||||||
|
const timeout = hook.action.timeout || DEFAULT_SHELL_TIMEOUT;
|
||||||
|
|
||||||
|
logger.info(`Replaying shell hook "${hookName}": ${command}`);
|
||||||
|
|
||||||
|
await execAsync(command, {
|
||||||
|
timeout,
|
||||||
|
maxBuffer: 1024 * 1024,
|
||||||
|
});
|
||||||
|
|
||||||
|
return {
|
||||||
|
hookId: hook.id,
|
||||||
|
hookName: hook.name,
|
||||||
|
success: true,
|
||||||
|
durationMs: Date.now() - startTime,
|
||||||
|
};
|
||||||
|
} else if (hook.action.type === 'http') {
|
||||||
|
const url = substituteVariables(hook.action.url, context);
|
||||||
|
const method = hook.action.method || 'POST';
|
||||||
|
|
||||||
|
const headers: Record<string, string> = {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
};
|
||||||
|
if (hook.action.headers) {
|
||||||
|
for (const [key, value] of Object.entries(hook.action.headers)) {
|
||||||
|
headers[key] = substituteVariables(value, context);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let body: string | undefined;
|
||||||
|
if (hook.action.body) {
|
||||||
|
body = substituteVariables(hook.action.body, context);
|
||||||
|
} else if (method !== 'GET') {
|
||||||
|
body = JSON.stringify({
|
||||||
|
eventType: context.eventType,
|
||||||
|
timestamp: context.timestamp,
|
||||||
|
featureId: context.featureId,
|
||||||
|
projectPath: context.projectPath,
|
||||||
|
projectName: context.projectName,
|
||||||
|
error: context.error,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Replaying HTTP hook "${hookName}": ${method} ${url}`);
|
||||||
|
|
||||||
|
const controller = new AbortController();
|
||||||
|
const timeoutId = setTimeout(() => controller.abort(), DEFAULT_HTTP_TIMEOUT);
|
||||||
|
|
||||||
|
const response = await fetch(url, {
|
||||||
|
method,
|
||||||
|
headers,
|
||||||
|
body: method !== 'GET' ? body : undefined,
|
||||||
|
signal: controller.signal,
|
||||||
|
});
|
||||||
|
|
||||||
|
clearTimeout(timeoutId);
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
return {
|
||||||
|
hookId: hook.id,
|
||||||
|
hookName: hook.name,
|
||||||
|
success: false,
|
||||||
|
error: `HTTP ${response.status}: ${response.statusText}`,
|
||||||
|
durationMs: Date.now() - startTime,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
hookId: hook.id,
|
||||||
|
hookName: hook.name,
|
||||||
|
success: true,
|
||||||
|
durationMs: Date.now() - startTime,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
hookId: hook.id,
|
||||||
|
hookName: hook.name,
|
||||||
|
success: false,
|
||||||
|
error: 'Unknown hook action type',
|
||||||
|
durationMs: Date.now() - startTime,
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
const errorMessage =
|
||||||
|
error instanceof Error
|
||||||
|
? error.name === 'AbortError'
|
||||||
|
? 'Request timed out'
|
||||||
|
: error.message
|
||||||
|
: String(error);
|
||||||
|
|
||||||
|
return {
|
||||||
|
hookId: hook.id,
|
||||||
|
hookName: hook.name,
|
||||||
|
success: false,
|
||||||
|
error: errorMessage,
|
||||||
|
durationMs: Date.now() - startTime,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createReplayHandler(
|
||||||
|
eventHistoryService: EventHistoryService,
|
||||||
|
settingsService: SettingsService
|
||||||
|
) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, eventId, hookIds } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
eventId: string;
|
||||||
|
hookIds?: string[];
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!eventId || typeof eventId !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'eventId is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the event
|
||||||
|
const event = await eventHistoryService.getEvent(projectPath, eventId);
|
||||||
|
if (!event) {
|
||||||
|
res.status(404).json({ success: false, error: 'Event not found' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get hooks from settings
|
||||||
|
const settings = await settingsService.getGlobalSettings();
|
||||||
|
let hooks = settings.eventHooks || [];
|
||||||
|
|
||||||
|
// Filter to matching trigger and enabled hooks
|
||||||
|
hooks = hooks.filter((h) => h.enabled && h.trigger === event.trigger);
|
||||||
|
|
||||||
|
// If specific hook IDs requested, filter to those
|
||||||
|
if (hookIds && hookIds.length > 0) {
|
||||||
|
hooks = hooks.filter((h) => hookIds.includes(h.id));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build context for variable substitution
|
||||||
|
const context: HookContext = {
|
||||||
|
featureId: event.featureId,
|
||||||
|
featureName: event.featureName,
|
||||||
|
projectPath: event.projectPath,
|
||||||
|
projectName: event.projectName,
|
||||||
|
error: event.error,
|
||||||
|
errorType: event.errorType,
|
||||||
|
timestamp: event.timestamp,
|
||||||
|
eventType: event.trigger,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Execute all hooks in parallel
|
||||||
|
const hookResults = await Promise.all(hooks.map((hook) => executeHook(hook, context)));
|
||||||
|
|
||||||
|
const result: EventReplayResult = {
|
||||||
|
eventId,
|
||||||
|
hooksTriggered: hooks.length,
|
||||||
|
hookResults,
|
||||||
|
};
|
||||||
|
|
||||||
|
logger.info(`Replayed event ${eventId}: ${hooks.length} hooks triggered`);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
result,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Replay event failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -4,32 +4,48 @@
|
|||||||
|
|
||||||
import { Router } from 'express';
|
import { Router } from 'express';
|
||||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import { validatePathParams } from '../../middleware/validate-paths.js';
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
import { createListHandler } from './routes/list.js';
|
import { createListHandler } from './routes/list.js';
|
||||||
import { createGetHandler } from './routes/get.js';
|
import { createGetHandler } from './routes/get.js';
|
||||||
import { createCreateHandler } from './routes/create.js';
|
import { createCreateHandler } from './routes/create.js';
|
||||||
import { createUpdateHandler } from './routes/update.js';
|
import { createUpdateHandler } from './routes/update.js';
|
||||||
import { createBulkUpdateHandler } from './routes/bulk-update.js';
|
import { createBulkUpdateHandler } from './routes/bulk-update.js';
|
||||||
|
import { createBulkDeleteHandler } from './routes/bulk-delete.js';
|
||||||
import { createDeleteHandler } from './routes/delete.js';
|
import { createDeleteHandler } from './routes/delete.js';
|
||||||
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
|
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
|
||||||
import { createGenerateTitleHandler } from './routes/generate-title.js';
|
import { createGenerateTitleHandler } from './routes/generate-title.js';
|
||||||
|
|
||||||
export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
|
export function createFeaturesRoutes(
|
||||||
|
featureLoader: FeatureLoader,
|
||||||
|
settingsService?: SettingsService,
|
||||||
|
events?: EventEmitter
|
||||||
|
): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
router.post('/list', validatePathParams('projectPath'), createListHandler(featureLoader));
|
router.post('/list', validatePathParams('projectPath'), createListHandler(featureLoader));
|
||||||
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
|
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
|
||||||
router.post('/create', validatePathParams('projectPath'), createCreateHandler(featureLoader));
|
router.post(
|
||||||
|
'/create',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createCreateHandler(featureLoader, events)
|
||||||
|
);
|
||||||
router.post('/update', validatePathParams('projectPath'), createUpdateHandler(featureLoader));
|
router.post('/update', validatePathParams('projectPath'), createUpdateHandler(featureLoader));
|
||||||
router.post(
|
router.post(
|
||||||
'/bulk-update',
|
'/bulk-update',
|
||||||
validatePathParams('projectPath'),
|
validatePathParams('projectPath'),
|
||||||
createBulkUpdateHandler(featureLoader)
|
createBulkUpdateHandler(featureLoader)
|
||||||
);
|
);
|
||||||
|
router.post(
|
||||||
|
'/bulk-delete',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createBulkDeleteHandler(featureLoader)
|
||||||
|
);
|
||||||
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
|
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
|
||||||
router.post('/agent-output', createAgentOutputHandler(featureLoader));
|
router.post('/agent-output', createAgentOutputHandler(featureLoader));
|
||||||
router.post('/raw-output', createRawOutputHandler(featureLoader));
|
router.post('/raw-output', createRawOutputHandler(featureLoader));
|
||||||
router.post('/generate-title', createGenerateTitleHandler());
|
router.post('/generate-title', createGenerateTitleHandler(settingsService));
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
69
apps/server/src/routes/features/routes/bulk-delete.ts
Normal file
69
apps/server/src/routes/features/routes/bulk-delete.ts
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
/**
|
||||||
|
* POST /bulk-delete endpoint - Delete multiple features at once
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
interface BulkDeleteRequest {
|
||||||
|
projectPath: string;
|
||||||
|
featureIds: string[];
|
||||||
|
}
|
||||||
|
|
||||||
|
interface BulkDeleteResult {
|
||||||
|
featureId: string;
|
||||||
|
success: boolean;
|
||||||
|
error?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createBulkDeleteHandler(featureLoader: FeatureLoader) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, featureIds } = req.body as BulkDeleteRequest;
|
||||||
|
|
||||||
|
if (!projectPath || !featureIds || !Array.isArray(featureIds) || featureIds.length === 0) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'projectPath and featureIds (non-empty array) are required',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process in parallel batches of 20 for efficiency
|
||||||
|
const BATCH_SIZE = 20;
|
||||||
|
const results: BulkDeleteResult[] = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < featureIds.length; i += BATCH_SIZE) {
|
||||||
|
const batch = featureIds.slice(i, i + BATCH_SIZE);
|
||||||
|
const batchResults = await Promise.all(
|
||||||
|
batch.map(async (featureId) => {
|
||||||
|
const success = await featureLoader.delete(projectPath, featureId);
|
||||||
|
if (success) {
|
||||||
|
return { featureId, success: true };
|
||||||
|
}
|
||||||
|
return {
|
||||||
|
featureId,
|
||||||
|
success: false,
|
||||||
|
error: 'Deletion failed. Check server logs for details.',
|
||||||
|
};
|
||||||
|
})
|
||||||
|
);
|
||||||
|
results.push(...batchResults);
|
||||||
|
}
|
||||||
|
|
||||||
|
const successCount = results.reduce((count, r) => count + (r.success ? 1 : 0), 0);
|
||||||
|
const failureCount = results.length - successCount;
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: failureCount === 0,
|
||||||
|
deletedCount: successCount,
|
||||||
|
failedCount: failureCount,
|
||||||
|
results,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Bulk delete features failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -43,17 +43,36 @@ export function createBulkUpdateHandler(featureLoader: FeatureLoader) {
|
|||||||
const results: BulkUpdateResult[] = [];
|
const results: BulkUpdateResult[] = [];
|
||||||
const updatedFeatures: Feature[] = [];
|
const updatedFeatures: Feature[] = [];
|
||||||
|
|
||||||
for (const featureId of featureIds) {
|
// Process in parallel batches of 20 for efficiency
|
||||||
try {
|
const BATCH_SIZE = 20;
|
||||||
const updated = await featureLoader.update(projectPath, featureId, updates);
|
for (let i = 0; i < featureIds.length; i += BATCH_SIZE) {
|
||||||
results.push({ featureId, success: true });
|
const batch = featureIds.slice(i, i + BATCH_SIZE);
|
||||||
updatedFeatures.push(updated);
|
const batchResults = await Promise.all(
|
||||||
} catch (error) {
|
batch.map(async (featureId) => {
|
||||||
results.push({
|
try {
|
||||||
featureId,
|
const updated = await featureLoader.update(projectPath, featureId, updates);
|
||||||
success: false,
|
return { featureId, success: true as const, feature: updated };
|
||||||
error: getErrorMessage(error),
|
} catch (error) {
|
||||||
});
|
return {
|
||||||
|
featureId,
|
||||||
|
success: false as const,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
for (const result of batchResults) {
|
||||||
|
if (result.success) {
|
||||||
|
results.push({ featureId: result.featureId, success: true });
|
||||||
|
updatedFeatures.push(result.feature);
|
||||||
|
} else {
|
||||||
|
results.push({
|
||||||
|
featureId: result.featureId,
|
||||||
|
success: false,
|
||||||
|
error: result.error,
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,10 +4,11 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
|
import type { EventEmitter } from '../../../lib/events.js';
|
||||||
import type { Feature } from '@automaker/types';
|
import type { Feature } from '@automaker/types';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createCreateHandler(featureLoader: FeatureLoader) {
|
export function createCreateHandler(featureLoader: FeatureLoader, events?: EventEmitter) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, feature } = req.body as {
|
const { projectPath, feature } = req.body as {
|
||||||
@@ -23,7 +24,30 @@ export function createCreateHandler(featureLoader: FeatureLoader) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check for duplicate title if title is provided
|
||||||
|
if (feature.title && feature.title.trim()) {
|
||||||
|
const duplicate = await featureLoader.findDuplicateTitle(projectPath, feature.title);
|
||||||
|
if (duplicate) {
|
||||||
|
res.status(409).json({
|
||||||
|
success: false,
|
||||||
|
error: `A feature with title "${feature.title}" already exists`,
|
||||||
|
duplicateFeatureId: duplicate.id,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const created = await featureLoader.create(projectPath, feature);
|
const created = await featureLoader.create(projectPath, feature);
|
||||||
|
|
||||||
|
// Emit feature_created event for hooks
|
||||||
|
if (events) {
|
||||||
|
events.emit('feature:created', {
|
||||||
|
featureId: created.id,
|
||||||
|
featureName: created.name,
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
res.json({ success: true, feature: created });
|
res.json({ success: true, feature: created });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'Create feature failed');
|
logError(error, 'Create feature failed');
|
||||||
|
|||||||
@@ -1,13 +1,16 @@
|
|||||||
/**
|
/**
|
||||||
* POST /features/generate-title endpoint - Generate a concise title from description
|
* POST /features/generate-title endpoint - Generate a concise title from description
|
||||||
*
|
*
|
||||||
* Uses Claude Haiku to generate a short, descriptive title from feature description.
|
* Uses the provider abstraction to generate a short, descriptive title
|
||||||
|
* from a feature description. Works with any configured provider (Claude, Cursor, etc.).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver';
|
import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver';
|
||||||
|
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||||
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
|
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const logger = createLogger('GenerateTitle');
|
const logger = createLogger('GenerateTitle');
|
||||||
|
|
||||||
@@ -25,43 +28,9 @@ interface GenerateTitleErrorResponse {
|
|||||||
error: string;
|
error: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
const SYSTEM_PROMPT = `You are a title generator. Your task is to create a concise, descriptive title (5-10 words max) for a software feature based on its description.
|
export function createGenerateTitleHandler(
|
||||||
|
settingsService?: SettingsService
|
||||||
Rules:
|
): (req: Request, res: Response) => Promise<void> {
|
||||||
- Output ONLY the title, nothing else
|
|
||||||
- Keep it short and action-oriented (e.g., "Add dark mode toggle", "Fix login validation")
|
|
||||||
- Start with a verb when possible (Add, Fix, Update, Implement, Create, etc.)
|
|
||||||
- No quotes, periods, or extra formatting
|
|
||||||
- Capture the essence of the feature in a scannable way`;
|
|
||||||
|
|
||||||
async function extractTextFromStream(
|
|
||||||
stream: AsyncIterable<{
|
|
||||||
type: string;
|
|
||||||
subtype?: string;
|
|
||||||
result?: string;
|
|
||||||
message?: {
|
|
||||||
content?: Array<{ type: string; text?: string }>;
|
|
||||||
};
|
|
||||||
}>
|
|
||||||
): Promise<string> {
|
|
||||||
let responseText = '';
|
|
||||||
|
|
||||||
for await (const msg of stream) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
|
||||||
responseText = msg.result || responseText;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return responseText;
|
|
||||||
}
|
|
||||||
|
|
||||||
export function createGenerateTitleHandler(): (req: Request, res: Response) => Promise<void> {
|
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { description } = req.body as GenerateTitleRequestBody;
|
const { description } = req.body as GenerateTitleRequestBody;
|
||||||
@@ -87,23 +56,25 @@ export function createGenerateTitleHandler(): (req: Request, res: Response) => P
|
|||||||
|
|
||||||
logger.info(`Generating title for description: ${trimmedDescription.substring(0, 50)}...`);
|
logger.info(`Generating title for description: ${trimmedDescription.substring(0, 50)}...`);
|
||||||
|
|
||||||
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[GenerateTitle]');
|
||||||
|
const systemPrompt = prompts.titleGeneration.systemPrompt;
|
||||||
|
|
||||||
const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`;
|
const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`;
|
||||||
|
|
||||||
const stream = query({
|
// Use simpleQuery - provider abstraction handles all the streaming/extraction
|
||||||
prompt: userPrompt,
|
const result = await simpleQuery({
|
||||||
options: {
|
prompt: `${systemPrompt}\n\n${userPrompt}`,
|
||||||
model: CLAUDE_MODEL_MAP.haiku,
|
model: CLAUDE_MODEL_MAP.haiku,
|
||||||
systemPrompt: SYSTEM_PROMPT,
|
cwd: process.cwd(),
|
||||||
maxTurns: 1,
|
maxTurns: 1,
|
||||||
allowedTools: [],
|
allowedTools: [],
|
||||||
permissionMode: 'default',
|
|
||||||
},
|
|
||||||
});
|
});
|
||||||
|
|
||||||
const title = await extractTextFromStream(stream);
|
const title = result.text;
|
||||||
|
|
||||||
if (!title || title.trim().length === 0) {
|
if (!title || title.trim().length === 0) {
|
||||||
logger.warn('Received empty response from Claude');
|
logger.warn('Received empty response from AI');
|
||||||
const response: GenerateTitleErrorResponse = {
|
const response: GenerateTitleErrorResponse = {
|
||||||
success: false,
|
success: false,
|
||||||
error: 'Failed to generate title - empty response',
|
error: 'Failed to generate title - empty response',
|
||||||
|
|||||||
@@ -4,20 +4,33 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
import type { Feature } from '@automaker/types';
|
import type { Feature, FeatureStatus } from '@automaker/types';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
|
const logger = createLogger('features/update');
|
||||||
|
|
||||||
|
// Statuses that should trigger syncing to app_spec.txt
|
||||||
|
const SYNC_TRIGGER_STATUSES: FeatureStatus[] = ['verified', 'completed'];
|
||||||
|
|
||||||
export function createUpdateHandler(featureLoader: FeatureLoader) {
|
export function createUpdateHandler(featureLoader: FeatureLoader) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, updates, descriptionHistorySource, enhancementMode } =
|
const {
|
||||||
req.body as {
|
projectPath,
|
||||||
projectPath: string;
|
featureId,
|
||||||
featureId: string;
|
updates,
|
||||||
updates: Partial<Feature>;
|
descriptionHistorySource,
|
||||||
descriptionHistorySource?: 'enhance' | 'edit';
|
enhancementMode,
|
||||||
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance';
|
preEnhancementDescription,
|
||||||
};
|
} = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
featureId: string;
|
||||||
|
updates: Partial<Feature>;
|
||||||
|
descriptionHistorySource?: 'enhance' | 'edit';
|
||||||
|
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance' | 'ux-reviewer';
|
||||||
|
preEnhancementDescription?: string;
|
||||||
|
};
|
||||||
|
|
||||||
if (!projectPath || !featureId || !updates) {
|
if (!projectPath || !featureId || !updates) {
|
||||||
res.status(400).json({
|
res.status(400).json({
|
||||||
@@ -27,13 +40,52 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check for duplicate title if title is being updated
|
||||||
|
if (updates.title && updates.title.trim()) {
|
||||||
|
const duplicate = await featureLoader.findDuplicateTitle(
|
||||||
|
projectPath,
|
||||||
|
updates.title,
|
||||||
|
featureId // Exclude the current feature from duplicate check
|
||||||
|
);
|
||||||
|
if (duplicate) {
|
||||||
|
res.status(409).json({
|
||||||
|
success: false,
|
||||||
|
error: `A feature with title "${updates.title}" already exists`,
|
||||||
|
duplicateFeatureId: duplicate.id,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the current feature to detect status changes
|
||||||
|
const currentFeature = await featureLoader.get(projectPath, featureId);
|
||||||
|
const previousStatus = currentFeature?.status as FeatureStatus | undefined;
|
||||||
|
const newStatus = updates.status as FeatureStatus | undefined;
|
||||||
|
|
||||||
const updated = await featureLoader.update(
|
const updated = await featureLoader.update(
|
||||||
projectPath,
|
projectPath,
|
||||||
featureId,
|
featureId,
|
||||||
updates,
|
updates,
|
||||||
descriptionHistorySource,
|
descriptionHistorySource,
|
||||||
enhancementMode
|
enhancementMode,
|
||||||
|
preEnhancementDescription
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Trigger sync to app_spec.txt when status changes to verified or completed
|
||||||
|
if (newStatus && SYNC_TRIGGER_STATUSES.includes(newStatus) && previousStatus !== newStatus) {
|
||||||
|
try {
|
||||||
|
const synced = await featureLoader.syncFeatureToAppSpec(projectPath, updated);
|
||||||
|
if (synced) {
|
||||||
|
logger.info(
|
||||||
|
`Synced feature "${updated.title || updated.id}" to app_spec.txt on status change to ${newStatus}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} catch (syncError) {
|
||||||
|
// Log the sync error but don't fail the update operation
|
||||||
|
logger.error(`Failed to sync feature to app_spec.txt:`, syncError);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
res.json({ success: true, feature: updated });
|
res.json({ success: true, feature: updated });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'Update feature failed');
|
logError(error, 'Update feature failed');
|
||||||
|
|||||||
@@ -5,6 +5,43 @@
|
|||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
|
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
|
||||||
|
|
||||||
|
const GIT_REMOTE_ORIGIN_COMMAND = 'git remote get-url origin';
|
||||||
|
const GH_REPO_VIEW_COMMAND = 'gh repo view --json name,owner';
|
||||||
|
const GITHUB_REPO_URL_PREFIX = 'https://github.com/';
|
||||||
|
const GITHUB_HTTPS_REMOTE_REGEX = /https:\/\/github\.com\/([^/]+)\/([^/.]+)/;
|
||||||
|
const GITHUB_SSH_REMOTE_REGEX = /git@github\.com:([^/]+)\/([^/.]+)/;
|
||||||
|
|
||||||
|
interface GhRepoViewResponse {
|
||||||
|
name?: string;
|
||||||
|
owner?: {
|
||||||
|
login?: string;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
async function resolveRepoFromGh(projectPath: string): Promise<{
|
||||||
|
owner: string;
|
||||||
|
repo: string;
|
||||||
|
} | null> {
|
||||||
|
try {
|
||||||
|
const { stdout } = await execAsync(GH_REPO_VIEW_COMMAND, {
|
||||||
|
cwd: projectPath,
|
||||||
|
env: execEnv,
|
||||||
|
});
|
||||||
|
|
||||||
|
const data = JSON.parse(stdout) as GhRepoViewResponse;
|
||||||
|
const owner = typeof data.owner?.login === 'string' ? data.owner.login : null;
|
||||||
|
const repo = typeof data.name === 'string' ? data.name : null;
|
||||||
|
|
||||||
|
if (!owner || !repo) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return { owner, repo };
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
export interface GitHubRemoteStatus {
|
export interface GitHubRemoteStatus {
|
||||||
hasGitHubRemote: boolean;
|
hasGitHubRemote: boolean;
|
||||||
remoteUrl: string | null;
|
remoteUrl: string | null;
|
||||||
@@ -21,19 +58,38 @@ export async function checkGitHubRemote(projectPath: string): Promise<GitHubRemo
|
|||||||
};
|
};
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Get the remote URL (origin by default)
|
let remoteUrl = '';
|
||||||
const { stdout } = await execAsync('git remote get-url origin', {
|
try {
|
||||||
cwd: projectPath,
|
// Get the remote URL (origin by default)
|
||||||
env: execEnv,
|
const { stdout } = await execAsync(GIT_REMOTE_ORIGIN_COMMAND, {
|
||||||
});
|
cwd: projectPath,
|
||||||
|
env: execEnv,
|
||||||
|
});
|
||||||
|
remoteUrl = stdout.trim();
|
||||||
|
status.remoteUrl = remoteUrl || null;
|
||||||
|
} catch {
|
||||||
|
// Ignore missing origin remote
|
||||||
|
}
|
||||||
|
|
||||||
const remoteUrl = stdout.trim();
|
const ghRepo = await resolveRepoFromGh(projectPath);
|
||||||
status.remoteUrl = remoteUrl;
|
if (ghRepo) {
|
||||||
|
status.hasGitHubRemote = true;
|
||||||
|
status.owner = ghRepo.owner;
|
||||||
|
status.repo = ghRepo.repo;
|
||||||
|
if (!status.remoteUrl) {
|
||||||
|
status.remoteUrl = `${GITHUB_REPO_URL_PREFIX}${ghRepo.owner}/${ghRepo.repo}`;
|
||||||
|
}
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
// Check if it's a GitHub URL
|
// Check if it's a GitHub URL
|
||||||
// Formats: https://github.com/owner/repo.git, git@github.com:owner/repo.git
|
// Formats: https://github.com/owner/repo.git, git@github.com:owner/repo.git
|
||||||
const httpsMatch = remoteUrl.match(/https:\/\/github\.com\/([^/]+)\/([^/.]+)/);
|
if (!remoteUrl) {
|
||||||
const sshMatch = remoteUrl.match(/git@github\.com:([^/]+)\/([^/.]+)/);
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
const httpsMatch = remoteUrl.match(GITHUB_HTTPS_REMOTE_REGEX);
|
||||||
|
const sshMatch = remoteUrl.match(GITHUB_SSH_REMOTE_REGEX);
|
||||||
|
|
||||||
const match = httpsMatch || sshMatch;
|
const match = httpsMatch || sshMatch;
|
||||||
if (match) {
|
if (match) {
|
||||||
|
|||||||
@@ -25,19 +25,24 @@ interface GraphQLComment {
|
|||||||
updatedAt: string;
|
updatedAt: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
interface GraphQLCommentConnection {
|
||||||
|
totalCount: number;
|
||||||
|
pageInfo: {
|
||||||
|
hasNextPage: boolean;
|
||||||
|
endCursor: string | null;
|
||||||
|
};
|
||||||
|
nodes: GraphQLComment[];
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GraphQLIssueOrPullRequest {
|
||||||
|
__typename: 'Issue' | 'PullRequest';
|
||||||
|
comments: GraphQLCommentConnection;
|
||||||
|
}
|
||||||
|
|
||||||
interface GraphQLResponse {
|
interface GraphQLResponse {
|
||||||
data?: {
|
data?: {
|
||||||
repository?: {
|
repository?: {
|
||||||
issue?: {
|
issueOrPullRequest?: GraphQLIssueOrPullRequest | null;
|
||||||
comments: {
|
|
||||||
totalCount: number;
|
|
||||||
pageInfo: {
|
|
||||||
hasNextPage: boolean;
|
|
||||||
endCursor: string | null;
|
|
||||||
};
|
|
||||||
nodes: GraphQLComment[];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
errors?: Array<{ message: string }>;
|
errors?: Array<{ message: string }>;
|
||||||
@@ -45,6 +50,7 @@ interface GraphQLResponse {
|
|||||||
|
|
||||||
/** Timeout for GitHub API requests in milliseconds */
|
/** Timeout for GitHub API requests in milliseconds */
|
||||||
const GITHUB_API_TIMEOUT_MS = 30000;
|
const GITHUB_API_TIMEOUT_MS = 30000;
|
||||||
|
const COMMENTS_PAGE_SIZE = 50;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Validate cursor format (GraphQL cursors are typically base64 strings)
|
* Validate cursor format (GraphQL cursors are typically base64 strings)
|
||||||
@@ -54,7 +60,7 @@ function isValidCursor(cursor: string): boolean {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Fetch comments for a specific issue using GitHub GraphQL API
|
* Fetch comments for a specific issue or pull request using GitHub GraphQL API
|
||||||
*/
|
*/
|
||||||
async function fetchIssueComments(
|
async function fetchIssueComments(
|
||||||
projectPath: string,
|
projectPath: string,
|
||||||
@@ -70,24 +76,52 @@ async function fetchIssueComments(
|
|||||||
|
|
||||||
// Use GraphQL variables instead of string interpolation for safety
|
// Use GraphQL variables instead of string interpolation for safety
|
||||||
const query = `
|
const query = `
|
||||||
query GetIssueComments($owner: String!, $repo: String!, $issueNumber: Int!, $cursor: String) {
|
query GetIssueComments(
|
||||||
|
$owner: String!
|
||||||
|
$repo: String!
|
||||||
|
$issueNumber: Int!
|
||||||
|
$cursor: String
|
||||||
|
$pageSize: Int!
|
||||||
|
) {
|
||||||
repository(owner: $owner, name: $repo) {
|
repository(owner: $owner, name: $repo) {
|
||||||
issue(number: $issueNumber) {
|
issueOrPullRequest(number: $issueNumber) {
|
||||||
comments(first: 50, after: $cursor) {
|
__typename
|
||||||
totalCount
|
... on Issue {
|
||||||
pageInfo {
|
comments(first: $pageSize, after: $cursor) {
|
||||||
hasNextPage
|
totalCount
|
||||||
endCursor
|
pageInfo {
|
||||||
}
|
hasNextPage
|
||||||
nodes {
|
endCursor
|
||||||
id
|
}
|
||||||
author {
|
nodes {
|
||||||
login
|
id
|
||||||
avatarUrl
|
author {
|
||||||
|
login
|
||||||
|
avatarUrl
|
||||||
|
}
|
||||||
|
body
|
||||||
|
createdAt
|
||||||
|
updatedAt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
... on PullRequest {
|
||||||
|
comments(first: $pageSize, after: $cursor) {
|
||||||
|
totalCount
|
||||||
|
pageInfo {
|
||||||
|
hasNextPage
|
||||||
|
endCursor
|
||||||
|
}
|
||||||
|
nodes {
|
||||||
|
id
|
||||||
|
author {
|
||||||
|
login
|
||||||
|
avatarUrl
|
||||||
|
}
|
||||||
|
body
|
||||||
|
createdAt
|
||||||
|
updatedAt
|
||||||
}
|
}
|
||||||
body
|
|
||||||
createdAt
|
|
||||||
updatedAt
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -99,6 +133,7 @@ async function fetchIssueComments(
|
|||||||
repo,
|
repo,
|
||||||
issueNumber,
|
issueNumber,
|
||||||
cursor: cursor || null,
|
cursor: cursor || null,
|
||||||
|
pageSize: COMMENTS_PAGE_SIZE,
|
||||||
};
|
};
|
||||||
|
|
||||||
const requestBody = JSON.stringify({ query, variables });
|
const requestBody = JSON.stringify({ query, variables });
|
||||||
@@ -140,10 +175,10 @@ async function fetchIssueComments(
|
|||||||
throw new Error(response.errors[0].message);
|
throw new Error(response.errors[0].message);
|
||||||
}
|
}
|
||||||
|
|
||||||
const commentsData = response.data?.repository?.issue?.comments;
|
const commentsData = response.data?.repository?.issueOrPullRequest?.comments;
|
||||||
|
|
||||||
if (!commentsData) {
|
if (!commentsData) {
|
||||||
throw new Error('Issue not found or no comments data available');
|
throw new Error('Issue or pull request not found or no comments data available');
|
||||||
}
|
}
|
||||||
|
|
||||||
const comments: GitHubComment[] = commentsData.nodes.map((node) => ({
|
const comments: GitHubComment[] = commentsData.nodes.map((node) => ({
|
||||||
|
|||||||
@@ -9,6 +9,17 @@ import { checkGitHubRemote } from './check-github-remote.js';
|
|||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
const logger = createLogger('ListIssues');
|
const logger = createLogger('ListIssues');
|
||||||
|
const OPEN_ISSUES_LIMIT = 100;
|
||||||
|
const CLOSED_ISSUES_LIMIT = 50;
|
||||||
|
const ISSUE_LIST_FIELDS = 'number,title,state,author,createdAt,labels,url,body,assignees';
|
||||||
|
const ISSUE_STATE_OPEN = 'open';
|
||||||
|
const ISSUE_STATE_CLOSED = 'closed';
|
||||||
|
const GH_ISSUE_LIST_COMMAND = 'gh issue list';
|
||||||
|
const GH_STATE_FLAG = '--state';
|
||||||
|
const GH_JSON_FLAG = '--json';
|
||||||
|
const GH_LIMIT_FLAG = '--limit';
|
||||||
|
const LINKED_PRS_BATCH_SIZE = 20;
|
||||||
|
const LINKED_PRS_TIMELINE_ITEMS = 10;
|
||||||
|
|
||||||
export interface GitHubLabel {
|
export interface GitHubLabel {
|
||||||
name: string;
|
name: string;
|
||||||
@@ -69,34 +80,68 @@ async function fetchLinkedPRs(
|
|||||||
|
|
||||||
// Build GraphQL query for batch fetching linked PRs
|
// Build GraphQL query for batch fetching linked PRs
|
||||||
// We fetch up to 20 issues at a time to avoid query limits
|
// We fetch up to 20 issues at a time to avoid query limits
|
||||||
const batchSize = 20;
|
for (let i = 0; i < issueNumbers.length; i += LINKED_PRS_BATCH_SIZE) {
|
||||||
for (let i = 0; i < issueNumbers.length; i += batchSize) {
|
const batch = issueNumbers.slice(i, i + LINKED_PRS_BATCH_SIZE);
|
||||||
const batch = issueNumbers.slice(i, i + batchSize);
|
|
||||||
|
|
||||||
const issueQueries = batch
|
const issueQueries = batch
|
||||||
.map(
|
.map(
|
||||||
(num, idx) => `
|
(num, idx) => `
|
||||||
issue${idx}: issue(number: ${num}) {
|
issue${idx}: issueOrPullRequest(number: ${num}) {
|
||||||
number
|
... on Issue {
|
||||||
timelineItems(first: 10, itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]) {
|
number
|
||||||
nodes {
|
timelineItems(
|
||||||
... on CrossReferencedEvent {
|
first: ${LINKED_PRS_TIMELINE_ITEMS}
|
||||||
source {
|
itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]
|
||||||
... on PullRequest {
|
) {
|
||||||
number
|
nodes {
|
||||||
title
|
... on CrossReferencedEvent {
|
||||||
state
|
source {
|
||||||
url
|
... on PullRequest {
|
||||||
|
number
|
||||||
|
title
|
||||||
|
state
|
||||||
|
url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
... on ConnectedEvent {
|
||||||
|
subject {
|
||||||
|
... on PullRequest {
|
||||||
|
number
|
||||||
|
title
|
||||||
|
state
|
||||||
|
url
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
... on ConnectedEvent {
|
}
|
||||||
subject {
|
}
|
||||||
... on PullRequest {
|
... on PullRequest {
|
||||||
number
|
number
|
||||||
title
|
timelineItems(
|
||||||
state
|
first: ${LINKED_PRS_TIMELINE_ITEMS}
|
||||||
url
|
itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]
|
||||||
|
) {
|
||||||
|
nodes {
|
||||||
|
... on CrossReferencedEvent {
|
||||||
|
source {
|
||||||
|
... on PullRequest {
|
||||||
|
number
|
||||||
|
title
|
||||||
|
state
|
||||||
|
url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
... on ConnectedEvent {
|
||||||
|
subject {
|
||||||
|
... on PullRequest {
|
||||||
|
number
|
||||||
|
title
|
||||||
|
state
|
||||||
|
url
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -213,16 +258,35 @@ export function createListIssuesHandler() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Fetch open and closed issues in parallel (now including assignees)
|
// Fetch open and closed issues in parallel (now including assignees)
|
||||||
|
const repoQualifier =
|
||||||
|
remoteStatus.owner && remoteStatus.repo ? `${remoteStatus.owner}/${remoteStatus.repo}` : '';
|
||||||
|
const repoFlag = repoQualifier ? `-R ${repoQualifier}` : '';
|
||||||
const [openResult, closedResult] = await Promise.all([
|
const [openResult, closedResult] = await Promise.all([
|
||||||
execAsync(
|
execAsync(
|
||||||
'gh issue list --state open --json number,title,state,author,createdAt,labels,url,body,assignees --limit 100',
|
[
|
||||||
|
GH_ISSUE_LIST_COMMAND,
|
||||||
|
repoFlag,
|
||||||
|
`${GH_STATE_FLAG} ${ISSUE_STATE_OPEN}`,
|
||||||
|
`${GH_JSON_FLAG} ${ISSUE_LIST_FIELDS}`,
|
||||||
|
`${GH_LIMIT_FLAG} ${OPEN_ISSUES_LIMIT}`,
|
||||||
|
]
|
||||||
|
.filter(Boolean)
|
||||||
|
.join(' '),
|
||||||
{
|
{
|
||||||
cwd: projectPath,
|
cwd: projectPath,
|
||||||
env: execEnv,
|
env: execEnv,
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
execAsync(
|
execAsync(
|
||||||
'gh issue list --state closed --json number,title,state,author,createdAt,labels,url,body,assignees --limit 50',
|
[
|
||||||
|
GH_ISSUE_LIST_COMMAND,
|
||||||
|
repoFlag,
|
||||||
|
`${GH_STATE_FLAG} ${ISSUE_STATE_CLOSED}`,
|
||||||
|
`${GH_JSON_FLAG} ${ISSUE_LIST_FIELDS}`,
|
||||||
|
`${GH_LIMIT_FLAG} ${CLOSED_ISSUES_LIMIT}`,
|
||||||
|
]
|
||||||
|
.filter(Boolean)
|
||||||
|
.join(' '),
|
||||||
{
|
{
|
||||||
cwd: projectPath,
|
cwd: projectPath,
|
||||||
env: execEnv,
|
env: execEnv,
|
||||||
|
|||||||
@@ -6,6 +6,17 @@ import type { Request, Response } from 'express';
|
|||||||
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
|
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
|
||||||
import { checkGitHubRemote } from './check-github-remote.js';
|
import { checkGitHubRemote } from './check-github-remote.js';
|
||||||
|
|
||||||
|
const OPEN_PRS_LIMIT = 100;
|
||||||
|
const MERGED_PRS_LIMIT = 50;
|
||||||
|
const PR_LIST_FIELDS =
|
||||||
|
'number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body';
|
||||||
|
const PR_STATE_OPEN = 'open';
|
||||||
|
const PR_STATE_MERGED = 'merged';
|
||||||
|
const GH_PR_LIST_COMMAND = 'gh pr list';
|
||||||
|
const GH_STATE_FLAG = '--state';
|
||||||
|
const GH_JSON_FLAG = '--json';
|
||||||
|
const GH_LIMIT_FLAG = '--limit';
|
||||||
|
|
||||||
export interface GitHubLabel {
|
export interface GitHubLabel {
|
||||||
name: string;
|
name: string;
|
||||||
color: string;
|
color: string;
|
||||||
@@ -57,16 +68,36 @@ export function createListPRsHandler() {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const repoQualifier =
|
||||||
|
remoteStatus.owner && remoteStatus.repo ? `${remoteStatus.owner}/${remoteStatus.repo}` : '';
|
||||||
|
const repoFlag = repoQualifier ? `-R ${repoQualifier}` : '';
|
||||||
|
|
||||||
const [openResult, mergedResult] = await Promise.all([
|
const [openResult, mergedResult] = await Promise.all([
|
||||||
execAsync(
|
execAsync(
|
||||||
'gh pr list --state open --json number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body --limit 100',
|
[
|
||||||
|
GH_PR_LIST_COMMAND,
|
||||||
|
repoFlag,
|
||||||
|
`${GH_STATE_FLAG} ${PR_STATE_OPEN}`,
|
||||||
|
`${GH_JSON_FLAG} ${PR_LIST_FIELDS}`,
|
||||||
|
`${GH_LIMIT_FLAG} ${OPEN_PRS_LIMIT}`,
|
||||||
|
]
|
||||||
|
.filter(Boolean)
|
||||||
|
.join(' '),
|
||||||
{
|
{
|
||||||
cwd: projectPath,
|
cwd: projectPath,
|
||||||
env: execEnv,
|
env: execEnv,
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
execAsync(
|
execAsync(
|
||||||
'gh pr list --state merged --json number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body --limit 50',
|
[
|
||||||
|
GH_PR_LIST_COMMAND,
|
||||||
|
repoFlag,
|
||||||
|
`${GH_STATE_FLAG} ${PR_STATE_MERGED}`,
|
||||||
|
`${GH_JSON_FLAG} ${PR_LIST_FIELDS}`,
|
||||||
|
`${GH_LIMIT_FLAG} ${MERGED_PRS_LIMIT}`,
|
||||||
|
]
|
||||||
|
.filter(Boolean)
|
||||||
|
.join(' '),
|
||||||
{
|
{
|
||||||
cwd: projectPath,
|
cwd: projectPath,
|
||||||
env: execEnv,
|
env: execEnv,
|
||||||
|
|||||||
@@ -1,36 +1,40 @@
|
|||||||
/**
|
/**
|
||||||
* POST /validate-issue endpoint - Validate a GitHub issue using Claude SDK or Cursor (async)
|
* POST /validate-issue endpoint - Validate a GitHub issue using provider abstraction (async)
|
||||||
*
|
*
|
||||||
* Scans the codebase to determine if an issue is valid, invalid, or needs clarification.
|
* Scans the codebase to determine if an issue is valid, invalid, or needs clarification.
|
||||||
* Runs asynchronously and emits events for progress and completion.
|
* Runs asynchronously and emits events for progress and completion.
|
||||||
* Supports both Claude models and Cursor models.
|
* Supports Claude, Codex, Cursor, and OpenCode models.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import type { EventEmitter } from '../../../lib/events.js';
|
import type { EventEmitter } from '../../../lib/events.js';
|
||||||
import type {
|
import type {
|
||||||
IssueValidationResult,
|
IssueValidationResult,
|
||||||
IssueValidationEvent,
|
IssueValidationEvent,
|
||||||
ModelAlias,
|
ModelId,
|
||||||
CursorModelId,
|
|
||||||
GitHubComment,
|
GitHubComment,
|
||||||
LinkedPRInfo,
|
LinkedPRInfo,
|
||||||
ThinkingLevel,
|
ThinkingLevel,
|
||||||
|
ReasoningEffort,
|
||||||
|
} from '@automaker/types';
|
||||||
|
import {
|
||||||
|
DEFAULT_PHASE_MODELS,
|
||||||
|
isClaudeModel,
|
||||||
|
isCodexModel,
|
||||||
|
isCursorModel,
|
||||||
|
isOpencodeModel,
|
||||||
} from '@automaker/types';
|
} from '@automaker/types';
|
||||||
import { isCursorModel, DEFAULT_PHASE_MODELS, stripProviderPrefix } from '@automaker/types';
|
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createSuggestionsOptions } from '../../../lib/sdk-options.js';
|
|
||||||
import { extractJson } from '../../../lib/json-extractor.js';
|
import { extractJson } from '../../../lib/json-extractor.js';
|
||||||
import { writeValidation } from '../../../lib/validation-storage.js';
|
import { writeValidation } from '../../../lib/validation-storage.js';
|
||||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
import { streamingQuery } from '../../../providers/simple-query-service.js';
|
||||||
import {
|
import {
|
||||||
issueValidationSchema,
|
issueValidationSchema,
|
||||||
ISSUE_VALIDATION_SYSTEM_PROMPT,
|
|
||||||
buildValidationPrompt,
|
buildValidationPrompt,
|
||||||
ValidationComment,
|
ValidationComment,
|
||||||
ValidationLinkedPR,
|
ValidationLinkedPR,
|
||||||
} from './validation-schema.js';
|
} from './validation-schema.js';
|
||||||
|
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
|
||||||
import {
|
import {
|
||||||
trySetValidationRunning,
|
trySetValidationRunning,
|
||||||
clearValidationStatus,
|
clearValidationStatus,
|
||||||
@@ -41,9 +45,6 @@ import {
|
|||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
|
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
|
||||||
|
|
||||||
/** Valid Claude model values for validation */
|
|
||||||
const VALID_CLAUDE_MODELS: readonly ModelAlias[] = ['opus', 'sonnet', 'haiku'] as const;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Request body for issue validation
|
* Request body for issue validation
|
||||||
*/
|
*/
|
||||||
@@ -53,10 +54,12 @@ interface ValidateIssueRequestBody {
|
|||||||
issueTitle: string;
|
issueTitle: string;
|
||||||
issueBody: string;
|
issueBody: string;
|
||||||
issueLabels?: string[];
|
issueLabels?: string[];
|
||||||
/** Model to use for validation (opus, sonnet, haiku, or cursor model IDs) */
|
/** Model to use for validation (Claude alias or provider model ID) */
|
||||||
model?: ModelAlias | CursorModelId;
|
model?: ModelId;
|
||||||
/** Thinking level for Claude models (ignored for Cursor models) */
|
/** Thinking level for Claude models (ignored for non-Claude models) */
|
||||||
thinkingLevel?: ThinkingLevel;
|
thinkingLevel?: ThinkingLevel;
|
||||||
|
/** Reasoning effort for Codex models (ignored for non-Codex models) */
|
||||||
|
reasoningEffort?: ReasoningEffort;
|
||||||
/** Comments to include in validation analysis */
|
/** Comments to include in validation analysis */
|
||||||
comments?: GitHubComment[];
|
comments?: GitHubComment[];
|
||||||
/** Linked pull requests for this issue */
|
/** Linked pull requests for this issue */
|
||||||
@@ -68,7 +71,7 @@ interface ValidateIssueRequestBody {
|
|||||||
*
|
*
|
||||||
* Emits events for start, progress, complete, and error.
|
* Emits events for start, progress, complete, and error.
|
||||||
* Stores result on completion.
|
* Stores result on completion.
|
||||||
* Supports both Claude models (with structured output) and Cursor models (with JSON parsing).
|
* Supports Claude/Codex models (structured output) and Cursor/OpenCode models (JSON parsing).
|
||||||
*/
|
*/
|
||||||
async function runValidation(
|
async function runValidation(
|
||||||
projectPath: string,
|
projectPath: string,
|
||||||
@@ -76,13 +79,14 @@ async function runValidation(
|
|||||||
issueTitle: string,
|
issueTitle: string,
|
||||||
issueBody: string,
|
issueBody: string,
|
||||||
issueLabels: string[] | undefined,
|
issueLabels: string[] | undefined,
|
||||||
model: ModelAlias | CursorModelId,
|
model: ModelId,
|
||||||
events: EventEmitter,
|
events: EventEmitter,
|
||||||
abortController: AbortController,
|
abortController: AbortController,
|
||||||
settingsService?: SettingsService,
|
settingsService?: SettingsService,
|
||||||
comments?: ValidationComment[],
|
comments?: ValidationComment[],
|
||||||
linkedPRs?: ValidationLinkedPR[],
|
linkedPRs?: ValidationLinkedPR[],
|
||||||
thinkingLevel?: ThinkingLevel
|
thinkingLevel?: ThinkingLevel,
|
||||||
|
reasoningEffort?: ReasoningEffort
|
||||||
): Promise<void> {
|
): Promise<void> {
|
||||||
// Emit start event
|
// Emit start event
|
||||||
const startEvent: IssueValidationEvent = {
|
const startEvent: IssueValidationEvent = {
|
||||||
@@ -102,7 +106,7 @@ async function runValidation(
|
|||||||
|
|
||||||
try {
|
try {
|
||||||
// Build the prompt (include comments and linked PRs if provided)
|
// Build the prompt (include comments and linked PRs if provided)
|
||||||
const prompt = buildValidationPrompt(
|
const basePrompt = buildValidationPrompt(
|
||||||
issueNumber,
|
issueNumber,
|
||||||
issueTitle,
|
issueTitle,
|
||||||
issueBody,
|
issueBody,
|
||||||
@@ -111,20 +115,19 @@ async function runValidation(
|
|||||||
linkedPRs
|
linkedPRs
|
||||||
);
|
);
|
||||||
|
|
||||||
let validationResult: IssueValidationResult | null = null;
|
|
||||||
let responseText = '';
|
let responseText = '';
|
||||||
|
|
||||||
// Route to appropriate provider based on model
|
// Get customized prompts from settings
|
||||||
if (isCursorModel(model)) {
|
const prompts = await getPromptCustomization(settingsService, '[ValidateIssue]');
|
||||||
// Use Cursor provider for Cursor models
|
const issueValidationSystemPrompt = prompts.issueValidation.systemPrompt;
|
||||||
logger.info(`Using Cursor provider for validation with model: ${model}`);
|
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
// Determine if we should use structured output (Claude/Codex support it, Cursor/OpenCode don't)
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
const useStructuredOutput = isClaudeModel(model) || isCodexModel(model);
|
||||||
const bareModel = stripProviderPrefix(model);
|
|
||||||
|
|
||||||
// For Cursor, include the system prompt and schema in the user prompt
|
// Build the final prompt - for Cursor, include system prompt and JSON schema instructions
|
||||||
const cursorPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
|
let finalPrompt = basePrompt;
|
||||||
|
if (!useStructuredOutput) {
|
||||||
|
finalPrompt = `${issueValidationSystemPrompt}
|
||||||
|
|
||||||
CRITICAL INSTRUCTIONS:
|
CRITICAL INSTRUCTIONS:
|
||||||
1. DO NOT write any files. Return the JSON in your response only.
|
1. DO NOT write any files. Return the JSON in your response only.
|
||||||
@@ -135,121 +138,78 @@ ${JSON.stringify(issueValidationSchema, null, 2)}
|
|||||||
|
|
||||||
Your entire response should be valid JSON starting with { and ending with }. No text before or after.
|
Your entire response should be valid JSON starting with { and ending with }. No text before or after.
|
||||||
|
|
||||||
${prompt}`;
|
${basePrompt}`;
|
||||||
|
}
|
||||||
|
|
||||||
for await (const msg of provider.executeQuery({
|
// Load autoLoadClaudeMd setting
|
||||||
prompt: cursorPrompt,
|
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
||||||
model: bareModel,
|
projectPath,
|
||||||
cwd: projectPath,
|
settingsService,
|
||||||
readOnly: true, // Issue validation only reads code, doesn't write
|
'[ValidateIssue]'
|
||||||
})) {
|
);
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
|
|
||||||
// Emit progress event
|
// Use request overrides if provided, otherwise fall back to settings
|
||||||
const progressEvent: IssueValidationEvent = {
|
let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
|
||||||
type: 'issue_validation_progress',
|
let effectiveReasoningEffort: ReasoningEffort | undefined = reasoningEffort;
|
||||||
issueNumber,
|
if (!effectiveThinkingLevel || !effectiveReasoningEffort) {
|
||||||
content: block.text,
|
const settings = await settingsService?.getGlobalSettings();
|
||||||
projectPath,
|
const phaseModelEntry =
|
||||||
};
|
settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
|
||||||
events.emit('issue-validation:event', progressEvent);
|
const resolved = resolvePhaseModel(phaseModelEntry);
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
|
||||||
// Use result if it's a final accumulated message
|
|
||||||
if (msg.result.length > responseText.length) {
|
|
||||||
responseText = msg.result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse JSON from the response text using shared utility
|
|
||||||
if (responseText) {
|
|
||||||
validationResult = extractJson<IssueValidationResult>(responseText, { logger });
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models
|
|
||||||
logger.info(`Using Claude provider for validation with model: ${model}`);
|
|
||||||
|
|
||||||
// Load autoLoadClaudeMd setting
|
|
||||||
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
|
||||||
projectPath,
|
|
||||||
settingsService,
|
|
||||||
'[ValidateIssue]'
|
|
||||||
);
|
|
||||||
|
|
||||||
// Use thinkingLevel from request if provided, otherwise fall back to settings
|
|
||||||
let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
|
|
||||||
if (!effectiveThinkingLevel) {
|
if (!effectiveThinkingLevel) {
|
||||||
const settings = await settingsService?.getGlobalSettings();
|
|
||||||
const phaseModelEntry =
|
|
||||||
settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
|
|
||||||
const resolved = resolvePhaseModel(phaseModelEntry);
|
|
||||||
effectiveThinkingLevel = resolved.thinkingLevel;
|
effectiveThinkingLevel = resolved.thinkingLevel;
|
||||||
}
|
}
|
||||||
|
if (!effectiveReasoningEffort && typeof phaseModelEntry !== 'string') {
|
||||||
// Create SDK options with structured output and abort controller
|
effectiveReasoningEffort = phaseModelEntry.reasoningEffort;
|
||||||
const options = createSuggestionsOptions({
|
|
||||||
cwd: projectPath,
|
|
||||||
model: model as ModelAlias,
|
|
||||||
systemPrompt: ISSUE_VALIDATION_SYSTEM_PROMPT,
|
|
||||||
abortController,
|
|
||||||
autoLoadClaudeMd,
|
|
||||||
thinkingLevel: effectiveThinkingLevel,
|
|
||||||
outputFormat: {
|
|
||||||
type: 'json_schema',
|
|
||||||
schema: issueValidationSchema as Record<string, unknown>,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
// Execute the query
|
|
||||||
const stream = query({ prompt, options });
|
|
||||||
|
|
||||||
for await (const msg of stream) {
|
|
||||||
// Collect assistant text for debugging and emit progress
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text') {
|
|
||||||
responseText += block.text;
|
|
||||||
|
|
||||||
// Emit progress event
|
|
||||||
const progressEvent: IssueValidationEvent = {
|
|
||||||
type: 'issue_validation_progress',
|
|
||||||
issueNumber,
|
|
||||||
content: block.text,
|
|
||||||
projectPath,
|
|
||||||
};
|
|
||||||
events.emit('issue-validation:event', progressEvent);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract structured output on success
|
|
||||||
if (msg.type === 'result' && msg.subtype === 'success') {
|
|
||||||
const resultMsg = msg as { structured_output?: IssueValidationResult };
|
|
||||||
if (resultMsg.structured_output) {
|
|
||||||
validationResult = resultMsg.structured_output;
|
|
||||||
logger.debug('Received structured output:', validationResult);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle errors
|
|
||||||
if (msg.type === 'result') {
|
|
||||||
const resultMsg = msg as { subtype?: string };
|
|
||||||
if (resultMsg.subtype === 'error_max_structured_output_retries') {
|
|
||||||
logger.error('Failed to produce valid structured output after retries');
|
|
||||||
throw new Error('Could not produce valid validation output');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.info(`Using model: ${model}`);
|
||||||
|
|
||||||
|
// Use streamingQuery with event callbacks
|
||||||
|
const result = await streamingQuery({
|
||||||
|
prompt: finalPrompt,
|
||||||
|
model: model as string,
|
||||||
|
cwd: projectPath,
|
||||||
|
systemPrompt: useStructuredOutput ? issueValidationSystemPrompt : undefined,
|
||||||
|
abortController,
|
||||||
|
thinkingLevel: effectiveThinkingLevel,
|
||||||
|
reasoningEffort: effectiveReasoningEffort,
|
||||||
|
readOnly: true, // Issue validation only reads code, doesn't write
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
outputFormat: useStructuredOutput
|
||||||
|
? {
|
||||||
|
type: 'json_schema',
|
||||||
|
schema: issueValidationSchema as Record<string, unknown>,
|
||||||
|
}
|
||||||
|
: undefined,
|
||||||
|
onText: (text) => {
|
||||||
|
responseText += text;
|
||||||
|
// Emit progress event
|
||||||
|
const progressEvent: IssueValidationEvent = {
|
||||||
|
type: 'issue_validation_progress',
|
||||||
|
issueNumber,
|
||||||
|
content: text,
|
||||||
|
projectPath,
|
||||||
|
};
|
||||||
|
events.emit('issue-validation:event', progressEvent);
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
// Clear timeout
|
// Clear timeout
|
||||||
clearTimeout(timeoutId);
|
clearTimeout(timeoutId);
|
||||||
|
|
||||||
|
// Get validation result from structured output or parse from text
|
||||||
|
let validationResult: IssueValidationResult | null = null;
|
||||||
|
|
||||||
|
if (result.structured_output) {
|
||||||
|
validationResult = result.structured_output as unknown as IssueValidationResult;
|
||||||
|
logger.debug('Received structured output:', validationResult);
|
||||||
|
} else if (responseText) {
|
||||||
|
// Parse JSON from response text
|
||||||
|
validationResult = extractJson<IssueValidationResult>(responseText, { logger });
|
||||||
|
}
|
||||||
|
|
||||||
// Require validation result
|
// Require validation result
|
||||||
if (!validationResult) {
|
if (!validationResult) {
|
||||||
logger.error('No validation result received from AI provider');
|
logger.error('No validation result received from AI provider');
|
||||||
@@ -299,7 +259,7 @@ ${prompt}`;
|
|||||||
/**
|
/**
|
||||||
* Creates the handler for validating GitHub issues against the codebase.
|
* Creates the handler for validating GitHub issues against the codebase.
|
||||||
*
|
*
|
||||||
* Uses Claude SDK with:
|
* Uses the provider abstraction with:
|
||||||
* - Read-only tools (Read, Glob, Grep) for codebase analysis
|
* - Read-only tools (Read, Glob, Grep) for codebase analysis
|
||||||
* - JSON schema structured output for reliable parsing
|
* - JSON schema structured output for reliable parsing
|
||||||
* - System prompt guiding the validation process
|
* - System prompt guiding the validation process
|
||||||
@@ -319,6 +279,7 @@ export function createValidateIssueHandler(
|
|||||||
issueLabels,
|
issueLabels,
|
||||||
model = 'opus',
|
model = 'opus',
|
||||||
thinkingLevel,
|
thinkingLevel,
|
||||||
|
reasoningEffort,
|
||||||
comments: rawComments,
|
comments: rawComments,
|
||||||
linkedPRs: rawLinkedPRs,
|
linkedPRs: rawLinkedPRs,
|
||||||
} = req.body as ValidateIssueRequestBody;
|
} = req.body as ValidateIssueRequestBody;
|
||||||
@@ -366,14 +327,17 @@ export function createValidateIssueHandler(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate model parameter at runtime - accept Claude models or Cursor models
|
// Validate model parameter at runtime - accept any supported provider model
|
||||||
const isValidClaudeModel = VALID_CLAUDE_MODELS.includes(model as ModelAlias);
|
const isValidModel =
|
||||||
const isValidCursorModel = isCursorModel(model);
|
isClaudeModel(model) ||
|
||||||
|
isCursorModel(model) ||
|
||||||
|
isCodexModel(model) ||
|
||||||
|
isOpencodeModel(model);
|
||||||
|
|
||||||
if (!isValidClaudeModel && !isValidCursorModel) {
|
if (!isValidModel) {
|
||||||
res.status(400).json({
|
res.status(400).json({
|
||||||
success: false,
|
success: false,
|
||||||
error: `Invalid model. Must be one of: ${VALID_CLAUDE_MODELS.join(', ')}, or a Cursor model ID`,
|
error: 'Invalid model. Must be a Claude, Cursor, Codex, or OpenCode model ID (or alias).',
|
||||||
});
|
});
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -404,7 +368,8 @@ export function createValidateIssueHandler(
|
|||||||
settingsService,
|
settingsService,
|
||||||
validationComments,
|
validationComments,
|
||||||
validationLinkedPRs,
|
validationLinkedPRs,
|
||||||
thinkingLevel
|
thinkingLevel,
|
||||||
|
reasoningEffort
|
||||||
)
|
)
|
||||||
.catch(() => {
|
.catch(() => {
|
||||||
// Error is already handled inside runValidation (event emitted)
|
// Error is already handled inside runValidation (event emitted)
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
/**
|
/**
|
||||||
* Issue Validation Schema and System Prompt
|
* Issue Validation Schema and Prompt Building
|
||||||
*
|
*
|
||||||
* Defines the JSON schema for Claude's structured output and
|
* Defines the JSON schema for Claude's structured output and
|
||||||
* the system prompt that guides the validation process.
|
* helper functions for building validation prompts.
|
||||||
|
*
|
||||||
|
* Note: The system prompt is now centralized in @automaker/prompts
|
||||||
|
* and accessed via getPromptCustomization() in validate-issue.ts
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -82,76 +85,6 @@ export const issueValidationSchema = {
|
|||||||
additionalProperties: false,
|
additionalProperties: false,
|
||||||
} as const;
|
} as const;
|
||||||
|
|
||||||
/**
|
|
||||||
* System prompt that guides Claude in validating GitHub issues.
|
|
||||||
* Instructs the model to use read-only tools to analyze the codebase.
|
|
||||||
*/
|
|
||||||
export const ISSUE_VALIDATION_SYSTEM_PROMPT = `You are an expert code analyst validating GitHub issues against a codebase.
|
|
||||||
|
|
||||||
Your task is to analyze a GitHub issue and determine if it's valid by scanning the codebase.
|
|
||||||
|
|
||||||
## Validation Process
|
|
||||||
|
|
||||||
1. **Read the issue carefully** - Understand what is being reported or requested
|
|
||||||
2. **Search the codebase** - Use Glob to find relevant files by pattern, Grep to search for keywords
|
|
||||||
3. **Examine the code** - Use Read to look at the actual implementation in relevant files
|
|
||||||
4. **Check linked PRs** - If there are linked pull requests, use \`gh pr diff <PR_NUMBER>\` to review the changes
|
|
||||||
5. **Form your verdict** - Based on your analysis, determine if the issue is valid
|
|
||||||
|
|
||||||
## Verdicts
|
|
||||||
|
|
||||||
- **valid**: The issue describes a real problem that exists in the codebase, or a clear feature request that can be implemented. The referenced files/components exist and the issue is actionable.
|
|
||||||
|
|
||||||
- **invalid**: The issue describes behavior that doesn't exist, references non-existent files or components, is based on a misunderstanding of the code, or the described "bug" is actually expected behavior.
|
|
||||||
|
|
||||||
- **needs_clarification**: The issue lacks sufficient detail to verify. Specify what additional information is needed in the missingInfo field.
|
|
||||||
|
|
||||||
## For Bug Reports, Check:
|
|
||||||
- Do the referenced files/components exist?
|
|
||||||
- Does the code match what the issue describes?
|
|
||||||
- Is the described behavior actually a bug or expected?
|
|
||||||
- Can you locate the code that would cause the reported issue?
|
|
||||||
|
|
||||||
## For Feature Requests, Check:
|
|
||||||
- Does the feature already exist?
|
|
||||||
- Is the implementation location clear?
|
|
||||||
- Is the request technically feasible given the codebase structure?
|
|
||||||
|
|
||||||
## Analyzing Linked Pull Requests
|
|
||||||
|
|
||||||
When an issue has linked PRs (especially open ones), you MUST analyze them:
|
|
||||||
|
|
||||||
1. **Run \`gh pr diff <PR_NUMBER>\`** to see what changes the PR makes
|
|
||||||
2. **Run \`gh pr view <PR_NUMBER>\`** to see PR description and status
|
|
||||||
3. **Evaluate if the PR fixes the issue** - Does the diff address the reported problem?
|
|
||||||
4. **Provide a recommendation**:
|
|
||||||
- \`wait_for_merge\`: The PR appears to fix the issue correctly. No additional work needed - just wait for it to be merged.
|
|
||||||
- \`pr_needs_work\`: The PR attempts to fix the issue but is incomplete or has problems.
|
|
||||||
- \`no_pr\`: No relevant PR exists for this issue.
|
|
||||||
|
|
||||||
5. **Include prAnalysis in your response** with:
|
|
||||||
- hasOpenPR: true/false
|
|
||||||
- prFixesIssue: true/false (based on diff analysis)
|
|
||||||
- prNumber: the PR number you analyzed
|
|
||||||
- prSummary: brief description of what the PR changes
|
|
||||||
- recommendation: one of the above values
|
|
||||||
|
|
||||||
## Response Guidelines
|
|
||||||
|
|
||||||
- **Always include relatedFiles** when you find relevant code
|
|
||||||
- **Set bugConfirmed to true** only if you can definitively confirm a bug exists in the code
|
|
||||||
- **Provide a suggestedFix** when you have a clear idea of how to address the issue
|
|
||||||
- **Use missingInfo** when the verdict is needs_clarification to list what's needed
|
|
||||||
- **Include prAnalysis** when there are linked PRs - this is critical for avoiding duplicate work
|
|
||||||
- **Set estimatedComplexity** to help prioritize:
|
|
||||||
- trivial: Simple text changes, one-line fixes
|
|
||||||
- simple: Small changes to one file
|
|
||||||
- moderate: Changes to multiple files or moderate logic changes
|
|
||||||
- complex: Significant refactoring or new feature implementation
|
|
||||||
- very_complex: Major architectural changes or cross-cutting concerns
|
|
||||||
|
|
||||||
Be thorough in your analysis but focus on files that are directly relevant to the issue.`;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Comment data structure for validation prompt
|
* Comment data structure for validation prompt
|
||||||
*/
|
*/
|
||||||
|
|||||||
@@ -9,12 +9,14 @@ import type { Request, Response } from 'express';
|
|||||||
|
|
||||||
export interface EnvironmentResponse {
|
export interface EnvironmentResponse {
|
||||||
isContainerized: boolean;
|
isContainerized: boolean;
|
||||||
|
skipSandboxWarning?: boolean;
|
||||||
}
|
}
|
||||||
|
|
||||||
export function createEnvironmentHandler() {
|
export function createEnvironmentHandler() {
|
||||||
return (_req: Request, res: Response): void => {
|
return (_req: Request, res: Response): void => {
|
||||||
res.json({
|
res.json({
|
||||||
isContainerized: process.env.IS_CONTAINERIZED === 'true',
|
isContainerized: process.env.IS_CONTAINERIZED === 'true',
|
||||||
|
skipSandboxWarning: process.env.AUTOMAKER_SKIP_SANDBOX_WARNING === 'true',
|
||||||
} satisfies EnvironmentResponse);
|
} satisfies EnvironmentResponse);
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
21
apps/server/src/routes/notifications/common.ts
Normal file
21
apps/server/src/routes/notifications/common.ts
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
/**
|
||||||
|
* Common utilities for notification routes
|
||||||
|
*
|
||||||
|
* Provides logger and error handling utilities shared across all notification endpoints.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
|
||||||
|
|
||||||
|
/** Logger instance for notification-related operations */
|
||||||
|
export const logger = createLogger('Notifications');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract user-friendly error message from error objects
|
||||||
|
*/
|
||||||
|
export { getErrorMessageShared as getErrorMessage };
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Log error with automatic logger binding
|
||||||
|
*/
|
||||||
|
export const logError = createLogError(logger);
|
||||||
62
apps/server/src/routes/notifications/index.ts
Normal file
62
apps/server/src/routes/notifications/index.ts
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
/**
|
||||||
|
* Notifications routes - HTTP API for project-level notifications
|
||||||
|
*
|
||||||
|
* Provides endpoints for:
|
||||||
|
* - Listing notifications
|
||||||
|
* - Getting unread count
|
||||||
|
* - Marking notifications as read
|
||||||
|
* - Dismissing notifications
|
||||||
|
*
|
||||||
|
* All endpoints use handler factories that receive the NotificationService instance.
|
||||||
|
* Mounted at /api/notifications in the main server.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Router } from 'express';
|
||||||
|
import type { NotificationService } from '../../services/notification-service.js';
|
||||||
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
|
import { createListHandler } from './routes/list.js';
|
||||||
|
import { createUnreadCountHandler } from './routes/unread-count.js';
|
||||||
|
import { createMarkReadHandler } from './routes/mark-read.js';
|
||||||
|
import { createDismissHandler } from './routes/dismiss.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create notifications router with all endpoints
|
||||||
|
*
|
||||||
|
* Endpoints:
|
||||||
|
* - POST /list - List all notifications for a project
|
||||||
|
* - POST /unread-count - Get unread notification count
|
||||||
|
* - POST /mark-read - Mark notification(s) as read
|
||||||
|
* - POST /dismiss - Dismiss notification(s)
|
||||||
|
*
|
||||||
|
* @param notificationService - Instance of NotificationService
|
||||||
|
* @returns Express Router configured with all notification endpoints
|
||||||
|
*/
|
||||||
|
export function createNotificationsRoutes(notificationService: NotificationService): Router {
|
||||||
|
const router = Router();
|
||||||
|
|
||||||
|
// List notifications
|
||||||
|
router.post('/list', validatePathParams('projectPath'), createListHandler(notificationService));
|
||||||
|
|
||||||
|
// Get unread count
|
||||||
|
router.post(
|
||||||
|
'/unread-count',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createUnreadCountHandler(notificationService)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Mark as read (single or all)
|
||||||
|
router.post(
|
||||||
|
'/mark-read',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createMarkReadHandler(notificationService)
|
||||||
|
);
|
||||||
|
|
||||||
|
// Dismiss (single or all)
|
||||||
|
router.post(
|
||||||
|
'/dismiss',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createDismissHandler(notificationService)
|
||||||
|
);
|
||||||
|
|
||||||
|
return router;
|
||||||
|
}
|
||||||
53
apps/server/src/routes/notifications/routes/dismiss.ts
Normal file
53
apps/server/src/routes/notifications/routes/dismiss.ts
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/notifications/dismiss - Dismiss notification(s)
|
||||||
|
*
|
||||||
|
* Request body: { projectPath: string, notificationId?: string }
|
||||||
|
* - If notificationId provided: dismisses that notification
|
||||||
|
* - If notificationId not provided: dismisses all notifications
|
||||||
|
*
|
||||||
|
* Response: { success: true, dismissed: boolean | count: number }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { NotificationService } from '../../../services/notification-service.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create handler for POST /api/notifications/dismiss
|
||||||
|
*
|
||||||
|
* @param notificationService - Instance of NotificationService
|
||||||
|
* @returns Express request handler
|
||||||
|
*/
|
||||||
|
export function createDismissHandler(notificationService: NotificationService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, notificationId } = req.body;
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If notificationId provided, dismiss single notification
|
||||||
|
if (notificationId) {
|
||||||
|
const dismissed = await notificationService.dismissNotification(
|
||||||
|
projectPath,
|
||||||
|
notificationId
|
||||||
|
);
|
||||||
|
if (!dismissed) {
|
||||||
|
res.status(404).json({ success: false, error: 'Notification not found' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
res.json({ success: true, dismissed: true });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise dismiss all
|
||||||
|
const count = await notificationService.dismissAll(projectPath);
|
||||||
|
res.json({ success: true, count });
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Dismiss failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
39
apps/server/src/routes/notifications/routes/list.ts
Normal file
39
apps/server/src/routes/notifications/routes/list.ts
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/notifications/list - List all notifications for a project
|
||||||
|
*
|
||||||
|
* Request body: { projectPath: string }
|
||||||
|
* Response: { success: true, notifications: Notification[] }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { NotificationService } from '../../../services/notification-service.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create handler for POST /api/notifications/list
|
||||||
|
*
|
||||||
|
* @param notificationService - Instance of NotificationService
|
||||||
|
* @returns Express request handler
|
||||||
|
*/
|
||||||
|
export function createListHandler(notificationService: NotificationService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body;
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const notifications = await notificationService.getNotifications(projectPath);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
notifications,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'List notifications failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
50
apps/server/src/routes/notifications/routes/mark-read.ts
Normal file
50
apps/server/src/routes/notifications/routes/mark-read.ts
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/notifications/mark-read - Mark notification(s) as read
|
||||||
|
*
|
||||||
|
* Request body: { projectPath: string, notificationId?: string }
|
||||||
|
* - If notificationId provided: marks that notification as read
|
||||||
|
* - If notificationId not provided: marks all notifications as read
|
||||||
|
*
|
||||||
|
* Response: { success: true, count?: number, notification?: Notification }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { NotificationService } from '../../../services/notification-service.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create handler for POST /api/notifications/mark-read
|
||||||
|
*
|
||||||
|
* @param notificationService - Instance of NotificationService
|
||||||
|
* @returns Express request handler
|
||||||
|
*/
|
||||||
|
export function createMarkReadHandler(notificationService: NotificationService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, notificationId } = req.body;
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If notificationId provided, mark single notification
|
||||||
|
if (notificationId) {
|
||||||
|
const notification = await notificationService.markAsRead(projectPath, notificationId);
|
||||||
|
if (!notification) {
|
||||||
|
res.status(404).json({ success: false, error: 'Notification not found' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
res.json({ success: true, notification });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise mark all as read
|
||||||
|
const count = await notificationService.markAllAsRead(projectPath);
|
||||||
|
res.json({ success: true, count });
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Mark read failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
39
apps/server/src/routes/notifications/routes/unread-count.ts
Normal file
39
apps/server/src/routes/notifications/routes/unread-count.ts
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
/**
|
||||||
|
* POST /api/notifications/unread-count - Get unread notification count
|
||||||
|
*
|
||||||
|
* Request body: { projectPath: string }
|
||||||
|
* Response: { success: true, count: number }
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { NotificationService } from '../../../services/notification-service.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create handler for POST /api/notifications/unread-count
|
||||||
|
*
|
||||||
|
* @param notificationService - Instance of NotificationService
|
||||||
|
* @returns Express request handler
|
||||||
|
*/
|
||||||
|
export function createUnreadCountHandler(notificationService: NotificationService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body;
|
||||||
|
|
||||||
|
if (!projectPath || typeof projectPath !== 'string') {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const count = await notificationService.getUnreadCount(projectPath);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
count,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Get unread count failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -4,12 +4,58 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
||||||
|
import { getBacklogPlanStatus, getRunningDetails } from '../../backlog-plan/common.js';
|
||||||
|
import { getAllRunningGenerations } from '../../app-spec/common.js';
|
||||||
|
import path from 'path';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createIndexHandler(autoModeService: AutoModeService) {
|
export function createIndexHandler(autoModeService: AutoModeService) {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const runningAgents = await autoModeService.getRunningAgents();
|
const runningAgents = [...(await autoModeService.getRunningAgents())];
|
||||||
|
const backlogPlanStatus = getBacklogPlanStatus();
|
||||||
|
const backlogPlanDetails = getRunningDetails();
|
||||||
|
|
||||||
|
if (backlogPlanStatus.isRunning && backlogPlanDetails) {
|
||||||
|
runningAgents.push({
|
||||||
|
featureId: `backlog-plan:${backlogPlanDetails.projectPath}`,
|
||||||
|
projectPath: backlogPlanDetails.projectPath,
|
||||||
|
projectName: path.basename(backlogPlanDetails.projectPath),
|
||||||
|
isAutoMode: false,
|
||||||
|
title: 'Backlog plan',
|
||||||
|
description: backlogPlanDetails.prompt,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add spec/feature generation tasks
|
||||||
|
const specGenerations = getAllRunningGenerations();
|
||||||
|
for (const generation of specGenerations) {
|
||||||
|
let title: string;
|
||||||
|
let description: string;
|
||||||
|
|
||||||
|
switch (generation.type) {
|
||||||
|
case 'feature_generation':
|
||||||
|
title = 'Generating features from spec';
|
||||||
|
description = 'Creating features from the project specification';
|
||||||
|
break;
|
||||||
|
case 'sync':
|
||||||
|
title = 'Syncing spec with code';
|
||||||
|
description = 'Updating spec from codebase and completed features';
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
title = 'Regenerating spec';
|
||||||
|
description = 'Analyzing project and generating specification';
|
||||||
|
}
|
||||||
|
|
||||||
|
runningAgents.push({
|
||||||
|
featureId: `spec-generation:${generation.projectPath}`,
|
||||||
|
projectPath: generation.projectPath,
|
||||||
|
projectName: path.basename(generation.projectPath),
|
||||||
|
isAutoMode: false,
|
||||||
|
title,
|
||||||
|
description,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
* Each provider shows: `{ configured: boolean, masked: string }`
|
* Each provider shows: `{ configured: boolean, masked: string }`
|
||||||
* Masked shows first 4 and last 4 characters for verification.
|
* Masked shows first 4 and last 4 characters for verification.
|
||||||
*
|
*
|
||||||
* Response: `{ "success": true, "credentials": { anthropic } }`
|
* Response: `{ "success": true, "credentials": { anthropic, google, openai } }`
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/**
|
/**
|
||||||
* PUT /api/settings/credentials - Update API credentials
|
* PUT /api/settings/credentials - Update API credentials
|
||||||
*
|
*
|
||||||
* Updates API keys for Anthropic. Partial updates supported.
|
* Updates API keys for supported providers. Partial updates supported.
|
||||||
* Returns masked credentials for verification without exposing full keys.
|
* Returns masked credentials for verification without exposing full keys.
|
||||||
*
|
*
|
||||||
* Request body: `Partial<Credentials>` (usually just apiKeys)
|
* Request body: `Partial<Credentials>` (usually just apiKeys)
|
||||||
|
|||||||
@@ -12,6 +12,18 @@ import type { Request, Response } from 'express';
|
|||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import type { GlobalSettings } from '../../../types/settings.js';
|
import type { GlobalSettings } from '../../../types/settings.js';
|
||||||
import { getErrorMessage, logError, logger } from '../common.js';
|
import { getErrorMessage, logError, logger } from '../common.js';
|
||||||
|
import { setLogLevel, LogLevel } from '@automaker/utils';
|
||||||
|
import { setRequestLoggingEnabled } from '../../../index.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Map server log level string to LogLevel enum
|
||||||
|
*/
|
||||||
|
const LOG_LEVEL_MAP: Record<string, LogLevel> = {
|
||||||
|
error: LogLevel.ERROR,
|
||||||
|
warn: LogLevel.WARN,
|
||||||
|
info: LogLevel.INFO,
|
||||||
|
debug: LogLevel.DEBUG,
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create handler factory for PUT /api/settings/global
|
* Create handler factory for PUT /api/settings/global
|
||||||
@@ -46,6 +58,23 @@ export function createUpdateGlobalHandler(settingsService: SettingsService) {
|
|||||||
|
|
||||||
const settings = await settingsService.updateGlobalSettings(updates);
|
const settings = await settingsService.updateGlobalSettings(updates);
|
||||||
|
|
||||||
|
// Apply server log level if it was updated
|
||||||
|
if ('serverLogLevel' in updates && updates.serverLogLevel) {
|
||||||
|
const level = LOG_LEVEL_MAP[updates.serverLogLevel];
|
||||||
|
if (level !== undefined) {
|
||||||
|
setLogLevel(level);
|
||||||
|
logger.info(`Server log level changed to: ${updates.serverLogLevel}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply request logging setting if it was updated
|
||||||
|
if ('enableRequestLogging' in updates && typeof updates.enableRequestLogging === 'boolean') {
|
||||||
|
setRequestLoggingEnabled(updates.enableRequestLogging);
|
||||||
|
logger.info(
|
||||||
|
`HTTP request logging ${updates.enableRequestLogging ? 'enabled' : 'disabled'}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
settings,
|
settings,
|
||||||
|
|||||||
@@ -3,6 +3,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import { Router } from 'express';
|
import { Router } from 'express';
|
||||||
|
import { createStatusHandler } from './routes/status.js';
|
||||||
import { createClaudeStatusHandler } from './routes/claude-status.js';
|
import { createClaudeStatusHandler } from './routes/claude-status.js';
|
||||||
import { createInstallClaudeHandler } from './routes/install-claude.js';
|
import { createInstallClaudeHandler } from './routes/install-claude.js';
|
||||||
import { createAuthClaudeHandler } from './routes/auth-claude.js';
|
import { createAuthClaudeHandler } from './routes/auth-claude.js';
|
||||||
@@ -12,6 +13,10 @@ import { createApiKeysHandler } from './routes/api-keys.js';
|
|||||||
import { createPlatformHandler } from './routes/platform.js';
|
import { createPlatformHandler } from './routes/platform.js';
|
||||||
import { createVerifyClaudeAuthHandler } from './routes/verify-claude-auth.js';
|
import { createVerifyClaudeAuthHandler } from './routes/verify-claude-auth.js';
|
||||||
import { createVerifyCodexAuthHandler } from './routes/verify-codex-auth.js';
|
import { createVerifyCodexAuthHandler } from './routes/verify-codex-auth.js';
|
||||||
|
import { createVerifyCodeRabbitAuthHandler } from './routes/verify-coderabbit-auth.js';
|
||||||
|
import { createCodeRabbitStatusHandler } from './routes/coderabbit-status.js';
|
||||||
|
import { createAuthCodeRabbitHandler } from './routes/auth-coderabbit.js';
|
||||||
|
import { createDeauthCodeRabbitHandler } from './routes/deauth-coderabbit.js';
|
||||||
import { createGhStatusHandler } from './routes/gh-status.js';
|
import { createGhStatusHandler } from './routes/gh-status.js';
|
||||||
import { createCursorStatusHandler } from './routes/cursor-status.js';
|
import { createCursorStatusHandler } from './routes/cursor-status.js';
|
||||||
import { createCodexStatusHandler } from './routes/codex-status.js';
|
import { createCodexStatusHandler } from './routes/codex-status.js';
|
||||||
@@ -24,6 +29,12 @@ import { createDeauthCursorHandler } from './routes/deauth-cursor.js';
|
|||||||
import { createAuthOpencodeHandler } from './routes/auth-opencode.js';
|
import { createAuthOpencodeHandler } from './routes/auth-opencode.js';
|
||||||
import { createDeauthOpencodeHandler } from './routes/deauth-opencode.js';
|
import { createDeauthOpencodeHandler } from './routes/deauth-opencode.js';
|
||||||
import { createOpencodeStatusHandler } from './routes/opencode-status.js';
|
import { createOpencodeStatusHandler } from './routes/opencode-status.js';
|
||||||
|
import {
|
||||||
|
createGetOpencodeModelsHandler,
|
||||||
|
createRefreshOpencodeModelsHandler,
|
||||||
|
createGetOpencodeProvidersHandler,
|
||||||
|
createClearOpencodeCacheHandler,
|
||||||
|
} from './routes/opencode-models.js';
|
||||||
import {
|
import {
|
||||||
createGetCursorConfigHandler,
|
createGetCursorConfigHandler,
|
||||||
createSetCursorDefaultModelHandler,
|
createSetCursorDefaultModelHandler,
|
||||||
@@ -38,6 +49,9 @@ import {
|
|||||||
export function createSetupRoutes(): Router {
|
export function createSetupRoutes(): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
|
// Unified CLI status endpoint
|
||||||
|
router.get('/status', createStatusHandler());
|
||||||
|
|
||||||
router.get('/claude-status', createClaudeStatusHandler());
|
router.get('/claude-status', createClaudeStatusHandler());
|
||||||
router.post('/install-claude', createInstallClaudeHandler());
|
router.post('/install-claude', createInstallClaudeHandler());
|
||||||
router.post('/auth-claude', createAuthClaudeHandler());
|
router.post('/auth-claude', createAuthClaudeHandler());
|
||||||
@@ -48,6 +62,7 @@ export function createSetupRoutes(): Router {
|
|||||||
router.get('/platform', createPlatformHandler());
|
router.get('/platform', createPlatformHandler());
|
||||||
router.post('/verify-claude-auth', createVerifyClaudeAuthHandler());
|
router.post('/verify-claude-auth', createVerifyClaudeAuthHandler());
|
||||||
router.post('/verify-codex-auth', createVerifyCodexAuthHandler());
|
router.post('/verify-codex-auth', createVerifyCodexAuthHandler());
|
||||||
|
router.post('/verify-coderabbit-auth', createVerifyCodeRabbitAuthHandler());
|
||||||
router.get('/gh-status', createGhStatusHandler());
|
router.get('/gh-status', createGhStatusHandler());
|
||||||
|
|
||||||
// Cursor CLI routes
|
// Cursor CLI routes
|
||||||
@@ -65,6 +80,17 @@ export function createSetupRoutes(): Router {
|
|||||||
router.get('/opencode-status', createOpencodeStatusHandler());
|
router.get('/opencode-status', createOpencodeStatusHandler());
|
||||||
router.post('/auth-opencode', createAuthOpencodeHandler());
|
router.post('/auth-opencode', createAuthOpencodeHandler());
|
||||||
router.post('/deauth-opencode', createDeauthOpencodeHandler());
|
router.post('/deauth-opencode', createDeauthOpencodeHandler());
|
||||||
|
|
||||||
|
// CodeRabbit CLI routes
|
||||||
|
router.get('/coderabbit-status', createCodeRabbitStatusHandler());
|
||||||
|
router.post('/auth-coderabbit', createAuthCodeRabbitHandler());
|
||||||
|
router.post('/deauth-coderabbit', createDeauthCodeRabbitHandler());
|
||||||
|
|
||||||
|
// OpenCode Dynamic Model Discovery routes
|
||||||
|
router.get('/opencode/models', createGetOpencodeModelsHandler());
|
||||||
|
router.post('/opencode/models/refresh', createRefreshOpencodeModelsHandler());
|
||||||
|
router.get('/opencode/providers', createGetOpencodeProvidersHandler());
|
||||||
|
router.post('/opencode/cache/clear', createClearOpencodeCacheHandler());
|
||||||
router.get('/cursor-config', createGetCursorConfigHandler());
|
router.get('/cursor-config', createGetCursorConfigHandler());
|
||||||
router.post('/cursor-config/default-model', createSetCursorDefaultModelHandler());
|
router.post('/cursor-config/default-model', createSetCursorDefaultModelHandler());
|
||||||
router.post('/cursor-config/models', createSetCursorModelsHandler());
|
router.post('/cursor-config/models', createSetCursorModelsHandler());
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ export function createApiKeysHandler() {
|
|||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
hasAnthropicKey: !!getApiKey('anthropic') || !!process.env.ANTHROPIC_API_KEY,
|
hasAnthropicKey: !!getApiKey('anthropic') || !!process.env.ANTHROPIC_API_KEY,
|
||||||
|
hasGoogleKey: !!getApiKey('google'),
|
||||||
hasOpenaiKey: !!getApiKey('openai') || !!process.env.OPENAI_API_KEY,
|
hasOpenaiKey: !!getApiKey('openai') || !!process.env.OPENAI_API_KEY,
|
||||||
});
|
});
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
80
apps/server/src/routes/setup/routes/auth-coderabbit.ts
Normal file
80
apps/server/src/routes/setup/routes/auth-coderabbit.ts
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
/**
|
||||||
|
* POST /auth-coderabbit endpoint - Authenticate CodeRabbit CLI via OAuth
|
||||||
|
*
|
||||||
|
* CodeRabbit CLI requires interactive authentication:
|
||||||
|
* 1. Run `cr auth login`
|
||||||
|
* 2. Browser opens with OAuth flow
|
||||||
|
* 3. After browser auth, CLI shows a token
|
||||||
|
* 4. User must press Enter to confirm
|
||||||
|
*
|
||||||
|
* Since step 4 requires interactive input, we can't fully automate this.
|
||||||
|
* Instead, we provide the command for the user to run manually.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import { logError, getErrorMessage } from '../common.js';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find the CodeRabbit CLI command (coderabbit or cr)
|
||||||
|
*/
|
||||||
|
function findCodeRabbitCommand(): string | null {
|
||||||
|
const commands = ['coderabbit', 'cr'];
|
||||||
|
for (const command of commands) {
|
||||||
|
try {
|
||||||
|
const whichCommand = process.platform === 'win32' ? 'where' : 'which';
|
||||||
|
const result = execSync(`${whichCommand} ${command}`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 2000,
|
||||||
|
}).trim();
|
||||||
|
if (result) {
|
||||||
|
return result.split('\n')[0];
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Command not found, try next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createAuthCodeRabbitHandler() {
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
// Remove the disconnected marker file to reconnect the app to the CLI
|
||||||
|
const markerPath = path.join(process.cwd(), '.automaker', '.coderabbit-disconnected');
|
||||||
|
if (fs.existsSync(markerPath)) {
|
||||||
|
fs.unlinkSync(markerPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find CodeRabbit CLI
|
||||||
|
const cliPath = findCodeRabbitCommand();
|
||||||
|
if (!cliPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'CodeRabbit CLI is not installed. Please install it first.',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeRabbit CLI requires interactive input (pressing Enter after OAuth)
|
||||||
|
// We can't automate this, so we return the command for the user to run
|
||||||
|
const command = cliPath.includes('coderabbit') ? 'coderabbit auth login' : 'cr auth login';
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
requiresManualAuth: true,
|
||||||
|
command,
|
||||||
|
message: `Please run "${command}" in your terminal to authenticate. After completing OAuth in your browser, press Enter in the terminal to confirm.`,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Auth CodeRabbit failed');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
message: 'Failed to initiate CodeRabbit authentication',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
240
apps/server/src/routes/setup/routes/coderabbit-status.ts
Normal file
240
apps/server/src/routes/setup/routes/coderabbit-status.ts
Normal file
@@ -0,0 +1,240 @@
|
|||||||
|
/**
|
||||||
|
* GET /coderabbit-status endpoint - Get CodeRabbit CLI installation and auth status
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { spawn, execSync } from 'child_process';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
|
||||||
|
const DISCONNECTED_MARKER_FILE = '.coderabbit-disconnected';
|
||||||
|
|
||||||
|
function isCodeRabbitDisconnectedFromApp(): boolean {
|
||||||
|
try {
|
||||||
|
const projectRoot = process.cwd();
|
||||||
|
const markerPath = path.join(projectRoot, '.automaker', DISCONNECTED_MARKER_FILE);
|
||||||
|
return fs.existsSync(markerPath);
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find the CodeRabbit CLI command (coderabbit or cr)
|
||||||
|
*/
|
||||||
|
function findCodeRabbitCommand(): string | null {
|
||||||
|
const commands = ['coderabbit', 'cr'];
|
||||||
|
for (const command of commands) {
|
||||||
|
try {
|
||||||
|
const whichCommand = process.platform === 'win32' ? 'where' : 'which';
|
||||||
|
const result = execSync(`${whichCommand} ${command}`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 2000,
|
||||||
|
}).trim();
|
||||||
|
if (result) {
|
||||||
|
return result.split('\n')[0];
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Command not found, try next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get CodeRabbit CLI version
|
||||||
|
*/
|
||||||
|
async function getCodeRabbitVersion(command: string): Promise<string | null> {
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const child = spawn(command, ['--version'], {
|
||||||
|
stdio: 'pipe',
|
||||||
|
timeout: 5000,
|
||||||
|
});
|
||||||
|
|
||||||
|
let stdout = '';
|
||||||
|
child.stdout?.on('data', (data) => {
|
||||||
|
stdout += data.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('close', (code) => {
|
||||||
|
if (code === 0 && stdout) {
|
||||||
|
resolve(stdout.trim());
|
||||||
|
} else {
|
||||||
|
resolve(null);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('error', () => {
|
||||||
|
resolve(null);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
interface CodeRabbitAuthInfo {
|
||||||
|
authenticated: boolean;
|
||||||
|
method: 'oauth' | 'none';
|
||||||
|
username?: string;
|
||||||
|
email?: string;
|
||||||
|
organization?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check CodeRabbit CLI authentication status
|
||||||
|
* Parses output like:
|
||||||
|
* ```
|
||||||
|
* CodeRabbit CLI Status
|
||||||
|
* ✅ Authentication: Logged in
|
||||||
|
* User Information:
|
||||||
|
* 👤 Name: Kacper
|
||||||
|
* 📧 Email: kacperlachowiczwp.pl@wp.pl
|
||||||
|
* 🔧 Username: Shironex
|
||||||
|
* Organization Information:
|
||||||
|
* 🏢 Name: Anime-World-SPZOO
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
async function getCodeRabbitAuthStatus(command: string): Promise<CodeRabbitAuthInfo> {
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const child = spawn(command, ['auth', 'status'], {
|
||||||
|
stdio: 'pipe',
|
||||||
|
timeout: 10000,
|
||||||
|
});
|
||||||
|
|
||||||
|
let stdout = '';
|
||||||
|
let stderr = '';
|
||||||
|
|
||||||
|
child.stdout?.on('data', (data) => {
|
||||||
|
stdout += data.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
child.stderr?.on('data', (data) => {
|
||||||
|
stderr += data.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('close', (code) => {
|
||||||
|
const output = stdout + stderr;
|
||||||
|
|
||||||
|
// Check for "Logged in" in Authentication line
|
||||||
|
const isAuthenticated =
|
||||||
|
code === 0 &&
|
||||||
|
(output.includes('Logged in') || output.includes('logged in')) &&
|
||||||
|
!output.toLowerCase().includes('not logged in');
|
||||||
|
|
||||||
|
if (isAuthenticated) {
|
||||||
|
// Parse the structured output format
|
||||||
|
// Username: look for "Username: <value>" line
|
||||||
|
const usernameMatch = output.match(/Username:\s*(\S+)/i);
|
||||||
|
// Email: look for "Email: <value>" line
|
||||||
|
const emailMatch = output.match(/Email:\s*(\S+@\S+)/i);
|
||||||
|
// Organization: look for "Name: <value>" under Organization Information
|
||||||
|
// The org name appears after "Organization Information:" section
|
||||||
|
const orgSection = output.split(/Organization Information:/i)[1];
|
||||||
|
const orgMatch = orgSection?.match(/Name:\s*(.+?)(?:\n|$)/i);
|
||||||
|
|
||||||
|
resolve({
|
||||||
|
authenticated: true,
|
||||||
|
method: 'oauth',
|
||||||
|
username: usernameMatch?.[1]?.trim(),
|
||||||
|
email: emailMatch?.[1]?.trim(),
|
||||||
|
organization: orgMatch?.[1]?.trim(),
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
resolve({
|
||||||
|
authenticated: false,
|
||||||
|
method: 'none',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('error', () => {
|
||||||
|
resolve({
|
||||||
|
authenticated: false,
|
||||||
|
method: 'none',
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates handler for GET /api/setup/coderabbit-status
|
||||||
|
* Returns CodeRabbit CLI installation and authentication status
|
||||||
|
*/
|
||||||
|
export function createCodeRabbitStatusHandler() {
|
||||||
|
const installCommand = 'npm install -g coderabbit';
|
||||||
|
const loginCommand = 'coderabbit auth login';
|
||||||
|
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
// Check if user has manually disconnected from the app
|
||||||
|
if (isCodeRabbitDisconnectedFromApp()) {
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
installed: true,
|
||||||
|
version: null,
|
||||||
|
path: null,
|
||||||
|
auth: {
|
||||||
|
authenticated: false,
|
||||||
|
method: 'none',
|
||||||
|
},
|
||||||
|
recommendation: 'CodeRabbit CLI is disconnected. Click Sign In to reconnect.',
|
||||||
|
installCommand,
|
||||||
|
loginCommand,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find CodeRabbit CLI
|
||||||
|
const cliPath = findCodeRabbitCommand();
|
||||||
|
|
||||||
|
if (!cliPath) {
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
installed: false,
|
||||||
|
version: null,
|
||||||
|
path: null,
|
||||||
|
auth: {
|
||||||
|
authenticated: false,
|
||||||
|
method: 'none',
|
||||||
|
},
|
||||||
|
recommendation: 'Install CodeRabbit CLI to enable AI-powered code reviews.',
|
||||||
|
installCommand,
|
||||||
|
loginCommand,
|
||||||
|
installCommands: {
|
||||||
|
macos: 'curl -fsSL https://coderabbit.ai/install | bash',
|
||||||
|
npm: installCommand,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get version
|
||||||
|
const version = await getCodeRabbitVersion(cliPath);
|
||||||
|
|
||||||
|
// Get auth status
|
||||||
|
const authStatus = await getCodeRabbitAuthStatus(cliPath);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
installed: true,
|
||||||
|
version,
|
||||||
|
path: cliPath,
|
||||||
|
auth: authStatus,
|
||||||
|
recommendation: authStatus.authenticated
|
||||||
|
? undefined
|
||||||
|
: 'Sign in to CodeRabbit to enable AI-powered code reviews.',
|
||||||
|
installCommand,
|
||||||
|
loginCommand,
|
||||||
|
installCommands: {
|
||||||
|
macos: 'curl -fsSL https://coderabbit.ai/install | bash',
|
||||||
|
npm: installCommand,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Get CodeRabbit status failed');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
113
apps/server/src/routes/setup/routes/deauth-coderabbit.ts
Normal file
113
apps/server/src/routes/setup/routes/deauth-coderabbit.ts
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
/**
|
||||||
|
* POST /deauth-coderabbit endpoint - Sign out from CodeRabbit CLI
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { spawn, execSync } from 'child_process';
|
||||||
|
import { logError, getErrorMessage } from '../common.js';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as path from 'path';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find the CodeRabbit CLI command (coderabbit or cr)
|
||||||
|
*/
|
||||||
|
function findCodeRabbitCommand(): string | null {
|
||||||
|
const commands = ['coderabbit', 'cr'];
|
||||||
|
for (const command of commands) {
|
||||||
|
try {
|
||||||
|
const whichCommand = process.platform === 'win32' ? 'where' : 'which';
|
||||||
|
const result = execSync(`${whichCommand} ${command}`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 2000,
|
||||||
|
}).trim();
|
||||||
|
if (result) {
|
||||||
|
return result.split('\n')[0];
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Command not found, try next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createDeauthCodeRabbitHandler() {
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
// Find CodeRabbit CLI
|
||||||
|
const cliPath = findCodeRabbitCommand();
|
||||||
|
|
||||||
|
if (cliPath) {
|
||||||
|
// Try to run the CLI logout command
|
||||||
|
const logoutResult = await new Promise<{ success: boolean; error?: string }>((resolve) => {
|
||||||
|
const child = spawn(cliPath, ['auth', 'logout'], {
|
||||||
|
stdio: 'pipe',
|
||||||
|
timeout: 10000,
|
||||||
|
});
|
||||||
|
|
||||||
|
let stderr = '';
|
||||||
|
child.stderr?.on('data', (data) => {
|
||||||
|
stderr += data.toString();
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('close', (code) => {
|
||||||
|
if (code === 0) {
|
||||||
|
resolve({ success: true });
|
||||||
|
} else {
|
||||||
|
resolve({ success: false, error: stderr || 'Logout command failed' });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on('error', (err) => {
|
||||||
|
resolve({ success: false, error: err.message });
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!logoutResult.success) {
|
||||||
|
// CLI logout failed, create marker file as fallback
|
||||||
|
const automakerDir = path.join(process.cwd(), '.automaker');
|
||||||
|
const markerPath = path.join(automakerDir, '.coderabbit-disconnected');
|
||||||
|
|
||||||
|
if (!fs.existsSync(automakerDir)) {
|
||||||
|
fs.mkdirSync(automakerDir, { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.writeFileSync(
|
||||||
|
markerPath,
|
||||||
|
JSON.stringify({
|
||||||
|
disconnectedAt: new Date().toISOString(),
|
||||||
|
message: 'CodeRabbit CLI is disconnected from the app',
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// CLI not installed, just create marker file
|
||||||
|
const automakerDir = path.join(process.cwd(), '.automaker');
|
||||||
|
const markerPath = path.join(automakerDir, '.coderabbit-disconnected');
|
||||||
|
|
||||||
|
if (!fs.existsSync(automakerDir)) {
|
||||||
|
fs.mkdirSync(automakerDir, { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.writeFileSync(
|
||||||
|
markerPath,
|
||||||
|
JSON.stringify({
|
||||||
|
disconnectedAt: new Date().toISOString(),
|
||||||
|
message: 'CodeRabbit CLI is disconnected from the app',
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'Successfully signed out from CodeRabbit CLI',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Deauth CodeRabbit failed');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
message: 'Failed to sign out from CodeRabbit CLI',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
189
apps/server/src/routes/setup/routes/opencode-models.ts
Normal file
189
apps/server/src/routes/setup/routes/opencode-models.ts
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
/**
|
||||||
|
* OpenCode Dynamic Models API Routes
|
||||||
|
*
|
||||||
|
* Provides endpoints for:
|
||||||
|
* - GET /api/setup/opencode/models - Get available models (cached or refreshed)
|
||||||
|
* - POST /api/setup/opencode/models/refresh - Force refresh models from CLI
|
||||||
|
* - GET /api/setup/opencode/providers - Get authenticated providers
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import {
|
||||||
|
OpencodeProvider,
|
||||||
|
type OpenCodeProviderInfo,
|
||||||
|
} from '../../../providers/opencode-provider.js';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
import type { ModelDefinition } from '@automaker/types';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
|
const logger = createLogger('OpenCodeModelsRoute');
|
||||||
|
|
||||||
|
// Singleton provider instance for caching
|
||||||
|
let providerInstance: OpencodeProvider | null = null;
|
||||||
|
|
||||||
|
function getProvider(): OpencodeProvider {
|
||||||
|
if (!providerInstance) {
|
||||||
|
providerInstance = new OpencodeProvider();
|
||||||
|
}
|
||||||
|
return providerInstance;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Response type for the models endpoints (GET /models, POST /models/refresh).
 */
interface ModelsResponse {
  // False when the request failed; `error` is set in that case.
  success: boolean;
  // Available models (present on success).
  models?: ModelDefinition[];
  // Convenience count, equal to models.length.
  count?: number;
  // True when served from the provider cache, false after a CLI refresh.
  cached?: boolean;
  // Error message (present on failure).
  error?: string;
}
|
||||||
|
|
||||||
|
/**
 * Response type for the providers endpoint (GET /providers).
 */
interface ProvidersResponse {
  // False when the request failed; `error` is set in that case.
  success: boolean;
  // All providers reported by the OpenCode CLI (present on success).
  providers?: OpenCodeProviderInfo[];
  // Subset of `providers` whose `authenticated` flag is true.
  authenticated?: OpenCodeProviderInfo[];
  // Error message (present on failure).
  error?: string;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates handler for GET /api/setup/opencode/models
|
||||||
|
*
|
||||||
|
* Returns currently available models (from cache if available).
|
||||||
|
* Query params:
|
||||||
|
* - refresh=true: Force refresh from CLI before returning
|
||||||
|
*
|
||||||
|
* Note: If cache is empty, this will trigger a refresh to get dynamic models.
|
||||||
|
*/
|
||||||
|
export function createGetOpencodeModelsHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const provider = getProvider();
|
||||||
|
const forceRefresh = req.query.refresh === 'true';
|
||||||
|
|
||||||
|
let models: ModelDefinition[];
|
||||||
|
let cached = true;
|
||||||
|
|
||||||
|
if (forceRefresh) {
|
||||||
|
models = await provider.refreshModels();
|
||||||
|
cached = false;
|
||||||
|
} else {
|
||||||
|
// Check if we have cached models
|
||||||
|
const cachedModels = provider.getAvailableModels();
|
||||||
|
|
||||||
|
// If cache only has default models (provider.hasCachedModels() would be false),
|
||||||
|
// trigger a refresh to get dynamic models
|
||||||
|
if (!provider.hasCachedModels()) {
|
||||||
|
models = await provider.refreshModels();
|
||||||
|
cached = false;
|
||||||
|
} else {
|
||||||
|
models = cachedModels;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const response: ModelsResponse = {
|
||||||
|
success: true,
|
||||||
|
models,
|
||||||
|
count: models.length,
|
||||||
|
cached,
|
||||||
|
};
|
||||||
|
|
||||||
|
res.json(response);
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Get OpenCode models failed');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
} as ModelsResponse);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates handler for POST /api/setup/opencode/models/refresh
|
||||||
|
*
|
||||||
|
* Forces a refresh of models from the OpenCode CLI.
|
||||||
|
*/
|
||||||
|
export function createRefreshOpencodeModelsHandler() {
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const provider = getProvider();
|
||||||
|
const models = await provider.refreshModels();
|
||||||
|
|
||||||
|
const response: ModelsResponse = {
|
||||||
|
success: true,
|
||||||
|
models,
|
||||||
|
count: models.length,
|
||||||
|
cached: false,
|
||||||
|
};
|
||||||
|
|
||||||
|
res.json(response);
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Refresh OpenCode models failed');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
} as ModelsResponse);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates handler for GET /api/setup/opencode/providers
|
||||||
|
*
|
||||||
|
* Returns authenticated providers from OpenCode CLI.
|
||||||
|
* This calls `opencode auth list` to get provider status.
|
||||||
|
*/
|
||||||
|
export function createGetOpencodeProvidersHandler() {
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const provider = getProvider();
|
||||||
|
const providers = await provider.fetchAuthenticatedProviders();
|
||||||
|
|
||||||
|
// Filter to only authenticated providers
|
||||||
|
const authenticated = providers.filter((p) => p.authenticated);
|
||||||
|
|
||||||
|
const response: ProvidersResponse = {
|
||||||
|
success: true,
|
||||||
|
providers,
|
||||||
|
authenticated,
|
||||||
|
};
|
||||||
|
|
||||||
|
res.json(response);
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Get OpenCode providers failed');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
} as ProvidersResponse);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates handler for POST /api/setup/opencode/cache/clear
|
||||||
|
*
|
||||||
|
* Clears the model cache, forcing a fresh fetch on next access.
|
||||||
|
*/
|
||||||
|
export function createClearOpencodeCacheHandler() {
|
||||||
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const provider = getProvider();
|
||||||
|
provider.clearModelCache();
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'OpenCode model cache cleared',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Clear OpenCode cache failed');
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user