mirror of
https://github.com/AutoMaker-Org/automaker.git
synced 2026-03-17 10:03:08 +00:00
Compare commits
840 Commits
v0.9.0
...
feature/pu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa345a50ac | ||
|
|
0e020f7e4a | ||
|
|
0a5540c9a2 | ||
|
|
7df2182818 | ||
|
|
ee52333636 | ||
|
|
47bd7a76cf | ||
|
|
ae10dea2bf | ||
|
|
be4153c374 | ||
|
|
a144a63c51 | ||
|
|
205f662022 | ||
|
|
53d07fefb8 | ||
|
|
2d907938cc | ||
|
|
15ca1eb6d3 | ||
|
|
4ee160fae4 | ||
|
|
4ba0026aa1 | ||
|
|
983eb21faa | ||
|
|
df9a6314da | ||
|
|
6903d3c508 | ||
|
|
5c441f2313 | ||
|
|
00f9891237 | ||
|
|
d30296d559 | ||
|
|
e6e04d57bc | ||
|
|
829c16181b | ||
|
|
13261b7e8c | ||
|
|
854ba6ec74 | ||
|
|
bddf1a4bf8 | ||
|
|
887e2ea76b | ||
|
|
dd4c738e91 | ||
|
|
43c19c70ca | ||
|
|
627580a8f0 | ||
|
|
a2d5c1d546 | ||
|
|
6b9946df95 | ||
|
|
cb99c4b4e8 | ||
|
|
9af63bc1ef | ||
|
|
17a99a0e20 | ||
|
|
f4e87d4c25 | ||
|
|
c7f515adde | ||
|
|
1df778a9db | ||
|
|
cb44f8a717 | ||
|
|
7fcf3c1e1f | ||
|
|
de021f96bf | ||
|
|
8bb10632b1 | ||
|
|
06ef4f883f | ||
|
|
7e84591ef1 | ||
|
|
efcdd849b9 | ||
|
|
dee770c2ab | ||
|
|
f7b3f75163 | ||
|
|
b5ad77b0f9 | ||
|
|
98b925b821 | ||
|
|
a09a2c76ae | ||
|
|
b9653d6338 | ||
|
|
44ef2084cf | ||
|
|
57446b4fba | ||
|
|
fa799d3cb5 | ||
|
|
78ec389477 | ||
|
|
f06088a062 | ||
|
|
8af1b8bd08 | ||
|
|
d5340fd1a4 | ||
|
|
aa940d44ff | ||
|
|
381698b048 | ||
|
|
30fce3f746 | ||
|
|
4a8c6b0eba | ||
|
|
416ef3a394 | ||
|
|
2805c0ea53 | ||
|
|
727a7a5b9d | ||
|
|
46dd219d15 | ||
|
|
67dd628115 | ||
|
|
ab5d6a0e54 | ||
|
|
0b03e70f1d | ||
|
|
434792a2ef | ||
|
|
462dbf1522 | ||
|
|
eed5e20438 | ||
|
|
bea26a6b61 | ||
|
|
e9802ac00c | ||
|
|
41014f6ab6 | ||
|
|
ac2e8cfa88 | ||
|
|
7d5bc722fa | ||
|
|
7765a12868 | ||
|
|
dfe6920df9 | ||
|
|
525b2f82b6 | ||
|
|
f459b73cb5 | ||
|
|
a935229031 | ||
|
|
a3a5c9e2cb | ||
|
|
1662c6bf0b | ||
|
|
a08ba1b517 | ||
|
|
8226699734 | ||
|
|
d4439fafa0 | ||
|
|
6f1325f3ee | ||
|
|
d4f68b659b | ||
|
|
ad6ce738b4 | ||
|
|
67ebf8c14b | ||
|
|
8ed13564f6 | ||
|
|
09507bff67 | ||
|
|
c70344156d | ||
|
|
8542a32f4f | ||
|
|
0745832d1e | ||
|
|
0f0f5159d2 | ||
|
|
bcc854234c | ||
|
|
5ffbfb3217 | ||
|
|
7c89923a6e | ||
|
|
63b1a353d9 | ||
|
|
49bdaaae71 | ||
|
|
28224e1051 | ||
|
|
df10bcd6df | ||
|
|
0ed4494992 | ||
|
|
43309e383f | ||
|
|
efd4284c10 | ||
|
|
473f935c90 | ||
|
|
7fd3d61a59 | ||
|
|
7bc1f68699 | ||
|
|
ade22ef258 | ||
|
|
31f8afc115 | ||
|
|
071af1b5c3 | ||
|
|
1b32a6bc3a | ||
|
|
a0484624b7 | ||
|
|
0383f85507 | ||
|
|
1a7dd5d1eb | ||
|
|
afa60399dc | ||
|
|
1b39e25497 | ||
|
|
828d0a0148 | ||
|
|
18624d12ce | ||
|
|
71a0309a0b | ||
|
|
e0f785aa99 | ||
|
|
2aa156ecbf | ||
|
|
94a8e09516 | ||
|
|
78072550c7 | ||
|
|
0cd149f2e3 | ||
|
|
2e577bb230 | ||
|
|
4f00b41cb0 | ||
|
|
ba45587a0a | ||
|
|
4912d37990 | ||
|
|
b24839bc49 | ||
|
|
e3a1c8c312 | ||
|
|
8f245e7757 | ||
|
|
cbb45b6612 | ||
|
|
25fa6fd616 | ||
|
|
ec5179eee9 | ||
|
|
2fac438cde | ||
|
|
5dca97dab4 | ||
|
|
58facb114c | ||
|
|
8387b7669d | ||
|
|
18fd1c6caa | ||
|
|
6029e95403 | ||
|
|
1eb28206c5 | ||
|
|
bc9dae0322 | ||
|
|
3bcdc883e6 | ||
|
|
c92c8e96b7 | ||
|
|
b73ef9f801 | ||
|
|
70fc03431c | ||
|
|
a0ea65d483 | ||
|
|
ef544e70c9 | ||
|
|
152cf00735 | ||
|
|
094f0809d7 | ||
|
|
61d43106c8 | ||
|
|
9c304eeec3 | ||
|
|
3563dd55da | ||
|
|
220c8e4ddf | ||
|
|
f97453484f | ||
|
|
835ffe3185 | ||
|
|
3b361cb0b9 | ||
|
|
d06d25b1b5 | ||
|
|
84570842d3 | ||
|
|
63cae19aec | ||
|
|
c9e721bda7 | ||
|
|
d4b7a0c57d | ||
|
|
0b6e84ec6e | ||
|
|
e9c2afcc02 | ||
|
|
88864ad6bc | ||
|
|
0aef72540e | ||
|
|
aad3ff2cdf | ||
|
|
ebc7987988 | ||
|
|
3ccea7a67b | ||
|
|
b37a287c9c | ||
|
|
45f6f17eb0 | ||
|
|
29b3eef500 | ||
|
|
010e516b0e | ||
|
|
00e4712ae7 | ||
|
|
4b4ae04fbe | ||
|
|
04775af561 | ||
|
|
b8fa7fc579 | ||
|
|
7fb0d0f2ca | ||
|
|
f15725f28a | ||
|
|
7d7d152d4e | ||
|
|
07f777da22 | ||
|
|
b10501ea79 | ||
|
|
1a460c301a | ||
|
|
c1f480fe49 | ||
|
|
ef3f8de33b | ||
|
|
d379bf412a | ||
|
|
cf35ca8650 | ||
|
|
4f1555f196 | ||
|
|
5aace0ce0f | ||
|
|
e439d8a632 | ||
|
|
b7c6b8bfc6 | ||
|
|
a60904bd51 | ||
|
|
d7c3337330 | ||
|
|
c848306e4c | ||
|
|
f0042312d0 | ||
|
|
e876d177b8 | ||
|
|
8caec15199 | ||
|
|
7fe9aacb09 | ||
|
|
f55c985634 | ||
|
|
38e8a4c4ea | ||
|
|
f3ce5ce8ab | ||
|
|
99de7813c9 | ||
|
|
2de3ae69d4 | ||
|
|
0b4e9573ed | ||
|
|
d7ad87bd1b | ||
|
|
615823652c | ||
|
|
2f883bad20 | ||
|
|
45706990df | ||
|
|
c9c406dd21 | ||
|
|
014736bc1d | ||
|
|
c05359c787 | ||
|
|
a32cb08d1e | ||
|
|
08d1497cbe | ||
|
|
5c335641fa | ||
|
|
0fb471ca15 | ||
|
|
b65037d995 | ||
|
|
5eda2c9b2b | ||
|
|
006152554b | ||
|
|
3b56d553c9 | ||
|
|
375f9ea9d4 | ||
|
|
bf25a7a4e5 | ||
|
|
5171abc37f | ||
|
|
9c8265c4e5 | ||
|
|
ef779daedf | ||
|
|
011ac404bb | ||
|
|
9587f13de5 | ||
|
|
08dc90b378 | ||
|
|
80ef21c8d0 | ||
|
|
98d98cc056 | ||
|
|
2a24377870 | ||
|
|
895e4c28ba | ||
|
|
ebf2fcadd6 | ||
|
|
019da6b77a | ||
|
|
605d9658d9 | ||
|
|
906f471521 | ||
|
|
a10ddadbde | ||
|
|
3399d48823 | ||
|
|
7f5c5e864d | ||
|
|
35d2d41821 | ||
|
|
6a3993385e | ||
|
|
df7024f4ea | ||
|
|
4485c49c9b | ||
|
|
7a5cb38a37 | ||
|
|
c9833b67a0 | ||
|
|
0f11ee2212 | ||
|
|
74b301c2d1 | ||
|
|
81ee2d1399 | ||
|
|
f025ced035 | ||
|
|
4f07948712 | ||
|
|
07f95ae13b | ||
|
|
8dd6ab2161 | ||
|
|
b5143f4b00 | ||
|
|
f5efa857ca | ||
|
|
c401bf4e63 | ||
|
|
43d5ec9aed | ||
|
|
f8108b1a6c | ||
|
|
076ab14a5e | ||
|
|
a4c43b99a5 | ||
|
|
0f00180c50 | ||
|
|
22853c988a | ||
|
|
e52837cbe7 | ||
|
|
d12e0705f0 | ||
|
|
a3e536b8e6 | ||
|
|
43661e5a6e | ||
|
|
1b2bf0df3f | ||
|
|
b1060c6a11 | ||
|
|
db87e83aed | ||
|
|
92b1fb3725 | ||
|
|
d7f86d142a | ||
|
|
bbe669cdf2 | ||
|
|
8e13245aab | ||
|
|
cec5f91a86 | ||
|
|
ed92d4fd80 | ||
|
|
a6190f71b3 | ||
|
|
d04934359a | ||
|
|
7246debb69 | ||
|
|
066ffe5639 | ||
|
|
7bf02b64fa | ||
|
|
a3c62e8358 | ||
|
|
1ecb97b71c | ||
|
|
1e87b73dfd | ||
|
|
5a3dac1533 | ||
|
|
f3b16ad8ce | ||
|
|
140c444e6f | ||
|
|
907c1d65b3 | ||
|
|
92f2702f3b | ||
|
|
735786701f | ||
|
|
900bbb5e80 | ||
|
|
bc3e3dad1c | ||
|
|
d8fa5c4cd1 | ||
|
|
f005c30017 | ||
|
|
4012a2964a | ||
|
|
0b92349890 | ||
|
|
51a75ae589 | ||
|
|
650edd69ca | ||
|
|
46abd34444 | ||
|
|
5cf817e9de | ||
|
|
42ee4f211d | ||
|
|
372cfe6982 | ||
|
|
1430fb6926 | ||
|
|
9e15f3609a | ||
|
|
b34ffd9565 | ||
|
|
ac9f33bd2b | ||
|
|
269b1c9478 | ||
|
|
7bc7918cc6 | ||
|
|
860d6836b9 | ||
|
|
5281b81ddf | ||
|
|
7a33940816 | ||
|
|
ee4464bdad | ||
|
|
7e1095b773 | ||
|
|
9d297c650a | ||
|
|
68d78f2f5b | ||
|
|
fb6d6bbf2f | ||
|
|
c8ed3fafce | ||
|
|
5939c5d20b | ||
|
|
ad6fc01045 | ||
|
|
ea34f304cb | ||
|
|
53ad78dfc8 | ||
|
|
26b819291f | ||
|
|
01859f3a9a | ||
|
|
a4214276d7 | ||
|
|
d09da4af20 | ||
|
|
afb6e14811 | ||
|
|
c65f931326 | ||
|
|
f480386905 | ||
|
|
7773db559d | ||
|
|
655f254538 | ||
|
|
b4be3c11e2 | ||
|
|
433e6016c3 | ||
|
|
02dfda108e | ||
|
|
57ce198ae9 | ||
|
|
733ca15e15 | ||
|
|
e110c058a2 | ||
|
|
0fdda11b09 | ||
|
|
0155da0be5 | ||
|
|
41b127ebf3 | ||
|
|
e7e83a30d9 | ||
|
|
40950b5fce | ||
|
|
3f05735be1 | ||
|
|
05f0ceceb6 | ||
|
|
28d50aa017 | ||
|
|
103c6bc8a0 | ||
|
|
6c47068f71 | ||
|
|
a9616ff309 | ||
|
|
4fa0923ff8 | ||
|
|
c3cecc18f2 | ||
|
|
3fcda8abfc | ||
|
|
a45ee59b7d | ||
|
|
662f854203 | ||
|
|
f2860d9366 | ||
|
|
6eb7acb6d4 | ||
|
|
4ab927a5fb | ||
|
|
02de3df3df | ||
|
|
b73885e04a | ||
|
|
afa93dde0d | ||
|
|
aac59c2b3a | ||
|
|
c3e7e57968 | ||
|
|
c55654b737 | ||
|
|
7bb97953a7 | ||
|
|
2214c2700b | ||
|
|
7bee54717c | ||
|
|
5ab53afd7f | ||
|
|
3ebd67f35f | ||
|
|
641bbde877 | ||
|
|
7c80249bbf | ||
|
|
a73a57b9a4 | ||
|
|
db71dc9aa5 | ||
|
|
a8ddd07442 | ||
|
|
2165223b49 | ||
|
|
3bde3d2732 | ||
|
|
900a312c92 | ||
|
|
69ff8df7c1 | ||
|
|
4f584f9a89 | ||
|
|
47a6033b43 | ||
|
|
a1f234c7e2 | ||
|
|
8facdc66a9 | ||
|
|
2ab78dd590 | ||
|
|
c14a40f7f8 | ||
|
|
8dd5858299 | ||
|
|
76eb3a2ac2 | ||
|
|
179c5ae9c2 | ||
|
|
8c356d7c36 | ||
|
|
a863dcc11d | ||
|
|
cf60f84f89 | ||
|
|
47e6ed6a17 | ||
|
|
d266c98e48 | ||
|
|
628e464b74 | ||
|
|
17d42e7931 | ||
|
|
5119ee4222 | ||
|
|
b039b745be | ||
|
|
02a7a54736 | ||
|
|
43481c2bab | ||
|
|
d7f6e72a9e | ||
|
|
82e22b4362 | ||
|
|
0d9259473e | ||
|
|
ea3930cf3d | ||
|
|
d97c4b7b57 | ||
|
|
2fac2ca4bb | ||
|
|
9bb52f1ded | ||
|
|
f987fc1f10 | ||
|
|
63b8eb0991 | ||
|
|
a52c0461e5 | ||
|
|
e73c92b031 | ||
|
|
09151aa3c8 | ||
|
|
d6300f33ca | ||
|
|
4b0d1399b1 | ||
|
|
55a34a9f1f | ||
|
|
c4652190eb | ||
|
|
af95dae73a | ||
|
|
1c1d9d30a7 | ||
|
|
3faebfa3fe | ||
|
|
d0eaf0e51d | ||
|
|
cf3ee6aec6 | ||
|
|
da80729f56 | ||
|
|
9ad58e1a74 | ||
|
|
55b17a7a11 | ||
|
|
2854e24e84 | ||
|
|
b91d84ee84 | ||
|
|
30a2c3d740 | ||
|
|
e3213b1426 | ||
|
|
bfc23cdfa1 | ||
|
|
8b5da3195b | ||
|
|
0c452a3ebc | ||
|
|
cfc5530d1c | ||
|
|
749fb3a5c1 | ||
|
|
dd26de9f55 | ||
|
|
b6cb926cbe | ||
|
|
eb30ef71f9 | ||
|
|
75fe579e93 | ||
|
|
8ab9dc5a11 | ||
|
|
96202d4bc2 | ||
|
|
f68aee6a19 | ||
|
|
7795d81183 | ||
|
|
0c053dab48 | ||
|
|
1ede7e7e6a | ||
|
|
980006d40e | ||
|
|
ef2dcbacd4 | ||
|
|
505a2b1e0b | ||
|
|
2e57553639 | ||
|
|
f37812247d | ||
|
|
484d4c65d5 | ||
|
|
327aef89a2 | ||
|
|
d96f369b73 | ||
|
|
f0e655f49a | ||
|
|
d22deabe79 | ||
|
|
518c81815e | ||
|
|
01652d0d11 | ||
|
|
44e665f1bf | ||
|
|
5b1e0105f4 | ||
|
|
832d10e133 | ||
|
|
7b7ac72c14 | ||
|
|
9137f0e75f | ||
|
|
b66efae5b7 | ||
|
|
2a8706e714 | ||
|
|
174c02cb79 | ||
|
|
a7f7898ee4 | ||
|
|
fdad82bf88 | ||
|
|
b0b49764b9 | ||
|
|
e10cb83adc | ||
|
|
b8875f71a5 | ||
|
|
4186b80a82 | ||
|
|
7eae0215f2 | ||
|
|
4cd84a4734 | ||
|
|
044c3d50d1 | ||
|
|
a1de0a78a0 | ||
|
|
fef9639e01 | ||
|
|
aef479218d | ||
|
|
ded5ecf4e9 | ||
|
|
a01f299597 | ||
|
|
21c9e88a86 | ||
|
|
af17f6e36f | ||
|
|
e69a2ad722 | ||
|
|
0480f6ccd6 | ||
|
|
24042d20c2 | ||
|
|
9c3b3a4104 | ||
|
|
17e2cdfc85 | ||
|
|
466c34afd4 | ||
|
|
b9567f5904 | ||
|
|
c2cf8ae892 | ||
|
|
3aa3c10ea4 | ||
|
|
5cd4183a7b | ||
|
|
2d9e38ad99 | ||
|
|
93d73f6d26 | ||
|
|
5209395a74 | ||
|
|
ef6b9ac2d2 | ||
|
|
92afbeb6bd | ||
|
|
bbdc11ce47 | ||
|
|
545bf2045d | ||
|
|
a0471098fa | ||
|
|
3320b40d15 | ||
|
|
bac5e1c220 | ||
|
|
33fa138d21 | ||
|
|
bc09a22e1f | ||
|
|
b771b51842 | ||
|
|
1a7bf27ead | ||
|
|
f3b00d0f78 | ||
|
|
c747baaee2 | ||
|
|
1322722db2 | ||
|
|
aa35eb3d3a | ||
|
|
616e2ef75f | ||
|
|
d98cae124f | ||
|
|
26aaef002d | ||
|
|
09bb59d090 | ||
|
|
2f38ffe2d5 | ||
|
|
12fa9d858d | ||
|
|
c4e1a58e0d | ||
|
|
8661f33c6d | ||
|
|
5c24ca2220 | ||
|
|
14559354dd | ||
|
|
3bf9dbd43a | ||
|
|
bd3999416b | ||
|
|
cc9f7d48c8 | ||
|
|
6bb0461be7 | ||
|
|
16ef026b38 | ||
|
|
50ed405c4a | ||
|
|
5407e1a9ff | ||
|
|
5436b18f70 | ||
|
|
8b7700364d | ||
|
|
3bdf3cbb5c | ||
|
|
45d9c9a5d8 | ||
|
|
6a23e6ce78 | ||
|
|
4e53215104 | ||
|
|
2899b6d416 | ||
|
|
b263cc615e | ||
|
|
97b0028919 | ||
|
|
fd1727a443 | ||
|
|
597cb9bfae | ||
|
|
c2430e5bd3 | ||
|
|
68df8efd10 | ||
|
|
c0d64bc994 | ||
|
|
6237f1a0fe | ||
|
|
30c50d9b78 | ||
|
|
03516ac09e | ||
|
|
5e5a136f1f | ||
|
|
98c50d44a4 | ||
|
|
0e9369816f | ||
|
|
be63a59e9c | ||
|
|
dbb84aba23 | ||
|
|
9819d2e91c | ||
|
|
4c24ba5a8b | ||
|
|
e67cab1e07 | ||
|
|
132b8f7529 | ||
|
|
d651e9d8d6 | ||
|
|
92f14508aa | ||
|
|
842b059fac | ||
|
|
49f9ecc168 | ||
|
|
e02fd889c2 | ||
|
|
52a821d3bb | ||
|
|
becd79f1e3 | ||
|
|
883ad2a04b | ||
|
|
bf93cdf0c4 | ||
|
|
c0ea1c736a | ||
|
|
8b448b9481 | ||
|
|
12f2b9f2b3 | ||
|
|
017ff3ca0a | ||
|
|
bcec178bbe | ||
|
|
e3347c7b9c | ||
|
|
6529446281 | ||
|
|
379551c40e | ||
|
|
7465017600 | ||
|
|
874c5a36de | ||
|
|
03436103d1 | ||
|
|
cb544e0011 | ||
|
|
df23c9e6ab | ||
|
|
52cc82fb3f | ||
|
|
d9571bfb8d | ||
|
|
07d800b589 | ||
|
|
ec042de69c | ||
|
|
585ae32c32 | ||
|
|
a89ba04109 | ||
|
|
05a3b95d75 | ||
|
|
0e269ca15d | ||
|
|
fd03cb4afa | ||
|
|
d6c5c93fe5 | ||
|
|
1abf219230 | ||
|
|
3a2ba6dbfe | ||
|
|
8fa8ba0a16 | ||
|
|
285f526e0c | ||
|
|
bd68b497ac | ||
|
|
06b047cfcb | ||
|
|
361cb06bf0 | ||
|
|
3170e22383 | ||
|
|
c585cee12f | ||
|
|
9dbec7281a | ||
|
|
c2fed78733 | ||
|
|
5fe7bcd378 | ||
|
|
20caa424fc | ||
|
|
c4e0a7cc96 | ||
|
|
d1219a225c | ||
|
|
3411256366 | ||
|
|
d08ef472a3 | ||
|
|
d81997d24b | ||
|
|
845674128e | ||
|
|
2bc931a8b0 | ||
|
|
e57549c06e | ||
|
|
241fd0b252 | ||
|
|
164acc1b4e | ||
|
|
78e5ddb4a8 | ||
|
|
43904cdb02 | ||
|
|
7ea1383e10 | ||
|
|
425e38811f | ||
|
|
f6bda66ed4 | ||
|
|
0df7e4a33d | ||
|
|
41ad717b8e | ||
|
|
fec5f88d91 | ||
|
|
724858d215 | ||
|
|
2b93afbd43 | ||
|
|
ca0f3ecedf | ||
|
|
ee0d0c6c59 | ||
|
|
ac38e85f3c | ||
|
|
ca3286a374 | ||
|
|
0898578c11 | ||
|
|
07593f8704 | ||
|
|
3f8a8db7a5 | ||
|
|
13eead3855 | ||
|
|
cb910feae9 | ||
|
|
c75f9a29cb | ||
|
|
3c5e453b01 | ||
|
|
63e0ffac42 | ||
|
|
d0155f28c8 | ||
|
|
27ca08d98a | ||
|
|
df99950475 | ||
|
|
6a85073d94 | ||
|
|
7b73ff34f1 | ||
|
|
8419b12f3f | ||
|
|
f1a5bcd17a | ||
|
|
28d8a4cc9e | ||
|
|
7108cdd2ca | ||
|
|
e7bfb19203 | ||
|
|
beac823472 | ||
|
|
c7fac3d9e6 | ||
|
|
3689eb969d | ||
|
|
5e330b7691 | ||
|
|
5ec5fe82e6 | ||
|
|
ee13bf9a8f | ||
|
|
219af28afc | ||
|
|
b64025b134 | ||
|
|
51e4e8489a | ||
|
|
bb70d04b88 | ||
|
|
32f6c6d6eb | ||
|
|
b6688e630e | ||
|
|
073f6d5793 | ||
|
|
9153b06f09 | ||
|
|
6cb2af8757 | ||
|
|
ca3b013a7b | ||
|
|
abde1ba40a | ||
|
|
b04659fb56 | ||
|
|
74ee30d5db | ||
|
|
a300466ca9 | ||
|
|
9311f2e62a | ||
|
|
67245158ea | ||
|
|
520d9a945c | ||
|
|
fa3ead0e8d | ||
|
|
253ab94646 | ||
|
|
fbb3f697e1 | ||
|
|
1a1517dffb | ||
|
|
690cf1f281 | ||
|
|
6f55da46ac | ||
|
|
57453966ac | ||
|
|
298acc9f89 | ||
|
|
f4390bc82f | ||
|
|
62af2031f6 | ||
|
|
0ddd672e0e | ||
|
|
7ef525effa | ||
|
|
2303dcd133 | ||
|
|
cc4f39a6ab | ||
|
|
d4076ad0ce | ||
|
|
3bd8626d48 | ||
|
|
ff5915dd20 | ||
|
|
8500f71565 | ||
|
|
81bab1d8ab | ||
|
|
24a6633322 | ||
|
|
f073f6ecc3 | ||
|
|
2870ddb223 | ||
|
|
1578d02e70 | ||
|
|
bb710ada1a | ||
|
|
33ae860059 | ||
|
|
3de6d58af3 | ||
|
|
c8e66a866e | ||
|
|
c25efdc0d8 | ||
|
|
bde82492ae | ||
|
|
67f18021c3 | ||
|
|
6704293cb1 | ||
|
|
8f1740c0f5 | ||
|
|
62019d5916 | ||
|
|
e66283b1d6 | ||
|
|
a0d6d76626 | ||
|
|
c2f5c07038 | ||
|
|
419abf88dd | ||
|
|
b7596617ed | ||
|
|
26da99e834 | ||
|
|
2b33a0d322 | ||
|
|
c796adbae8 | ||
|
|
18d82b1bb1 | ||
|
|
0c68fcc8c8 | ||
|
|
e4458b8222 | ||
|
|
eb8ebe3ce0 | ||
|
|
0dc70addb6 | ||
|
|
f3f5d05349 | ||
|
|
0c4b833b07 | ||
|
|
029c5ca855 | ||
|
|
1f270edbe1 | ||
|
|
47c188d8f9 | ||
|
|
cca4638b71 | ||
|
|
19c12b7813 | ||
|
|
0261ec2892 | ||
|
|
5e4f5f86cd | ||
|
|
fbab1d323f | ||
|
|
8b19266c9a | ||
|
|
1b9d194dd1 | ||
|
|
74c793b6c6 | ||
|
|
d1222268c3 | ||
|
|
df7a0f8687 | ||
|
|
c7def000df | ||
|
|
e2394244f6 | ||
|
|
007830ec74 | ||
|
|
f721eb7152 | ||
|
|
e56db2362c | ||
|
|
d2c7a9e05d | ||
|
|
acce06b304 | ||
|
|
4ab54270db | ||
|
|
f50520c93f | ||
|
|
cebf57ffd3 | ||
|
|
6020219fda | ||
|
|
8094941385 | ||
|
|
9ce3cfee7d | ||
|
|
6184440441 | ||
|
|
0cff4cf510 | ||
|
|
b152f119c5 | ||
|
|
9f936c6968 | ||
|
|
b8531cf7e8 | ||
|
|
edcc4e789b | ||
|
|
20cc401238 | ||
|
|
70204a2d36 | ||
|
|
e38325c27f | ||
|
|
5e4b422315 | ||
|
|
6c5206daf4 | ||
|
|
ed65f70315 | ||
|
|
f41a42010c | ||
|
|
aa8caeaeb0 | ||
|
|
a0669d4262 | ||
|
|
a4a792c6b1 | ||
|
|
6842e4c7f7 | ||
|
|
6638c35945 | ||
|
|
53f5c2b2bb | ||
|
|
6e13cdd516 | ||
|
|
a48c67d271 | ||
|
|
43fc3de2e1 | ||
|
|
80081b60bf | ||
|
|
cbca9b68e6 | ||
|
|
b9b3695497 | ||
|
|
1b9acb1395 | ||
|
|
01cf81a105 | ||
|
|
6381ecaa37 | ||
|
|
6d267ce0fa | ||
|
|
8b0b565282 | ||
|
|
a046d1232e | ||
|
|
d724e782dd | ||
|
|
a266d85ecd | ||
|
|
a4a111fad0 | ||
|
|
2a98de85a8 | ||
|
|
fb3a8499f3 | ||
|
|
33dd9ae347 | ||
|
|
ac87594b5d | ||
|
|
32656a9662 | ||
|
|
785a4d2c3b | ||
|
|
41a6c7f712 | ||
|
|
7e5d915b60 | ||
|
|
8321c06e16 | ||
|
|
f60c18d31a | ||
|
|
e171b6a049 | ||
|
|
6e4b611662 | ||
|
|
7522e58fee | ||
|
|
317c21ffc0 | ||
|
|
9c5fe44617 | ||
|
|
7f79d9692c | ||
|
|
2d4ffc7514 | ||
|
|
5f3db1f25e | ||
|
|
7115460804 | ||
|
|
0db8808b2a | ||
|
|
cf3ed1dd8f | ||
|
|
da682e3993 | ||
|
|
4a59e901e6 | ||
|
|
8ed2fa07a0 | ||
|
|
385e7f5c1e | ||
|
|
861fff1aae | ||
|
|
09527b3b67 | ||
|
|
d98ff16c8f | ||
|
|
e902e8ea4c | ||
|
|
aeb5bd829f | ||
|
|
a92457b871 | ||
|
|
c24e6207d0 | ||
|
|
6c412cd367 | ||
|
|
89a960629a | ||
|
|
05d96a7d6e | ||
|
|
41144ff1fa | ||
|
|
360cddcb91 | ||
|
|
427832e72e | ||
|
|
27c60658f7 | ||
|
|
fa8ae149d3 | ||
|
|
0c19beb11c | ||
|
|
e34e4a59e9 | ||
|
|
7cc092cd59 | ||
|
|
51cd7156d2 | ||
|
|
1dc843d2d0 | ||
|
|
4040bef4b8 | ||
|
|
e64a850f57 | ||
|
|
555523df38 | ||
|
|
dd882139f3 | ||
|
|
a67b8c6109 | ||
|
|
134208dab6 | ||
|
|
887343d232 | ||
|
|
299b838400 | ||
|
|
c5d0a8be7d | ||
|
|
fe433a84c9 | ||
|
|
543aa7a27b | ||
|
|
36ddf0513b | ||
|
|
c99883e634 | ||
|
|
604f98b08f | ||
|
|
c5009a0333 | ||
|
|
99b05d35a2 | ||
|
|
a3ecc6fe02 | ||
|
|
fc20dd5ad4 | ||
|
|
eb94e4de72 | ||
|
|
0fa5fdd478 | ||
|
|
472342c246 | ||
|
|
71e03c2a13 | ||
|
|
c3403c033c | ||
|
|
2a87d55519 | ||
|
|
2d309f6833 | ||
|
|
7a2a3ef500 | ||
|
|
3ff9658723 | ||
|
|
c587947de6 | ||
|
|
a9403651d4 | ||
|
|
d2f64f10ff | ||
|
|
9fe5b485f8 | ||
|
|
927ce9121d |
108
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
108
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
name: Feature Request
|
||||||
|
description: Suggest a new feature or enhancement for Automaker
|
||||||
|
title: '[Feature]: '
|
||||||
|
labels: ['enhancement']
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Thanks for taking the time to suggest a feature! Please fill out the form below to help us understand your request.
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: feature-area
|
||||||
|
attributes:
|
||||||
|
label: Feature Area
|
||||||
|
description: Which area of Automaker does this feature relate to?
|
||||||
|
options:
|
||||||
|
- UI/UX (User Interface)
|
||||||
|
- Agent/AI
|
||||||
|
- Kanban Board
|
||||||
|
- Git/Worktree Management
|
||||||
|
- Project Management
|
||||||
|
- Settings/Configuration
|
||||||
|
- Documentation
|
||||||
|
- Performance
|
||||||
|
- Other
|
||||||
|
default: 0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: priority
|
||||||
|
attributes:
|
||||||
|
label: Priority
|
||||||
|
description: How important is this feature to your workflow?
|
||||||
|
options:
|
||||||
|
- Nice to have
|
||||||
|
- Would improve my workflow
|
||||||
|
- Critical for my use case
|
||||||
|
default: 0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: problem-statement
|
||||||
|
attributes:
|
||||||
|
label: Problem Statement
|
||||||
|
description: Is your feature request related to a problem? Please describe the problem you're trying to solve.
|
||||||
|
placeholder: A clear and concise description of what the problem is. Ex. I'm always frustrated when...
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: proposed-solution
|
||||||
|
attributes:
|
||||||
|
label: Proposed Solution
|
||||||
|
description: Describe the solution you'd like to see implemented.
|
||||||
|
placeholder: A clear and concise description of what you want to happen.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: alternatives-considered
|
||||||
|
attributes:
|
||||||
|
label: Alternatives Considered
|
||||||
|
description: Describe any alternative solutions or workarounds you've considered.
|
||||||
|
placeholder: A clear and concise description of any alternative solutions or features you've considered.
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: use-cases
|
||||||
|
attributes:
|
||||||
|
label: Use Cases
|
||||||
|
description: Describe specific scenarios where this feature would be useful.
|
||||||
|
placeholder: |
|
||||||
|
1. When working on...
|
||||||
|
2. As a user who needs to...
|
||||||
|
3. In situations where...
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: mockups
|
||||||
|
attributes:
|
||||||
|
label: Mockups/Screenshots
|
||||||
|
description: If applicable, add mockups, wireframes, or screenshots to help illustrate your feature request.
|
||||||
|
placeholder: Drag and drop images here or paste image URLs
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: additional-context
|
||||||
|
attributes:
|
||||||
|
label: Additional Context
|
||||||
|
description: Add any other context, references, or examples about the feature request here.
|
||||||
|
placeholder: Any additional information that might be helpful...
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: checkboxes
|
||||||
|
id: terms
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
options:
|
||||||
|
- label: I have searched existing issues to ensure this feature hasn't been requested already
|
||||||
|
required: true
|
||||||
|
- label: I have provided a clear description of the problem and proposed solution
|
||||||
|
required: true
|
||||||
21
.github/actions/setup-project/action.yml
vendored
21
.github/actions/setup-project/action.yml
vendored
@@ -25,25 +25,34 @@ runs:
|
|||||||
cache: 'npm'
|
cache: 'npm'
|
||||||
cache-dependency-path: package-lock.json
|
cache-dependency-path: package-lock.json
|
||||||
|
|
||||||
- name: Check for SSH URLs in lockfile
|
|
||||||
if: inputs.check-lockfile == 'true'
|
|
||||||
shell: bash
|
|
||||||
run: npm run lint:lockfile
|
|
||||||
|
|
||||||
- name: Configure Git for HTTPS
|
- name: Configure Git for HTTPS
|
||||||
shell: bash
|
shell: bash
|
||||||
# Convert SSH URLs to HTTPS for git dependencies (e.g., @electron/node-gyp)
|
# Convert SSH URLs to HTTPS for git dependencies (e.g., @electron/node-gyp)
|
||||||
# This is needed because SSH authentication isn't available in CI
|
# This is needed because SSH authentication isn't available in CI
|
||||||
run: git config --global url."https://github.com/".insteadOf "git@github.com:"
|
run: git config --global url."https://github.com/".insteadOf "git@github.com:"
|
||||||
|
|
||||||
|
- name: Auto-fix SSH URLs in lockfile
|
||||||
|
if: inputs.check-lockfile == 'true'
|
||||||
|
shell: bash
|
||||||
|
# Auto-fix any git+ssh:// URLs in package-lock.json before linting
|
||||||
|
# This handles cases where npm reintroduces SSH URLs for git dependencies
|
||||||
|
run: node scripts/fix-lockfile-urls.mjs
|
||||||
|
|
||||||
|
- name: Check for SSH URLs in lockfile
|
||||||
|
if: inputs.check-lockfile == 'true'
|
||||||
|
shell: bash
|
||||||
|
run: npm run lint:lockfile
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
shell: bash
|
shell: bash
|
||||||
# Use npm install instead of npm ci to correctly resolve platform-specific
|
# Use npm install instead of npm ci to correctly resolve platform-specific
|
||||||
# optional dependencies (e.g., @tailwindcss/oxide, lightningcss binaries)
|
# optional dependencies (e.g., @tailwindcss/oxide, lightningcss binaries)
|
||||||
# Skip scripts to avoid electron-builder install-app-deps which uses too much memory
|
# Skip scripts to avoid electron-builder install-app-deps which uses too much memory
|
||||||
run: npm install --ignore-scripts
|
# Use --force to allow platform-specific dev dependencies like dmg-license on non-darwin platforms
|
||||||
|
run: npm install --ignore-scripts --force
|
||||||
|
|
||||||
- name: Install Linux native bindings
|
- name: Install Linux native bindings
|
||||||
|
if: runner.os == 'Linux'
|
||||||
shell: bash
|
shell: bash
|
||||||
# Workaround for npm optional dependencies bug (npm/cli#4828)
|
# Workaround for npm optional dependencies bug (npm/cli#4828)
|
||||||
# Explicitly install Linux bindings needed for build tools
|
# Explicitly install Linux bindings needed for build tools
|
||||||
|
|||||||
89
.github/workflows/e2e-tests.yml
vendored
89
.github/workflows/e2e-tests.yml
vendored
@@ -37,7 +37,14 @@ jobs:
|
|||||||
git config --global user.email "ci@example.com"
|
git config --global user.email "ci@example.com"
|
||||||
|
|
||||||
- name: Start backend server
|
- name: Start backend server
|
||||||
run: npm run start --workspace=apps/server &
|
run: |
|
||||||
|
echo "Starting backend server..."
|
||||||
|
# Start server in background and save PID
|
||||||
|
npm run start --workspace=apps/server > backend.log 2>&1 &
|
||||||
|
SERVER_PID=$!
|
||||||
|
echo "Server started with PID: $SERVER_PID"
|
||||||
|
echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV
|
||||||
|
|
||||||
env:
|
env:
|
||||||
PORT: 3008
|
PORT: 3008
|
||||||
NODE_ENV: test
|
NODE_ENV: test
|
||||||
@@ -53,21 +60,70 @@ jobs:
|
|||||||
- name: Wait for backend server
|
- name: Wait for backend server
|
||||||
run: |
|
run: |
|
||||||
echo "Waiting for backend server to be ready..."
|
echo "Waiting for backend server to be ready..."
|
||||||
|
|
||||||
|
# Check if server process is running
|
||||||
|
if [ -z "$SERVER_PID" ]; then
|
||||||
|
echo "ERROR: Server PID not found in environment"
|
||||||
|
cat backend.log 2>/dev/null || echo "No backend log found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if process is actually running
|
||||||
|
if ! kill -0 $SERVER_PID 2>/dev/null; then
|
||||||
|
echo "ERROR: Server process $SERVER_PID is not running!"
|
||||||
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
echo ""
|
||||||
|
echo "=== Recent system logs ==="
|
||||||
|
dmesg 2>/dev/null | tail -20 || echo "No dmesg available"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Wait for health endpoint
|
||||||
for i in {1..60}; do
|
for i in {1..60}; do
|
||||||
if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
|
if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
|
||||||
echo "Backend server is ready!"
|
echo "Backend server is ready!"
|
||||||
curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check response: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
echo ""
|
||||||
|
echo "Health check response:"
|
||||||
|
curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Check if server process is still running
|
||||||
|
if ! kill -0 $SERVER_PID 2>/dev/null; then
|
||||||
|
echo "ERROR: Server process died during wait!"
|
||||||
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Waiting... ($i/60)"
|
echo "Waiting... ($i/60)"
|
||||||
sleep 1
|
sleep 1
|
||||||
done
|
done
|
||||||
echo "Backend server failed to start!"
|
|
||||||
echo "Checking server status..."
|
echo "ERROR: Backend server failed to start within 60 seconds!"
|
||||||
|
echo "=== Backend logs ==="
|
||||||
|
cat backend.log
|
||||||
|
echo ""
|
||||||
|
echo "=== Process status ==="
|
||||||
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
|
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
|
||||||
|
echo ""
|
||||||
|
echo "=== Port status ==="
|
||||||
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
|
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
|
||||||
echo "Testing health endpoint..."
|
lsof -i :3008 2>/dev/null || echo "lsof not available or port not in use"
|
||||||
|
echo ""
|
||||||
|
echo "=== Health endpoint test ==="
|
||||||
curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"
|
curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"
|
||||||
|
|
||||||
|
# Kill the server process if it's still hanging
|
||||||
|
if kill -0 $SERVER_PID 2>/dev/null; then
|
||||||
|
echo ""
|
||||||
|
echo "Killing stuck server process..."
|
||||||
|
kill -9 $SERVER_PID 2>/dev/null || true
|
||||||
|
fi
|
||||||
|
|
||||||
exit 1
|
exit 1
|
||||||
|
|
||||||
- name: Run E2E tests
|
- name: Run E2E tests
|
||||||
@@ -77,10 +133,23 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
CI: true
|
CI: true
|
||||||
VITE_SERVER_URL: http://localhost:3008
|
VITE_SERVER_URL: http://localhost:3008
|
||||||
|
SERVER_URL: http://localhost:3008
|
||||||
VITE_SKIP_SETUP: 'true'
|
VITE_SKIP_SETUP: 'true'
|
||||||
# Keep UI-side login/defaults consistent
|
# Keep UI-side login/defaults consistent
|
||||||
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
|
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests
|
||||||
|
|
||||||
|
- name: Print backend logs on failure
|
||||||
|
if: failure()
|
||||||
|
run: |
|
||||||
|
echo "=== E2E Tests Failed - Backend Logs ==="
|
||||||
|
cat backend.log 2>/dev/null || echo "No backend log found"
|
||||||
|
echo ""
|
||||||
|
echo "=== Process status at failure ==="
|
||||||
|
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
|
||||||
|
echo ""
|
||||||
|
echo "=== Port status ==="
|
||||||
|
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
|
||||||
|
|
||||||
- name: Upload Playwright report
|
- name: Upload Playwright report
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
if: always()
|
if: always()
|
||||||
@@ -98,3 +167,13 @@ jobs:
|
|||||||
apps/ui/test-results/
|
apps/ui/test-results/
|
||||||
retention-days: 7
|
retention-days: 7
|
||||||
if-no-files-found: ignore
|
if-no-files-found: ignore
|
||||||
|
|
||||||
|
- name: Cleanup - Kill backend server
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
if [ -n "$SERVER_PID" ]; then
|
||||||
|
echo "Cleaning up backend server (PID: $SERVER_PID)..."
|
||||||
|
kill $SERVER_PID 2>/dev/null || true
|
||||||
|
kill -9 $SERVER_PID 2>/dev/null || true
|
||||||
|
echo "Backend server cleanup complete"
|
||||||
|
fi
|
||||||
|
|||||||
2
.github/workflows/format-check.yml
vendored
2
.github/workflows/format-check.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
|||||||
cache-dependency-path: package-lock.json
|
cache-dependency-path: package-lock.json
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: npm install --ignore-scripts
|
run: npm install --ignore-scripts --force
|
||||||
|
|
||||||
- name: Check formatting
|
- name: Check formatting
|
||||||
run: npm run format:check
|
run: npm run format:check
|
||||||
|
|||||||
34
.github/workflows/release.yml
vendored
34
.github/workflows/release.yml
vendored
@@ -4,6 +4,9 @@ on:
|
|||||||
release:
|
release:
|
||||||
types: [published]
|
types: [published]
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
strategy:
|
strategy:
|
||||||
@@ -35,6 +38,11 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
check-lockfile: 'true'
|
check-lockfile: 'true'
|
||||||
|
|
||||||
|
- name: Install RPM build tools (Linux)
|
||||||
|
if: matrix.os == 'ubuntu-latest'
|
||||||
|
shell: bash
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y rpm
|
||||||
|
|
||||||
- name: Build Electron app (macOS)
|
- name: Build Electron app (macOS)
|
||||||
if: matrix.os == 'macos-latest'
|
if: matrix.os == 'macos-latest'
|
||||||
shell: bash
|
shell: bash
|
||||||
@@ -57,7 +65,10 @@ jobs:
|
|||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: macos-builds
|
name: macos-builds
|
||||||
path: apps/ui/release/*.{dmg,zip}
|
path: |
|
||||||
|
apps/ui/release/*.dmg
|
||||||
|
apps/ui/release/*.zip
|
||||||
|
if-no-files-found: error
|
||||||
retention-days: 30
|
retention-days: 30
|
||||||
|
|
||||||
- name: Upload Windows artifacts
|
- name: Upload Windows artifacts
|
||||||
@@ -66,6 +77,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: windows-builds
|
name: windows-builds
|
||||||
path: apps/ui/release/*.exe
|
path: apps/ui/release/*.exe
|
||||||
|
if-no-files-found: error
|
||||||
retention-days: 30
|
retention-days: 30
|
||||||
|
|
||||||
- name: Upload Linux artifacts
|
- name: Upload Linux artifacts
|
||||||
@@ -73,15 +85,21 @@ jobs:
|
|||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: linux-builds
|
name: linux-builds
|
||||||
path: apps/ui/release/*.{AppImage,deb}
|
path: |
|
||||||
|
apps/ui/release/*.AppImage
|
||||||
|
apps/ui/release/*.deb
|
||||||
|
apps/ui/release/*.rpm
|
||||||
|
if-no-files-found: error
|
||||||
retention-days: 30
|
retention-days: 30
|
||||||
|
|
||||||
upload:
|
upload:
|
||||||
needs: build
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: github.event.release.draft == false
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Download macOS artifacts
|
- name: Download macOS artifacts
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
@@ -103,9 +121,13 @@ jobs:
|
|||||||
- name: Upload to GitHub Release
|
- name: Upload to GitHub Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
|
fail_on_unmatched_files: true
|
||||||
files: |
|
files: |
|
||||||
artifacts/macos-builds/*
|
artifacts/macos-builds/*.dmg
|
||||||
artifacts/windows-builds/*
|
artifacts/macos-builds/*.zip
|
||||||
artifacts/linux-builds/*
|
artifacts/windows-builds/*.exe
|
||||||
|
artifacts/linux-builds/*.AppImage
|
||||||
|
artifacts/linux-builds/*.deb
|
||||||
|
artifacts/linux-builds/*.rpm
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|||||||
17
.gitignore
vendored
17
.gitignore
vendored
@@ -73,6 +73,9 @@ blob-report/
|
|||||||
!.env.example
|
!.env.example
|
||||||
!.env.local.example
|
!.env.local.example
|
||||||
|
|
||||||
|
# Codex config (contains API keys)
|
||||||
|
.codex/config.toml
|
||||||
|
|
||||||
# TypeScript
|
# TypeScript
|
||||||
*.tsbuildinfo
|
*.tsbuildinfo
|
||||||
|
|
||||||
@@ -85,3 +88,17 @@ docker-compose.override.yml
|
|||||||
|
|
||||||
pnpm-lock.yaml
|
pnpm-lock.yaml
|
||||||
yarn.lock
|
yarn.lock
|
||||||
|
|
||||||
|
# Fork-specific workflow files (should never be committed)
|
||||||
|
DEVELOPMENT_WORKFLOW.md
|
||||||
|
check-sync.sh
|
||||||
|
# API key files
|
||||||
|
data/.api-key
|
||||||
|
data/credentials.json
|
||||||
|
data/
|
||||||
|
.codex/
|
||||||
|
|
||||||
|
# GSD planning docs (local-only)
|
||||||
|
.planning/
|
||||||
|
.mcp.json
|
||||||
|
.planning
|
||||||
|
|||||||
@@ -31,7 +31,24 @@ fi
|
|||||||
|
|
||||||
# Ensure common system paths are in PATH (for systems without nvm)
|
# Ensure common system paths are in PATH (for systems without nvm)
|
||||||
# This helps find node/npm installed via Homebrew, system packages, etc.
|
# This helps find node/npm installed via Homebrew, system packages, etc.
|
||||||
export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
|
if [ -n "$WINDIR" ]; then
|
||||||
|
export PATH="$PATH:/c/Program Files/nodejs:/c/Program Files (x86)/nodejs"
|
||||||
|
export PATH="$PATH:$APPDATA/npm:$LOCALAPPDATA/Programs/nodejs"
|
||||||
|
else
|
||||||
|
export PATH="$PATH:/usr/local/bin:/opt/homebrew/bin:/usr/bin"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Auto-fix git+ssh:// URLs in package-lock.json if it's being committed
|
||||||
|
# This prevents CI failures from SSH URLs that npm introduces for git dependencies
|
||||||
|
if git diff --cached --name-only | grep -q "^package-lock.json$"; then
|
||||||
|
if command -v node >/dev/null 2>&1; then
|
||||||
|
if grep -q "git+ssh://" package-lock.json 2>/dev/null; then
|
||||||
|
echo "Fixing git+ssh:// URLs in package-lock.json..."
|
||||||
|
node scripts/fix-lockfile-urls.mjs
|
||||||
|
git add package-lock.json
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
# Run lint-staged - works with or without nvm
|
# Run lint-staged - works with or without nvm
|
||||||
# Prefer npx, fallback to npm exec, both work with system-installed Node.js
|
# Prefer npx, fallback to npm exec, both work with system-installed Node.js
|
||||||
|
|||||||
81
.planning/PROJECT.md
Normal file
81
.planning/PROJECT.md
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
# AutoModeService Refactoring
|
||||||
|
|
||||||
|
## What This Is
|
||||||
|
|
||||||
|
A comprehensive refactoring of the `auto-mode-service.ts` file (5k+ lines) into smaller, focused services with clear boundaries. This is an architectural cleanup of accumulated technical debt from rapid development, breaking the "god object" anti-pattern into maintainable, debuggable modules.
|
||||||
|
|
||||||
|
## Core Value
|
||||||
|
|
||||||
|
All existing auto-mode functionality continues working — features execute, pipelines flow, merges complete — while the codebase becomes maintainable.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
### Validated
|
||||||
|
|
||||||
|
<!-- Existing functionality that must be preserved -->
|
||||||
|
|
||||||
|
- ✓ Single feature execution with AI agent — existing
|
||||||
|
- ✓ Concurrent execution with configurable limits — existing
|
||||||
|
- ✓ Pipeline orchestration (backlog → in-progress → approval → verified) — existing
|
||||||
|
- ✓ Git worktree isolation per feature — existing
|
||||||
|
- ✓ Automatic merging of completed work — existing
|
||||||
|
- ✓ Custom pipeline support — existing
|
||||||
|
- ✓ Test runner integration — existing
|
||||||
|
- ✓ Event streaming to frontend — existing
|
||||||
|
|
||||||
|
### Active
|
||||||
|
|
||||||
|
<!-- Refactoring goals -->
|
||||||
|
|
||||||
|
- [ ] No service file exceeds ~500 lines
|
||||||
|
- [ ] Each service has single, clear responsibility
|
||||||
|
- [ ] Service boundaries make debugging obvious
|
||||||
|
- [ ] Changes to one service don't risk breaking unrelated features
|
||||||
|
- [ ] Test coverage for critical paths
|
||||||
|
|
||||||
|
### Out of Scope
|
||||||
|
|
||||||
|
- New auto-mode features — this is cleanup, not enhancement
|
||||||
|
- UI changes — backend refactor only
|
||||||
|
- Performance optimization — maintain current performance, don't optimize
|
||||||
|
- Other service refactoring — focus on auto-mode-service.ts only
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
**Current state:** `apps/server/src/services/auto-mode-service.ts` is ~5700 lines handling:
|
||||||
|
|
||||||
|
- Worktree management (create, cleanup, track)
|
||||||
|
- Agent/task execution coordination
|
||||||
|
- Concurrency control and queue management
|
||||||
|
- Pipeline state machine (column transitions)
|
||||||
|
- Merge handling and conflict resolution
|
||||||
|
- Event emission for real-time updates
|
||||||
|
|
||||||
|
**Technical environment:**
|
||||||
|
|
||||||
|
- Express 5 backend, TypeScript
|
||||||
|
- Event-driven architecture via EventEmitter
|
||||||
|
- WebSocket streaming to React frontend
|
||||||
|
- Git worktrees via @automaker/git-utils
|
||||||
|
- Minimal existing test coverage
|
||||||
|
|
||||||
|
**Codebase analysis:** See `.planning/codebase/` for full architecture, conventions, and existing patterns.
|
||||||
|
|
||||||
|
## Constraints
|
||||||
|
|
||||||
|
- **Breaking changes**: Acceptable — other parts of the app can be updated to match new service interfaces
|
||||||
|
- **Test coverage**: Currently minimal — must add tests during refactoring to catch regressions
|
||||||
|
- **Incremental approach**: Required — can't do big-bang rewrite with everything critical
|
||||||
|
- **Existing patterns**: Follow conventions in `.planning/codebase/CONVENTIONS.md`
|
||||||
|
|
||||||
|
## Key Decisions
|
||||||
|
|
||||||
|
| Decision | Rationale | Outcome |
|
||||||
|
| ------------------------- | --------------------------------------------------- | --------- |
|
||||||
|
| Accept breaking changes | Allows cleaner interfaces, worth the migration cost | — Pending |
|
||||||
|
| Add tests during refactor | No existing safety net, need to build one | — Pending |
|
||||||
|
| Incremental extraction | Everything is critical, can't break it all at once | — Pending |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Last updated: 2026-01-27 after initialization_
|
||||||
234
.planning/codebase/ARCHITECTURE.md
Normal file
234
.planning/codebase/ARCHITECTURE.md
Normal file
@@ -0,0 +1,234 @@
|
|||||||
|
# Architecture
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Pattern Overview
|
||||||
|
|
||||||
|
**Overall:** Monorepo with layered client-server architecture (Electron-first) and pluggable provider abstraction for AI models.
|
||||||
|
|
||||||
|
**Key Characteristics:**
|
||||||
|
|
||||||
|
- Event-driven communication via WebSocket between frontend and backend
|
||||||
|
- Multi-provider AI model abstraction layer (Claude, Cursor, Codex, Gemini, OpenCode, Copilot)
|
||||||
|
- Feature-centric workflow stored in `.automaker/` directories
|
||||||
|
- Isolated git worktree execution for each feature
|
||||||
|
- State management through Zustand stores with API persistence
|
||||||
|
|
||||||
|
## Layers
|
||||||
|
|
||||||
|
**Presentation Layer (UI):**
|
||||||
|
|
||||||
|
- Purpose: React 19 Electron/web frontend with TanStack Router file-based routing
|
||||||
|
- Location: `apps/ui/src/`
|
||||||
|
- Contains: Route components, view pages, custom React hooks, Zustand stores, API client
|
||||||
|
- Depends on: @automaker/types, @automaker/utils, HTTP API backend
|
||||||
|
- Used by: Electron main process (desktop), web browser (web mode)
|
||||||
|
|
||||||
|
**API Layer (Server):**
|
||||||
|
|
||||||
|
- Purpose: Express 5 backend exposing RESTful and WebSocket endpoints
|
||||||
|
- Location: `apps/server/src/`
|
||||||
|
- Contains: Route handlers, business logic services, middleware, provider adapters
|
||||||
|
- Depends on: @automaker/types, @automaker/utils, @automaker/platform, Claude Agent SDK
|
||||||
|
- Used by: UI frontend via HTTP/WebSocket
|
||||||
|
|
||||||
|
**Service Layer (Server):**
|
||||||
|
|
||||||
|
- Purpose: Business logic and domain operations
|
||||||
|
- Location: `apps/server/src/services/`
|
||||||
|
- Contains: AgentService, FeatureLoader, AutoModeService, SettingsService, DevServerService, etc.
|
||||||
|
- Depends on: Providers, secure filesystem, feature storage
|
||||||
|
- Used by: Route handlers
|
||||||
|
|
||||||
|
**Provider Abstraction (Server):**
|
||||||
|
|
||||||
|
- Purpose: Unified interface for different AI model providers
|
||||||
|
- Location: `apps/server/src/providers/`
|
||||||
|
- Contains: ProviderFactory, specific provider implementations (ClaudeProvider, CursorProvider, CodexProvider, GeminiProvider, OpencodeProvider, CopilotProvider)
|
||||||
|
- Depends on: @automaker/types, provider SDKs
|
||||||
|
- Used by: AgentService
|
||||||
|
|
||||||
|
**Shared Library Layer:**
|
||||||
|
|
||||||
|
- Purpose: Type definitions and utilities shared across apps
|
||||||
|
- Location: `libs/`
|
||||||
|
- Contains: @automaker/types, @automaker/utils, @automaker/platform, @automaker/prompts, @automaker/model-resolver, @automaker/dependency-resolver, @automaker/git-utils, @automaker/spec-parser
|
||||||
|
- Depends on: None (types has no external deps)
|
||||||
|
- Used by: All apps and services
|
||||||
|
|
||||||
|
## Data Flow
|
||||||
|
|
||||||
|
**Feature Execution Flow:**
|
||||||
|
|
||||||
|
1. User creates/updates feature via UI (`apps/ui/src/`)
|
||||||
|
2. UI sends HTTP request to backend (`POST /api/features`)
|
||||||
|
3. Server route handler invokes FeatureLoader to persist to `.automaker/features/{featureId}/`
|
||||||
|
4. When executing, AgentService loads feature, creates isolated git worktree via @automaker/git-utils
|
||||||
|
5. AgentService invokes ProviderFactory to get appropriate AI provider (Claude, Cursor, etc.)
|
||||||
|
6. Provider executes with context from CLAUDE.md files via @automaker/utils loadContextFiles()
|
||||||
|
7. Server emits events via EventEmitter throughout execution
|
||||||
|
8. Events stream to frontend via WebSocket
|
||||||
|
9. UI updates stores and renders real-time progress
|
||||||
|
10. Feature results persist back to `.automaker/features/` with generated agent-output.md
|
||||||
|
|
||||||
|
**State Management:**
|
||||||
|
|
||||||
|
**Frontend State (Zustand):**
|
||||||
|
|
||||||
|
- `app-store.ts`: Global app state (projects, features, settings, boards, themes)
|
||||||
|
- `setup-store.ts`: First-time setup wizard flow
|
||||||
|
- `ideation-store.ts`: Ideation feature state
|
||||||
|
- `test-runners-store.ts`: Test runner configurations
|
||||||
|
- Settings now persist via API (`/api/settings`) rather than localStorage (see use-settings-sync.ts)
|
||||||
|
|
||||||
|
**Backend State (Services):**
|
||||||
|
|
||||||
|
- SettingsService: Global and project-specific settings (in-memory with file persistence)
|
||||||
|
- AgentService: Active agent sessions and conversation history
|
||||||
|
- FeatureLoader: Feature data model operations
|
||||||
|
- DevServerService: Development server logs
|
||||||
|
- EventHistoryService: Persists event logs for replay
|
||||||
|
|
||||||
|
**Real-Time Updates (WebSocket):**
|
||||||
|
|
||||||
|
- Server EventEmitter emits TypedEvent (type + payload)
|
||||||
|
- WebSocket handler subscribes to events and broadcasts to all clients
|
||||||
|
- Frontend listens on multiple WebSocket subscriptions and updates stores
|
||||||
|
|
||||||
|
## Key Abstractions
|
||||||
|
|
||||||
|
**Feature:**
|
||||||
|
|
||||||
|
- Purpose: Represents a development task/story with rich metadata
|
||||||
|
- Location: @automaker/types → `libs/types/src/feature.ts`
|
||||||
|
- Fields: id, title, description, status, images, tasks, priority, etc.
|
||||||
|
- Stored: `.automaker/features/{featureId}/feature.json`
|
||||||
|
|
||||||
|
**Provider:**
|
||||||
|
|
||||||
|
- Purpose: Abstracts different AI model implementations
|
||||||
|
- Location: `apps/server/src/providers/{provider}-provider.ts`
|
||||||
|
- Interface: Common execute() method with consistent message format
|
||||||
|
- Implementations: Claude, Cursor, Codex, Gemini, OpenCode, Copilot
|
||||||
|
- Factory: ProviderFactory picks correct provider based on model ID
|
||||||
|
|
||||||
|
**Event:**
|
||||||
|
|
||||||
|
- Purpose: Real-time updates streamed to frontend
|
||||||
|
- Location: @automaker/types → `libs/types/src/event.ts`
|
||||||
|
- Format: { type: EventType, payload: unknown }
|
||||||
|
- Examples: agent-started, agent-step, agent-complete, feature-updated, etc.
|
||||||
|
|
||||||
|
**AgentSession:**
|
||||||
|
|
||||||
|
- Purpose: Represents a conversation between user and AI agent
|
||||||
|
- Location: @automaker/types → `libs/types/src/session.ts`
|
||||||
|
- Contains: Messages (user + assistant), metadata, creation timestamp
|
||||||
|
- Stored: `{DATA_DIR}/agent-sessions/{sessionId}.json`
|
||||||
|
|
||||||
|
**Settings:**
|
||||||
|
|
||||||
|
- Purpose: Configuration for global and per-project behavior
|
||||||
|
- Location: @automaker/types → `libs/types/src/settings.ts`
|
||||||
|
- Stored: Global in `{DATA_DIR}/settings.json`, per-project in `.automaker/settings.json`
|
||||||
|
- Service: SettingsService in `apps/server/src/services/settings-service.ts`
|
||||||
|
|
||||||
|
## Entry Points
|
||||||
|
|
||||||
|
**Server:**
|
||||||
|
|
||||||
|
- Location: `apps/server/src/index.ts`
|
||||||
|
- Triggers: `npm run dev:server` or Docker startup
|
||||||
|
- Responsibilities:
|
||||||
|
- Initialize Express app with middleware
|
||||||
|
- Create shared EventEmitter for WebSocket streaming
|
||||||
|
- Bootstrap services (SettingsService, AgentService, FeatureLoader, etc.)
|
||||||
|
- Mount API routes at `/api/*`
|
||||||
|
- Create WebSocket servers for agent streaming and terminal sessions
|
||||||
|
- Load and apply user settings (log level, request logging, etc.)
|
||||||
|
|
||||||
|
**UI (Web):**
|
||||||
|
|
||||||
|
- Location: `apps/ui/src/main.ts` (Vite entry), `apps/ui/src/app.tsx` (React component)
|
||||||
|
- Triggers: `npm run dev:web` or `npm run build`
|
||||||
|
- Responsibilities:
|
||||||
|
- Initialize Zustand stores from API settings
|
||||||
|
- Setup React Router with TanStack Router
|
||||||
|
- Render root layout with sidebar and main content area
|
||||||
|
- Handle authentication via verifySession()
|
||||||
|
|
||||||
|
**UI (Electron):**
|
||||||
|
|
||||||
|
- Location: `apps/ui/src/main.ts` (Vite entry), `apps/ui/electron/main-process.ts` (Electron main process)
|
||||||
|
- Triggers: `npm run dev:electron`
|
||||||
|
- Responsibilities:
|
||||||
|
- Launch local server via node-pty
|
||||||
|
- Create native Electron window
|
||||||
|
- Bridge IPC between renderer and main process
|
||||||
|
- Provide file system access via preload.ts APIs
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
**Strategy:** Layered error classification and user-friendly messaging
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
**Backend Error Handling:**
|
||||||
|
|
||||||
|
- Errors classified via `classifyError()` from @automaker/utils
|
||||||
|
- Classification: ParseError, NetworkError, AuthenticationError, RateLimitError, etc.
|
||||||
|
- Response format: `{ success: false, error: { type, message, code }, details? }`
|
||||||
|
- Example: `apps/server/src/lib/error-handler.ts`
|
||||||
|
|
||||||
|
**Frontend Error Handling:**
|
||||||
|
|
||||||
|
- HTTP errors caught by api-fetch.ts with retry logic
|
||||||
|
- WebSocket disconnects trigger reconnection with exponential backoff
|
||||||
|
- Errors shown in toast notifications via `sonner` library
|
||||||
|
- Validation errors caught and displayed inline in forms
|
||||||
|
|
||||||
|
**Agent Execution Errors:**
|
||||||
|
|
||||||
|
- AgentService wraps provider calls in try-catch
|
||||||
|
- Aborts handled specially via `isAbortError()` check
|
||||||
|
- Rate limit errors trigger cooldown before retry
|
||||||
|
- Model-specific errors mapped to user guidance
|
||||||
|
|
||||||
|
## Cross-Cutting Concerns
|
||||||
|
|
||||||
|
**Logging:**
|
||||||
|
|
||||||
|
- Framework: @automaker/utils createLogger()
|
||||||
|
- Pattern: `const logger = createLogger('ModuleName')`
|
||||||
|
- Levels: ERROR, WARN, INFO, DEBUG (configurable via settings)
|
||||||
|
- Output: stdout (dev), files (production)
|
||||||
|
|
||||||
|
**Validation:**
|
||||||
|
|
||||||
|
- File path validation: @automaker/platform initAllowedPaths() enforces restrictions
|
||||||
|
- Model ID validation: @automaker/model-resolver resolveModelString()
|
||||||
|
- JSON schema validation: Manual checks in route handlers (no JSON schema lib)
|
||||||
|
- Authentication: Session token validation via validateWsConnectionToken()
|
||||||
|
|
||||||
|
**Authentication:**
|
||||||
|
|
||||||
|
- Frontend: Session token stored in httpOnly cookie
|
||||||
|
- Backend: authMiddleware checks token on protected routes
|
||||||
|
- WebSocket: validateWsConnectionToken() for upgrade requests
|
||||||
|
- Providers: API keys stored encrypted in `{DATA_DIR}/credentials.json`
|
||||||
|
|
||||||
|
**Internationalization:**
|
||||||
|
|
||||||
|
- Not detected - strings are English-only
|
||||||
|
|
||||||
|
**Performance:**
|
||||||
|
|
||||||
|
- Code splitting: File-based routing via TanStack Router
|
||||||
|
- Lazy loading: React.lazy() in route components
|
||||||
|
- Caching: React Query for HTTP requests (query-keys.ts defines cache strategy)
|
||||||
|
- Image optimization: Automatic base64 encoding for agent context
|
||||||
|
- State hydration: Settings loaded once at startup, synced via API
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Architecture analysis: 2026-01-27_
|
||||||
245
.planning/codebase/CONCERNS.md
Normal file
245
.planning/codebase/CONCERNS.md
Normal file
@@ -0,0 +1,245 @@
|
|||||||
|
# Codebase Concerns
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Tech Debt
|
||||||
|
|
||||||
|
**Loose Type Safety in Error Handling:**
|
||||||
|
|
||||||
|
- Issue: Multiple uses of `as any` type assertions bypass TypeScript safety, particularly in error context handling and provider responses
|
||||||
|
- Files: `apps/server/src/providers/claude-provider.ts` (lines 318-322), `apps/server/src/lib/error-handler.ts`, `apps/server/src/routes/settings/routes/update-global.ts`
|
||||||
|
- Impact: Errors could have unchecked properties; refactoring becomes risky without compiler assistance
|
||||||
|
- Fix approach: Replace `as any` with proper type guards and discriminated unions; create helper functions for safe property access
|
||||||
|
|
||||||
|
**Missing Test Coverage for Critical Services:**
|
||||||
|
|
||||||
|
- Issue: Several core services explicitly excluded from test coverage thresholds due to integration complexity
|
||||||
|
- Files: `apps/server/vitest.config.ts` (line 22), explicitly excluded: `claude-usage-service.ts`, `mcp-test-service.ts`, `cli-provider.ts`, `cursor-provider.ts`
|
||||||
|
- Impact: Usage tracking, MCP integration, and CLI detection could break undetected; regression detection is limited
|
||||||
|
- Fix approach: Create integration test fixtures for CLI providers; mock MCP SDK for mcp-test-service tests; add usage tracking unit tests with mocked API calls
|
||||||
|
|
||||||
|
**Unused/Stub TODO Item Processing:**
|
||||||
|
|
||||||
|
- Issue: TodoWrite tool implementation exists but is partially integrated; tool name constants scattered across codex provider
|
||||||
|
- Files: `apps/server/src/providers/codex-tool-mapping.ts`, `apps/server/src/providers/codex-provider.ts`
|
||||||
|
- Impact: Todo list updates may not synchronize properly with all providers; unclear which providers support TodoWrite
|
||||||
|
- Fix approach: Consolidate tool name constants; add provider capability flags for todo support
|
||||||
|
|
||||||
|
**Electron Electron.ts Size and Complexity:**
|
||||||
|
|
||||||
|
- Issue: Single 3741-line file handles all Electron IPC, native bindings, and communication
|
||||||
|
- Files: `apps/ui/src/lib/electron.ts`
|
||||||
|
- Impact: Difficult to test; hard to isolate bugs; changes require full testing of all features; potential memory overhead from monolithic file
|
||||||
|
- Fix approach: Split by responsibility (IPC, window management, file operations, debug tools); create separate bridge layers
|
||||||
|
|
||||||
|
## Known Bugs
|
||||||
|
|
||||||
|
**API Key Management Incomplete for Gemini:**
|
||||||
|
|
||||||
|
- Symptoms: Gemini API key verification endpoint not implemented despite other providers having verification
|
||||||
|
- Files: `apps/ui/src/components/views/settings-view/api-keys/hooks/use-api-key-management.ts` (line 122)
|
||||||
|
- Trigger: User tries to verify Gemini API key in settings
|
||||||
|
- Workaround: Key verification skipped for Gemini; settings page still accepts and stores key
|
||||||
|
|
||||||
|
**Orphaned Features Detection Vulnerable to False Negatives:**
|
||||||
|
|
||||||
|
- Symptoms: Features marked as orphaned when branch matching logic doesn't account for all scenarios
|
||||||
|
- Files: `apps/server/src/services/auto-mode-service.ts` (lines 5714-5773)
|
||||||
|
- Trigger: Features that were manually switched branches or rebased
|
||||||
|
- Workaround: Manual cleanup via feature deletion; branch comparison is basic name matching only
|
||||||
|
|
||||||
|
**Terminal Themes Incomplete:**
|
||||||
|
|
||||||
|
- Symptoms: Light theme themes (solarizedlight, github) map to same generic lightTheme; no dedicated implementations
|
||||||
|
- Files: `apps/ui/src/config/terminal-themes.ts` (lines 593-594)
|
||||||
|
- Trigger: User selects solarizedlight or github terminal theme
|
||||||
|
- Workaround: Uses generic light theme instead of specific scheme; visual appearance doesn't match expectation
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
**Process Environment Variable Exposure:**
|
||||||
|
|
||||||
|
- Risk: Child processes inherit all parent `process.env` including sensitive credentials (API keys, tokens)
|
||||||
|
- Files: `apps/server/src/providers/cursor-provider.ts` (line 993), `apps/server/src/providers/codex-provider.ts` (line 1099)
|
||||||
|
- Current mitigation: Dotenv provides isolation at app startup; selective env passing to some providers
|
||||||
|
- Recommendations: Use explicit allowlists for env vars passed to child processes (only pass REQUIRED_KEYS); audit all spawn calls for env handling; document which providers need which credentials
|
||||||
|
|
||||||
|
**Unvalidated Provider Tool Input:**
|
||||||
|
|
||||||
|
- Risk: Tool input from CLI providers (Cursor, Copilot, Codex) is partially validated through Record<string, unknown> patterns; execution context could be escaped
|
||||||
|
- Files: `apps/server/src/providers/codex-provider.ts` (lines 506-543), `apps/server/src/providers/tool-normalization.ts`
|
||||||
|
- Current mitigation: Status enums validated; tool names checked against allow-lists in some providers
|
||||||
|
- Recommendations: Implement comprehensive schema validation for all tool inputs before execution; use zod or similar for runtime validation; add security tests for injection patterns
|
||||||
|
|
||||||
|
**API Key Storage in Settings Files:**
|
||||||
|
|
||||||
|
- Risk: API keys stored in plaintext in `~/.automaker/settings.json` and `data/settings.json`; file permissions may not be restricted
|
||||||
|
- Files: `apps/server/src/services/settings-service.ts`, uses `atomicWriteJson` without file permission enforcement
|
||||||
|
- Current mitigation: Limited by file system permissions; Electron mode has single-user access
|
||||||
|
- Recommendations: Encrypt sensitive settings fields (apiKeys, tokens); use OS credential stores (Keychain/Credential Manager) for production; add file permission checks on startup
|
||||||
|
|
||||||
|
## Performance Bottlenecks
|
||||||
|
|
||||||
|
**Synchronous Feature Loading at Startup:**
|
||||||
|
|
||||||
|
- Problem: All features loaded synchronously at project load; blocks UI with 1000+ features
|
||||||
|
- Files: `apps/server/src/services/feature-loader.ts` (line 230 Promise.all, but synchronous enumeration)
|
||||||
|
- Cause: Feature directory walk and JSON parsing is not paginated or lazy-loaded
|
||||||
|
- Improvement path: Implement lazy loading with pagination (load first 50, fetch more on scroll); add caching layer with TTL; move to background indexing; add feature count limits with warnings
|
||||||
|
|
||||||
|
**Auto-Mode Concurrency at Max Can Exceed Rate Limits:**
|
||||||
|
|
||||||
|
- Problem: maxConcurrency = 10 can quickly exhaust Claude API rate limits if all features execute simultaneously
|
||||||
|
- Files: `apps/server/src/services/auto-mode-service.ts` (line 2931 Promise.all for concurrent agents)
|
||||||
|
- Cause: No adaptive backoff; no API usage tracking before queuing; hint mentions reducing concurrency but doesn't enforce it
|
||||||
|
- Improvement path: Integrate with claude-usage-service to check remaining quota before starting features; implement exponential backoff on 429 errors; add per-model rate limit tracking
|
||||||
|
|
||||||
|
**Terminal Session Memory Leak Risk:**
|
||||||
|
|
||||||
|
- Problem: Terminal sessions accumulate in memory; expired sessions not cleaned up reliably
|
||||||
|
- Files: `apps/server/src/routes/terminal/common.ts` (line 66 cleanup runs every 5 minutes, but only for tokens)
|
||||||
|
- Cause: Cleanup interval is arbitrary; session map not bounded; no session lifespan limit
|
||||||
|
- Improvement path: Implement LRU eviction with max session count; reduce cleanup interval to 1 minute; add memory usage monitoring; auto-close idle sessions after 30 minutes
|
||||||
|
|
||||||
|
**Large File Content Loading Without Limits:**
|
||||||
|
|
||||||
|
- Problem: File content loaded entirely into memory; `describe-file.ts` truncates at 50KB but loads all content first
|
||||||
|
- Files: `apps/server/src/routes/context/routes/describe-file.ts` (line 128)
|
||||||
|
- Cause: Synchronous file read; no streaming; no check before reading large files
|
||||||
|
- Improvement path: Check file size before reading; stream large files; add file size warnings; implement chunked processing for analysis
|
||||||
|
|
||||||
|
## Fragile Areas
|
||||||
|
|
||||||
|
**Provider Factory Model Resolution:**
|
||||||
|
|
||||||
|
- Files: `apps/server/src/providers/provider-factory.ts`, `apps/server/src/providers/simple-query-service.ts`
|
||||||
|
- Why fragile: Each provider interprets model strings differently; no central registry; model aliases resolved at multiple layers (model-resolver, provider-specific maps, CLI validation)
|
||||||
|
- Safe modification: Add integration tests for each model alias per provider; create model capability matrix; centralize model validation before dispatch
|
||||||
|
- Test coverage: No dedicated tests; relies on E2E; no isolated unit tests for model resolution
|
||||||
|
|
||||||
|
**WebSocket Session Authentication:**
|
||||||
|
|
||||||
|
- Files: `apps/server/src/lib/auth.ts` (line 40 setInterval), `apps/server/src/index.ts` (token validation per message)
|
||||||
|
- Why fragile: Session tokens generated and validated at multiple points; no single source of truth; expiration is not atomic
|
||||||
|
- Safe modification: Add tests for token expiration edge cases; ensure cleanup removes all references; log all auth failures
|
||||||
|
- Test coverage: Auth middleware tested, but not session lifecycle
|
||||||
|
|
||||||
|
**Auto-Mode Feature State Machine:**
|
||||||
|
|
||||||
|
- Files: `apps/server/src/services/auto-mode-service.ts` (lines 465-600)
|
||||||
|
- Why fragile: Multiple states (running, queued, completed, error) managed across different methods; no explicit state transition validation; error recovery is defensive (catches all, logs, continues)
|
||||||
|
- Safe modification: Create explicit state enum with valid transitions; add invariant checks; unit test state transitions with all error cases
|
||||||
|
- Test coverage: Gaps in error recovery paths; no tests for concurrent state changes
|
||||||
|
|
||||||
|
## Scaling Limits
|
||||||
|
|
||||||
|
**Feature Count Scalability:**
|
||||||
|
|
||||||
|
- Current capacity: ~1000 features tested; UI performance degrades with pagination required
|
||||||
|
- Limit: 10K+ features cause >5s load times; memory usage ~100MB for metadata alone
|
||||||
|
- Scaling path: Implement feature database instead of file-per-feature; add ElasticSearch indexing for search; paginate API responses (50 per page); add feature archiving
|
||||||
|
|
||||||
|
**Concurrent Auto-Mode Executions:**
|
||||||
|
|
||||||
|
- Current capacity: maxConcurrency = 10 features; limited by Claude API rate limits
|
||||||
|
- Limit: Rate limit hits at ~4-5 simultaneous features with extended context (100K+ tokens)
|
||||||
|
- Scaling path: Implement token usage budgeting before feature start; queue features with estimated token cost; add provider-specific rate limit handling
|
||||||
|
|
||||||
|
**Terminal Session Count:**
|
||||||
|
|
||||||
|
- Current capacity: ~100 active terminal sessions per server
|
||||||
|
- Limit: Memory grows unbounded; no session count limit enforced
|
||||||
|
- Scaling path: Add max session count with least-recently-used eviction; implement session federation for distributed setup
|
||||||
|
|
||||||
|
**Worktree Disk Usage:**
|
||||||
|
|
||||||
|
- Current capacity: 10K worktrees (~20GB with typical repos)
|
||||||
|
- Limit: `.worktrees` directory grows without cleanup; old worktrees accumulate
|
||||||
|
- Scaling path: Add worktree TTL (delete if not used for 30 days); implement cleanup job; add quota warnings at 50/80% disk
|
||||||
|
|
||||||
|
## Dependencies at Risk
|
||||||
|
|
||||||
|
**node-pty Beta Version:**
|
||||||
|
|
||||||
|
- Risk: `node-pty@1.1.0-beta41` used for terminal emulation; beta status indicates possible instability
|
||||||
|
- Impact: Terminal features could break on minor platform changes; no guarantees on bug fixes
|
||||||
|
- Migration plan: Monitor releases for stable version; pin to specific commit if needed; test extensively on target platforms (macOS, Linux, Windows)
|
||||||
|
|
||||||
|
**@anthropic-ai/claude-agent-sdk 0.1.x:**
|
||||||
|
|
||||||
|
- Risk: Pre-1.0 version; SDK API may change in future releases; limited version stability guarantees
|
||||||
|
- Impact: Breaking changes could require significant refactoring; feature additions in SDK may not align with Automaker roadmap
|
||||||
|
- Migration plan: Pin to specific 0.1.x version; review SDK changelogs before upgrades; maintain SDK compatibility tests; consider fallback implementation for critical paths
|
||||||
|
|
||||||
|
**@openai/codex-sdk 0.77.x:**
|
||||||
|
|
||||||
|
- Risk: Codex model deprecated by OpenAI; SDK may be archived or unsupported
|
||||||
|
- Impact: Codex provider could become non-functional; error messages may not be actionable
|
||||||
|
- Migration plan: Monitor OpenAI roadmap for migration path; implement fallback to Claude for Codex requests; add deprecation warning in UI
|
||||||
|
|
||||||
|
**Express 5.2.x RC Stage:**
|
||||||
|
|
||||||
|
- Risk: Express 5 is still in its release-candidate phase (as of this analysis); full stability not guaranteed
|
||||||
|
- Impact: Minor version updates could include breaking changes; middleware compatibility issues possible
|
||||||
|
- Migration plan: Maintain compatibility layer for Express 5 API; test with latest major before release; document any version-specific workarounds
|
||||||
|
|
||||||
|
## Missing Critical Features
|
||||||
|
|
||||||
|
**Persistent Session Storage:**
|
||||||
|
|
||||||
|
- Problem: Agent conversation sessions stored only in-memory; restart loses all chat history
|
||||||
|
- Blocks: Long-running analysis across server restarts; session recovery not possible
|
||||||
|
- Impact: Users must re-run entire analysis if server restarts; lost productivity
|
||||||
|
|
||||||
|
**Rate Limit Awareness:**
|
||||||
|
|
||||||
|
- Problem: No tracking of API usage relative to rate limits before executing features
|
||||||
|
- Blocks: Predictable concurrent feature execution; users frequently hit rate limits unexpectedly
|
||||||
|
- Impact: Feature execution fails with cryptic rate limit errors; poor user experience
|
||||||
|
|
||||||
|
**Feature Dependency Visualization:**
|
||||||
|
|
||||||
|
- Problem: Dependency-resolver package exists but no UI to visualize or manage dependencies
|
||||||
|
- Blocks: Users cannot plan feature order; complex dependencies not visible
|
||||||
|
- Impact: Features implemented in wrong order; blocking dependencies missed
|
||||||
|
|
||||||
|
## Test Coverage Gaps
|
||||||
|
|
||||||
|
**CLI Provider Integration:**
|
||||||
|
|
||||||
|
- What's not tested: Actual CLI execution paths; environment setup; error recovery from CLI crashes
|
||||||
|
- Files: `apps/server/src/providers/cli-provider.ts`, `apps/server/src/lib/cli-detection.ts`
|
||||||
|
- Risk: Changes to CLI handling could break silently; detection logic not validated on target platforms
|
||||||
|
- Priority: High - affects all CLI-based providers (Cursor, Copilot, Codex)
|
||||||
|
|
||||||
|
**Cursor Provider Platform-Specific Paths:**
|
||||||
|
|
||||||
|
- What's not tested: Windows/Linux Cursor installation detection; version directory parsing; APPDATA environment variable handling
|
||||||
|
- Files: `apps/server/src/providers/cursor-provider.ts` (lines 267-498)
|
||||||
|
- Risk: Platform-specific bugs not caught; Cursor detection fails on non-standard installations
|
||||||
|
- Priority: High - Cursor is primary provider; platform differences critical
|
||||||
|
|
||||||
|
**Event Hook System State Changes:**
|
||||||
|
|
||||||
|
- What's not tested: Concurrent hook execution; cleanup on server shutdown; webhook delivery retries
|
||||||
|
- Files: `apps/server/src/services/event-hook-service.ts` (line 248 Promise.allSettled)
|
||||||
|
- Risk: Hooks may not execute in expected order; memory not cleaned up; webhooks lost on failure
|
||||||
|
- Priority: Medium - affects automation workflows
|
||||||
|
|
||||||
|
**Error Classification for New Providers:**
|
||||||
|
|
||||||
|
- What's not tested: Each provider's unique error patterns mapped to ErrorType enum; new provider errors not classified
|
||||||
|
- Files: `apps/server/src/lib/error-handler.ts` (lines 58-80), each provider error mapping
|
||||||
|
- Risk: User sees generic "unknown error" instead of actionable message; categorization regresses with new providers
|
||||||
|
- Priority: Medium - impacts user experience
|
||||||
|
|
||||||
|
**Feature State Corruption Scenarios:**
|
||||||
|
|
||||||
|
- What's not tested: Concurrent feature updates; partial writes with power loss; JSON parsing recovery
|
||||||
|
- Files: `apps/server/src/services/feature-loader.ts`, `@automaker/utils` (atomicWriteJson)
|
||||||
|
- Risk: Feature data corrupted on concurrent access; recovery incomplete; no validation before use
|
||||||
|
- Priority: High - data loss risk
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Concerns audit: 2026-01-27_
|
||||||
255
.planning/codebase/CONVENTIONS.md
Normal file
255
.planning/codebase/CONVENTIONS.md
Normal file
@@ -0,0 +1,255 @@
|
|||||||
|
# Coding Conventions
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Naming Patterns
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
|
||||||
|
- kebab-case for class/service files (classes inside are PascalCase): `auto-mode-service.ts`, `feature-loader.ts`, `claude-provider.ts`
|
||||||
|
- kebab-case for route/handler directories: `auto-mode/`, `features/`, `event-history/`
|
||||||
|
- kebab-case for utility files: `secure-fs.ts`, `sdk-options.ts`, `settings-helpers.ts`
|
||||||
|
- kebab-case for React components: `card.tsx`, `ansi-output.tsx`, `count-up-timer.tsx`
|
||||||
|
- kebab-case for hooks: `use-board-background-settings.ts`, `use-responsive-kanban.ts`, `use-test-logs.ts`
|
||||||
|
- kebab-case for store files: `app-store.ts`, `auth-store.ts`, `setup-store.ts`
|
||||||
|
- Organized by functionality: `routes/features/routes/list.ts`, `routes/features/routes/get.ts`
|
||||||
|
|
||||||
|
**Functions:**
|
||||||
|
|
||||||
|
- camelCase for all function names: `createEventEmitter()`, `getAutomakerDir()`, `executeQuery()`
|
||||||
|
- Verb-first for action functions: `buildPrompt()`, `classifyError()`, `loadContextFiles()`, `atomicWriteJson()`
|
||||||
|
- Prefix with `use` for React hooks: `useBoardBackgroundSettings()`, `useAppStore()`, `useUpdateProjectSettings()`
|
||||||
|
- Private methods prefixed with underscore: `_deleteOrphanedImages()`, `_migrateImages()`
|
||||||
|
|
||||||
|
**Variables:**
|
||||||
|
|
||||||
|
- camelCase for constants and variables: `featureId`, `projectPath`, `modelId`, `tempDir`
|
||||||
|
- UPPER_SNAKE_CASE for global constants/enums: `DEFAULT_MAX_CONCURRENCY`, `DEFAULT_PHASE_MODELS`
|
||||||
|
- Meaningful naming over abbreviations: `featureDirectory` not `fd`, `featureImages` not `img`
|
||||||
|
- Prefixes for computed values: `is*` for booleans: `isClaudeModel`, `isContainerized`, `isAutoLoginEnabled`
|
||||||
|
|
||||||
|
**Types:**
|
||||||
|
|
||||||
|
- PascalCase for interfaces and types: `Feature`, `ExecuteOptions`, `EventEmitter`, `ProviderConfig`
|
||||||
|
- Type files suffixed with `.d.ts`: `paths.d.ts`, `types.d.ts`
|
||||||
|
- Organized by domain: `src/store/types/`, `src/lib/`
|
||||||
|
- Re-export pattern from main package indexes: `export type { Feature };`
|
||||||
|
|
||||||
|
## Code Style
|
||||||
|
|
||||||
|
**Formatting:**
|
||||||
|
|
||||||
|
- Tool: Prettier 3.7.4
|
||||||
|
- Print width: 100 characters
|
||||||
|
- Tab width: 2 spaces
|
||||||
|
- Single quotes for strings
|
||||||
|
- Semicolons required
|
||||||
|
- Trailing commas: es5 (trailing in arrays/objects, not in params)
|
||||||
|
- Arrow functions always include parentheses: `(x) => x * 2`
|
||||||
|
- Line endings: LF (Unix)
|
||||||
|
- Bracket spacing: `{ key: value }`
|
||||||
|
|
||||||
|
**Linting:**
|
||||||
|
|
||||||
|
- Tool: ESLint (flat config in `apps/ui/eslint.config.mjs`)
|
||||||
|
- TypeScript ESLint plugin for `.ts`/`.tsx` files
|
||||||
|
- Recommended configs: `@eslint/js`, `@typescript-eslint/recommended`
|
||||||
|
- Unused variables warning with exception for parameters starting with `_`
|
||||||
|
- `@ts-ignore` suppressions are permitted only when accompanied by a description explaining why
|
||||||
|
- `@typescript-eslint/no-explicit-any` is warn-level (allow with caution)
|
||||||
|
|
||||||
|
## Import Organization
|
||||||
|
|
||||||
|
**Order:**
|
||||||
|
|
||||||
|
1. Node.js standard library: `import fs from 'fs/promises'`, `import path from 'path'`
|
||||||
|
2. Third-party packages: `import { describe, it } from 'vitest'`, `import { Router } from 'express'`
|
||||||
|
3. Shared packages (monorepo): `import type { Feature } from '@automaker/types'`, `import { createLogger } from '@automaker/utils'`
|
||||||
|
4. Local relative imports: `import { FeatureLoader } from './feature-loader.js'`, `import * as secureFs from '../lib/secure-fs.js'`
|
||||||
|
5. Type imports: separated with `import type { ... } from`
|
||||||
|
|
||||||
|
**Path Aliases:**
|
||||||
|
|
||||||
|
- `@/` - resolves to `./src` in both UI (`apps/ui/`) and server (`apps/server/`)
|
||||||
|
- Shared packages prefixed with `@automaker/`:
|
||||||
|
- `@automaker/types` - core TypeScript definitions
|
||||||
|
- `@automaker/utils` - logging, errors, utilities
|
||||||
|
- `@automaker/prompts` - AI prompt templates
|
||||||
|
- `@automaker/platform` - path management, security, processes
|
||||||
|
- `@automaker/model-resolver` - model alias resolution
|
||||||
|
- `@automaker/dependency-resolver` - feature dependency ordering
|
||||||
|
- `@automaker/git-utils` - git operations
|
||||||
|
- Extensions: `.js` extension used in imports for ESM imports
|
||||||
|
|
||||||
|
**Import Rules:**
|
||||||
|
|
||||||
|
- Always import from shared packages, never from old paths
|
||||||
|
- No circular dependencies between layers
|
||||||
|
- Services import from providers and utilities
|
||||||
|
- Routes import from services
|
||||||
|
- Shared packages have strict dependency hierarchy (types → utils → platform → git-utils → server/ui)
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
- Use `try-catch` blocks for async operations: wraps feature execution, file operations, git commands
|
||||||
|
- Throw `new Error(message)` with descriptive messages: `throw new Error('already running')`, ``throw new Error(`Feature ${featureId} not found`)`` (template literal for interpolation)
|
||||||
|
- Classify errors with `classifyError()` from `@automaker/utils` for categorization
|
||||||
|
- Log errors with context using `createLogger()`: includes error classification
|
||||||
|
- Return error info objects: `{ valid: false, errors: [...], warnings: [...] }`
|
||||||
|
- Validation returns structured result: `{ valid, errors, warnings }` from provider `validateConfig()`
|
||||||
|
|
||||||
|
**Error Types:**
|
||||||
|
|
||||||
|
- Authentication errors: distinguish from validation/runtime errors
|
||||||
|
- Path validation errors: caught by middleware in Express routes
|
||||||
|
- File system errors: logged and recovery attempted with backups
|
||||||
|
- SDK/API errors: classified and wrapped with context
|
||||||
|
- Abort/cancellation errors: handled without stack traces (graceful shutdown)
|
||||||
|
|
||||||
|
**Error Messages:**
|
||||||
|
|
||||||
|
- Descriptive and actionable: not vague error codes
|
||||||
|
- Include context when helpful: file paths, feature IDs, model names
|
||||||
|
- User-friendly messages via `getUserFriendlyErrorMessage()` for client display
|
||||||
|
|
||||||
|
## Logging
|
||||||
|
|
||||||
|
**Framework:**
|
||||||
|
|
||||||
|
- Built-in `createLogger()` from `@automaker/utils`
|
||||||
|
- Each module creates logger: `const logger = createLogger('ModuleName')`
|
||||||
|
- Logger functions: `info()`, `warn()`, `error()`, `debug()`
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
- Log operation start and completion for significant operations
|
||||||
|
- Log warnings for non-critical issues: file deletion failures, missing optional configs
|
||||||
|
- Log errors with full error object: `logger.error('operation failed', error)`
|
||||||
|
- Use module name as logger context: `createLogger('AutoMode')`, `createLogger('HttpClient')`
|
||||||
|
- Avoid logging sensitive data (API keys, passwords)
|
||||||
|
- No console.log in production code - use logger
|
||||||
|
|
||||||
|
**What to Log:**
|
||||||
|
|
||||||
|
- Feature execution start/completion
|
||||||
|
- Error classification and recovery attempts
|
||||||
|
- File operations (create, delete, migrate)
|
||||||
|
- API calls and responses (in debug mode)
|
||||||
|
- Async operation start/end
|
||||||
|
- Warnings for deprecated patterns
|
||||||
|
|
||||||
|
## Comments
|
||||||
|
|
||||||
|
**When to Comment:**
|
||||||
|
|
||||||
|
- Complex algorithms or business logic: explain the "why" not the "what"
|
||||||
|
- Integration points: explain how modules communicate
|
||||||
|
- Workarounds: explain the constraint that made the workaround necessary
|
||||||
|
- Non-obvious performance implications
|
||||||
|
- Edge cases and their handling
|
||||||
|
|
||||||
|
**JSDoc/TSDoc:**
|
||||||
|
|
||||||
|
- Used for public functions and classes
|
||||||
|
- Document parameters with `@param`
|
||||||
|
- Document return types with `@returns`
|
||||||
|
- Document exceptions with `@throws`
|
||||||
|
- Used for service classes: `/**\n * Module description\n * Manages: ...\n */`
|
||||||
|
- Not required for simple getters/setters
|
||||||
|
|
||||||
|
**Example JSDoc Pattern:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
/**
|
||||||
|
* Delete images that were removed from a feature
|
||||||
|
*/
|
||||||
|
private async deleteOrphanedImages(
|
||||||
|
projectPath: string,
|
||||||
|
oldPaths: Array<string>,
|
||||||
|
newPaths: Array<string>
|
||||||
|
): Promise<void> {
|
||||||
|
// Implementation
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Function Design
|
||||||
|
|
||||||
|
**Size:**
|
||||||
|
|
||||||
|
- Keep functions under 100 lines when possible
|
||||||
|
- Large services split into multiple related methods
|
||||||
|
- Private helper methods extracted for complex logic
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
|
||||||
|
- Use destructuring for object parameters with multiple properties
|
||||||
|
- Document parameter types with TypeScript types
|
||||||
|
- Optional parameters marked with `?`
|
||||||
|
- Use `Record<string, unknown>` for flexible object parameters
|
||||||
|
|
||||||
|
**Return Values:**
|
||||||
|
|
||||||
|
- Explicit return types required for all public functions
|
||||||
|
- Return structured objects for multiple values
|
||||||
|
- Use `Promise<T>` for async functions
|
||||||
|
- Async generators use `AsyncGenerator<T>` for streaming responses
|
||||||
|
- Never implicitly return `undefined` (explicit return or throw)
|
||||||
|
|
||||||
|
## Module Design
|
||||||
|
|
||||||
|
**Exports:**
|
||||||
|
|
||||||
|
- Default export for class instantiation: `export default class FeatureLoader {}`
|
||||||
|
- Named exports for functions: `export function createEventEmitter() {}`
|
||||||
|
- Type exports separated: `export type { Feature };`
|
||||||
|
- Barrel files (index.ts) re-export from module
|
||||||
|
|
||||||
|
**Barrel Files:**
|
||||||
|
|
||||||
|
- Used in routes: `routes/features/index.ts` creates router and exports
|
||||||
|
- Used in stores: `store/index.ts` exports all store hooks
|
||||||
|
- Pattern: group related exports for easier importing
|
||||||
|
|
||||||
|
**Service Classes:**
|
||||||
|
|
||||||
|
- Instantiated once and dependency injected
|
||||||
|
- Public methods for API surface
|
||||||
|
- Private methods prefixed with `_`
|
||||||
|
- No static methods - prefer instances or functions
|
||||||
|
- Constructor takes dependencies: `constructor(config?: ProviderConfig)`
|
||||||
|
|
||||||
|
**Provider Pattern:**
|
||||||
|
|
||||||
|
- Abstract base class: `BaseProvider` with abstract methods
|
||||||
|
- Concrete implementations: `ClaudeProvider`, `CodexProvider`, `CursorProvider`
|
||||||
|
- Common interface: `executeQuery()`, `detectInstallation()`, `validateConfig()`
|
||||||
|
- Factory for instantiation: `ProviderFactory.create()`
|
||||||
|
|
||||||
|
## TypeScript Specific
|
||||||
|
|
||||||
|
**Strict Mode:** Always enabled globally
|
||||||
|
|
||||||
|
- `strict: true` in all tsconfigs
|
||||||
|
- No implicit `any` - declare types explicitly
|
||||||
|
- No optional chaining on base types without narrowing
|
||||||
|
|
||||||
|
**Type Definitions:**
|
||||||
|
|
||||||
|
- Interface for shapes: `interface Feature { ... }`
|
||||||
|
- Type for unions/aliases: `type ModelAlias = 'haiku' | 'sonnet' | 'opus'`
|
||||||
|
- Type guards for narrowing: `if (typeof x === 'string') { ... }`
|
||||||
|
- Generic types for reusable patterns: `EventCallback<T>`
|
||||||
|
|
||||||
|
**React Specific (UI):**
|
||||||
|
|
||||||
|
- Functional components only
|
||||||
|
- React 19 with hooks
|
||||||
|
- Type props interface: `interface CardProps extends React.ComponentProps<'div'> { ... }`
|
||||||
|
- Zustand stores for state management
|
||||||
|
- Custom hooks for shared logic
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Convention analysis: 2026-01-27_
|
||||||
232
.planning/codebase/INTEGRATIONS.md
Normal file
232
.planning/codebase/INTEGRATIONS.md
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
# External Integrations
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## APIs & External Services
|
||||||
|
|
||||||
|
**AI/LLM Providers:**
|
||||||
|
|
||||||
|
- Claude (Anthropic)
|
||||||
|
- SDK: `@anthropic-ai/claude-agent-sdk` (0.1.76)
|
||||||
|
- Auth: `ANTHROPIC_API_KEY` environment variable or stored credentials
|
||||||
|
- Features: Extended thinking, vision/images, tools, streaming
|
||||||
|
- Implementation: `apps/server/src/providers/claude-provider.ts`
|
||||||
|
- Models: Opus 4.5, Sonnet 4, Haiku 4.5, and legacy models
|
||||||
|
- Custom endpoints: `ANTHROPIC_BASE_URL` (optional)
|
||||||
|
|
||||||
|
- GitHub Copilot
|
||||||
|
- SDK: `@github/copilot-sdk` (0.1.16)
|
||||||
|
- Auth: GitHub OAuth (via `gh` CLI) or `GITHUB_TOKEN` environment variable
|
||||||
|
- Features: Tools, streaming, runtime model discovery
|
||||||
|
- Implementation: `apps/server/src/providers/copilot-provider.ts`
|
||||||
|
- CLI detection: Searches for Copilot CLI binary
|
||||||
|
- Models: Dynamic discovery via `copilot models list`
|
||||||
|
|
||||||
|
- OpenAI Codex/GPT-4
|
||||||
|
- SDK: `@openai/codex-sdk` (0.77.0)
|
||||||
|
- Auth: `OPENAI_API_KEY` environment variable or stored credentials
|
||||||
|
- Features: Extended thinking, tools, sandbox execution
|
||||||
|
- Implementation: `apps/server/src/providers/codex-provider.ts`
|
||||||
|
- Execution modes: CLI (with sandbox) or SDK (direct API)
|
||||||
|
- Models: Dynamic discovery via Codex CLI or SDK
|
||||||
|
|
||||||
|
- Google Gemini
|
||||||
|
- Implementation: `apps/server/src/providers/gemini-provider.ts`
|
||||||
|
- Features: Vision support, tools, streaming
|
||||||
|
|
||||||
|
- OpenCode (AWS/Azure/other)
|
||||||
|
- Implementation: `apps/server/src/providers/opencode-provider.ts`
|
||||||
|
- Supports: Amazon Bedrock, Azure models, local models
|
||||||
|
- Features: Flexible provider architecture
|
||||||
|
|
||||||
|
- Cursor Editor
|
||||||
|
- Implementation: `apps/server/src/providers/cursor-provider.ts`
|
||||||
|
- Features: Integration with Cursor IDE
|
||||||
|
|
||||||
|
**Model Context Protocol (MCP):**
|
||||||
|
|
||||||
|
- SDK: `@modelcontextprotocol/sdk` (1.25.2)
|
||||||
|
- Purpose: Connect AI agents to external tools and data sources
|
||||||
|
- Implementation: `apps/server/src/services/mcp-test-service.ts`, `apps/server/src/routes/mcp/`
|
||||||
|
- Configuration: Per-project in `.automaker/` directory
|
||||||
|
|
||||||
|
## Data Storage
|
||||||
|
|
||||||
|
**Databases:**
|
||||||
|
|
||||||
|
- None - This codebase does NOT use traditional databases (SQL/NoSQL)
|
||||||
|
- All data stored as files in local filesystem
|
||||||
|
|
||||||
|
**File Storage:**
|
||||||
|
|
||||||
|
- Local filesystem only
|
||||||
|
- Locations:
|
||||||
|
- `.automaker/` - Project-specific data (features, context, settings)
|
||||||
|
- `./data/` or `DATA_DIR` env var - Global data (settings, credentials, sessions)
|
||||||
|
- Secure file operations: `@automaker/platform` exports `secureFs` for restricted file access
|
||||||
|
|
||||||
|
**Caching:**
|
||||||
|
|
||||||
|
- In-memory caches for:
|
||||||
|
- Model lists (Copilot, Codex runtime discovery)
|
||||||
|
- Feature metadata
|
||||||
|
- Project specifications
|
||||||
|
- No distributed/persistent caching system
|
||||||
|
|
||||||
|
## Authentication & Identity
|
||||||
|
|
||||||
|
**Auth Provider:**
|
||||||
|
|
||||||
|
- Custom implementation (no third-party provider)
|
||||||
|
- Authentication methods:
|
||||||
|
1. Claude Max Plan (OAuth via Anthropic CLI)
|
||||||
|
2. API Key mode (ANTHROPIC_API_KEY)
|
||||||
|
3. Custom provider profiles with API keys
|
||||||
|
4. Token-based session authentication for WebSocket
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
|
||||||
|
- `apps/server/src/lib/auth.ts` - Auth middleware
|
||||||
|
- `apps/server/src/routes/auth/` - Auth routes
|
||||||
|
- Session tokens for WebSocket connections
|
||||||
|
- Credential storage in `./data/credentials.json` (encrypted/protected)
|
||||||
|
|
||||||
|
## Monitoring & Observability
|
||||||
|
|
||||||
|
**Error Tracking:**
|
||||||
|
|
||||||
|
- None - No automatic error reporting service integrated
|
||||||
|
- Custom error classification: `@automaker/utils` exports `classifyError()`
|
||||||
|
- User-friendly error messages: `getUserFriendlyErrorMessage()`
|
||||||
|
|
||||||
|
**Logs:**
|
||||||
|
|
||||||
|
- Console logging with configurable levels
|
||||||
|
- Logger: `@automaker/utils` exports `createLogger()`
|
||||||
|
- Log levels: ERROR, WARN, INFO, DEBUG
|
||||||
|
- Environment: `LOG_LEVEL` env var (optional)
|
||||||
|
- Storage: Logs output to console/stdout (no persistent logging to files)
|
||||||
|
|
||||||
|
**Usage Tracking:**
|
||||||
|
|
||||||
|
- Claude API usage: `apps/server/src/services/claude-usage-service.ts`
|
||||||
|
- Codex API usage: `apps/server/src/services/codex-usage-service.ts`
|
||||||
|
- Tracks: Tokens, costs, rates
|
||||||
|
|
||||||
|
## CI/CD & Deployment
|
||||||
|
|
||||||
|
**Hosting:**
|
||||||
|
|
||||||
|
- Local development: Node.js server + Vite dev server
|
||||||
|
- Desktop: Electron application (macOS, Windows, Linux)
|
||||||
|
- Web: Express server deployed to any Node.js host
|
||||||
|
|
||||||
|
**CI Pipeline:**
|
||||||
|
|
||||||
|
- GitHub Actions (workflow definitions present in `.github/workflows/`)
|
||||||
|
- Testing: Playwright E2E, Vitest unit tests
|
||||||
|
- Linting: ESLint
|
||||||
|
- Formatting: Prettier
|
||||||
|
|
||||||
|
**Build Process:**
|
||||||
|
|
||||||
|
- `npm run build:packages` - Build shared packages
|
||||||
|
- `npm run build` - Build web UI
|
||||||
|
- `npm run build:electron` - Build Electron apps (platform-specific)
|
||||||
|
- Electron Builder handles code signing and distribution
|
||||||
|
|
||||||
|
## Environment Configuration
|
||||||
|
|
||||||
|
**Required env vars:**
|
||||||
|
|
||||||
|
- `ANTHROPIC_API_KEY` - For Claude provider (or provide in settings)
|
||||||
|
- `OPENAI_API_KEY` - For Codex provider (optional)
|
||||||
|
- `GITHUB_TOKEN` - For GitHub operations (optional)
|
||||||
|
|
||||||
|
**Optional env vars:**
|
||||||
|
|
||||||
|
- `PORT` - Server port (default 3008)
|
||||||
|
- `HOST` - Server bind address (default 0.0.0.0)
|
||||||
|
- `HOSTNAME` - Public hostname (default localhost)
|
||||||
|
- `DATA_DIR` - Data storage directory (default ./data)
|
||||||
|
- `ANTHROPIC_BASE_URL` - Custom Claude endpoint
|
||||||
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to directory
|
||||||
|
- `AUTOMAKER_MOCK_AGENT` - Enable mock agent for testing
|
||||||
|
- `AUTOMAKER_AUTO_LOGIN` - Skip login prompt in dev
|
||||||
|
|
||||||
|
**Secrets location:**
|
||||||
|
|
||||||
|
- Runtime: Environment variables (`process.env`)
|
||||||
|
- Stored: `./data/credentials.json` (file-based)
|
||||||
|
- Retrieval: `apps/server/src/services/settings-service.ts`
|
||||||
|
|
||||||
|
## Webhooks & Callbacks
|
||||||
|
|
||||||
|
**Incoming:**
|
||||||
|
|
||||||
|
- WebSocket connections for real-time agent event streaming
|
||||||
|
- GitHub webhook routes (optional): `apps/server/src/routes/github/`
|
||||||
|
- Terminal WebSocket connections: `apps/server/src/routes/terminal/`
|
||||||
|
|
||||||
|
**Outgoing:**
|
||||||
|
|
||||||
|
- GitHub PRs: `apps/server/src/routes/worktree/routes/create-pr.ts`
|
||||||
|
- Git operations: `@automaker/git-utils` handles commits, pushes
|
||||||
|
- Terminal output streaming via WebSocket to clients
|
||||||
|
- Event hooks: `apps/server/src/services/event-hook-service.ts`
|
||||||
|
|
||||||
|
## Credential Management
|
||||||
|
|
||||||
|
**API Keys Storage:**
|
||||||
|
|
||||||
|
- File: `./data/credentials.json`
|
||||||
|
- Format: JSON with nested structure for different providers
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"apiKeys": {
|
||||||
|
"anthropic": "sk-...",
|
||||||
|
"openai": "sk-...",
|
||||||
|
"github": "ghp_..."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
- Access: `SettingsService.getCredentials()` from `apps/server/src/services/settings-service.ts`
|
||||||
|
- Security: File permissions should restrict to current user only
|
||||||
|
|
||||||
|
**Profile/Provider Configuration:**
|
||||||
|
|
||||||
|
- File: `./data/settings.json` (global) or `.automaker/settings.json` (per-project)
|
||||||
|
- Stores: Alternative provider profiles, model mappings, sandbox settings
|
||||||
|
- Types: `ClaudeApiProfile`, `ClaudeCompatibleProvider` from `@automaker/types`
|
||||||
|
|
||||||
|
## Third-Party Service Integration Points
|
||||||
|
|
||||||
|
**Git/GitHub:**
|
||||||
|
|
||||||
|
- `@automaker/git-utils` - Git operations (worktrees, commits, diffs)
|
||||||
|
- Codex/Cursor providers can create GitHub PRs
|
||||||
|
- GitHub CLI (`gh`) detection for Copilot authentication
|
||||||
|
|
||||||
|
**Terminal Access:**
|
||||||
|
|
||||||
|
- `node-pty` (1.1.0-beta41) - Pseudo-terminal interface
|
||||||
|
- `TerminalService` manages terminal sessions
|
||||||
|
- WebSocket streaming to frontend
|
||||||
|
|
||||||
|
**AI Models - Multi-Provider Abstraction:**
|
||||||
|
|
||||||
|
- `BaseProvider` interface: `apps/server/src/providers/base-provider.ts`
|
||||||
|
- Factory pattern: `apps/server/src/providers/provider-factory.ts`
|
||||||
|
- Allows swapping providers without changing agent logic
|
||||||
|
- All providers implement: `executeQuery()`, `detectInstallation()`, `getAvailableModels()`
|
||||||
|
|
||||||
|
**Process Spawning:**
|
||||||
|
|
||||||
|
- `@automaker/platform` exports `spawnProcess()`, `spawnJSONLProcess()`
|
||||||
|
- Codex CLI execution: JSONL output parsing
|
||||||
|
- Copilot CLI execution: Subprocess management
|
||||||
|
- Cursor IDE interaction: Process spawning for tool execution
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Integration audit: 2026-01-27_
|
||||||
230
.planning/codebase/STACK.md
Normal file
230
.planning/codebase/STACK.md
Normal file
@@ -0,0 +1,230 @@
|
|||||||
|
# Technology Stack
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Languages
|
||||||
|
|
||||||
|
**Primary:**
|
||||||
|
|
||||||
|
- TypeScript 5.9.3 - Used across all packages, apps, and configuration
|
||||||
|
- JavaScript (Node.js) - Runtime execution for scripts and tooling
|
||||||
|
|
||||||
|
**Secondary:**
|
||||||
|
|
||||||
|
- YAML 2.7.0 - Configuration files
|
||||||
|
- CSS/Tailwind CSS 4.1.18 - Frontend styling
|
||||||
|
|
||||||
|
## Runtime
|
||||||
|
|
||||||
|
**Environment:**
|
||||||
|
|
||||||
|
- Node.js 22.x (>=22.0.0 <23.0.0) - Required version, specified in `.nvmrc`
|
||||||
|
|
||||||
|
**Package Manager:**
|
||||||
|
|
||||||
|
- npm - Monorepo workspace management via npm workspaces
|
||||||
|
- Lockfile: `package-lock.json` (present)
|
||||||
|
|
||||||
|
## Frameworks
|
||||||
|
|
||||||
|
**Core - Frontend:**
|
||||||
|
|
||||||
|
- React 19.2.3 - UI framework with hooks and concurrent features
|
||||||
|
- Vite 7.3.0 - Build tool and dev server (`apps/ui/vite.config.ts`)
|
||||||
|
- Electron 39.2.7 - Desktop application runtime (`apps/ui/package.json`)
|
||||||
|
- TanStack Router 1.141.6 - File-based routing (React)
|
||||||
|
- Zustand 5.0.9 - State management (lightweight alternative to Redux)
|
||||||
|
- TanStack Query (React Query) 5.90.17 - Server state management
|
||||||
|
|
||||||
|
**Core - Backend:**
|
||||||
|
|
||||||
|
- Express 5.2.1 - HTTP server framework (`apps/server/package.json`)
|
||||||
|
- WebSocket (ws) 8.18.3 - Real-time bidirectional communication
|
||||||
|
- Claude Agent SDK (@anthropic-ai/claude-agent-sdk) 0.1.76 - AI provider integration
|
||||||
|
|
||||||
|
**Testing:**
|
||||||
|
|
||||||
|
- Playwright 1.57.0 - End-to-end testing (`apps/ui` E2E tests)
|
||||||
|
- Vitest 4.0.16 - Unit testing framework (runs on all packages and server)
|
||||||
|
- @vitest/ui 4.0.16 - Visual test runner UI
|
||||||
|
- @vitest/coverage-v8 4.0.16 - Code coverage reporting
|
||||||
|
|
||||||
|
**Build/Dev:**
|
||||||
|
|
||||||
|
- electron-builder 26.0.12 - Electron app packaging and distribution
|
||||||
|
- @vitejs/plugin-react 5.1.2 - Vite React support
|
||||||
|
- vite-plugin-electron 0.29.0 - Vite plugin for Electron main process
|
||||||
|
- vite-plugin-electron-renderer 0.14.6 - Vite plugin for Electron renderer
|
||||||
|
- ESLint 9.39.2 - Code linting (`apps/ui`)
|
||||||
|
- @typescript-eslint/eslint-plugin 8.50.0 - TypeScript ESLint rules
|
||||||
|
- Prettier 3.7.4 - Code formatting (root-level config)
|
||||||
|
- Tailwind CSS 4.1.18 - Utility-first CSS framework
|
||||||
|
- @tailwindcss/vite 4.1.18 - Tailwind Vite integration
|
||||||
|
|
||||||
|
**UI Components & Libraries:**
|
||||||
|
|
||||||
|
- Radix UI - Unstyled accessible component library (@radix-ui packages)
|
||||||
|
- react-dropdown-menu 2.1.16
|
||||||
|
- react-dialog 1.1.15
|
||||||
|
- react-select 2.2.6
|
||||||
|
- react-tooltip 1.2.8
|
||||||
|
- react-tabs 1.1.13
|
||||||
|
- react-collapsible 1.1.12
|
||||||
|
- react-checkbox 1.3.3
|
||||||
|
- react-radio-group 1.3.8
|
||||||
|
- react-popover 1.1.15
|
||||||
|
- react-slider 1.3.6
|
||||||
|
- react-switch 1.2.6
|
||||||
|
- react-scroll-area 1.2.10
|
||||||
|
- react-label 2.1.8
|
||||||
|
- Lucide React 0.562.0 - Icon library
|
||||||
|
- Geist 1.5.1 - Design system UI library
|
||||||
|
- Sonner 2.0.7 - Toast notifications
|
||||||
|
|
||||||
|
**Code Editor & Terminal:**
|
||||||
|
|
||||||
|
- @uiw/react-codemirror 4.25.4 - Code editor React component
|
||||||
|
- CodeMirror (@codemirror packages) 6.x - Editor toolkit
|
||||||
|
- xterm.js (@xterm/xterm) 5.5.0 - Terminal emulator
|
||||||
|
- @xterm/addon-fit 0.10.0 - Fit addon for terminal
|
||||||
|
- @xterm/addon-search 0.15.0 - Search addon for terminal
|
||||||
|
- @xterm/addon-web-links 0.11.0 - Web links addon
|
||||||
|
- @xterm/addon-webgl 0.18.0 - WebGL renderer for terminal
|
||||||
|
|
||||||
|
**Diagram/Graph Visualization:**
|
||||||
|
|
||||||
|
- @xyflow/react 12.10.0 - React flow diagram library
|
||||||
|
- dagre 0.8.5 - Graph layout algorithms
|
||||||
|
|
||||||
|
**Markdown/Content Rendering:**
|
||||||
|
|
||||||
|
- react-markdown 10.1.0 - Markdown parser and renderer
|
||||||
|
- remark-gfm 4.0.1 - GitHub Flavored Markdown support
|
||||||
|
- rehype-raw 7.0.0 - Raw HTML support in markdown
|
||||||
|
- rehype-sanitize 6.0.0 - HTML sanitization
|
||||||
|
|
||||||
|
**Data Validation & Parsing:**
|
||||||
|
|
||||||
|
- zod 3.24.1 or 4.0.0 - Schema validation and TypeScript type inference
|
||||||
|
|
||||||
|
**Utilities:**
|
||||||
|
|
||||||
|
- class-variance-authority 0.7.1 - CSS variant utilities
|
||||||
|
- clsx 2.1.1 - Conditional className utility
|
||||||
|
- cmdk 1.1.1 - Command menu/palette
|
||||||
|
- tailwind-merge 3.4.0 - Tailwind CSS conflict resolution
|
||||||
|
- usehooks-ts 3.1.1 - TypeScript React hooks
|
||||||
|
- @dnd-kit (drag-and-drop) 6.3.1 - Drag and drop library
|
||||||
|
|
||||||
|
**Font Libraries:**
|
||||||
|
|
||||||
|
- @fontsource - Web font packages (Cascadia Code, Fira Code, IBM Plex, Inconsolata, Inter, etc.)
|
||||||
|
|
||||||
|
**Development Utilities:**
|
||||||
|
|
||||||
|
- cross-spawn 7.0.6 - Cross-platform process spawning
|
||||||
|
- dotenv 17.2.3 - Environment variable loading
|
||||||
|
- tsx 4.21.0 - TypeScript execution for Node.js
|
||||||
|
- tree-kill 1.2.2 - Process tree killer utility
|
||||||
|
- node-pty 1.1.0-beta41 - PTY/terminal interface for Node.js
|
||||||
|
|
||||||
|
## Key Dependencies
|
||||||
|
|
||||||
|
**Critical - AI/Agent Integration:**
|
||||||
|
|
||||||
|
- @anthropic-ai/claude-agent-sdk 0.1.76 - Core Claude AI provider
|
||||||
|
- @github/copilot-sdk 0.1.16 - GitHub Copilot integration
|
||||||
|
- @openai/codex-sdk 0.77.0 - OpenAI Codex/GPT-4 integration
|
||||||
|
- @modelcontextprotocol/sdk 1.25.2 - Model Context Protocol servers
|
||||||
|
|
||||||
|
**Infrastructure - Internal Packages:**
|
||||||
|
|
||||||
|
- @automaker/types 1.0.0 - Shared TypeScript type definitions
|
||||||
|
- @automaker/utils 1.0.0 - Logging, error handling, utilities
|
||||||
|
- @automaker/platform 1.0.0 - Path management, security, process spawning
|
||||||
|
- @automaker/prompts 1.0.0 - AI prompt templates
|
||||||
|
- @automaker/model-resolver 1.0.0 - Claude model alias resolution
|
||||||
|
- @automaker/dependency-resolver 1.0.0 - Feature dependency ordering
|
||||||
|
- @automaker/git-utils 1.0.0 - Git operations & worktree management
|
||||||
|
- @automaker/spec-parser 1.0.0 - Project specification parsing
|
||||||
|
|
||||||
|
**Server Utilities:**
|
||||||
|
|
||||||
|
- express 5.2.1 - Web framework
|
||||||
|
- cors 2.8.5 - CORS middleware
|
||||||
|
- morgan 1.10.1 - HTTP request logger
|
||||||
|
- cookie-parser 1.4.7 - Cookie parsing middleware
|
||||||
|
- yaml 2.7.0 - YAML parsing and generation
|
||||||
|
|
||||||
|
**Type Definitions:**
|
||||||
|
|
||||||
|
- @types/express 5.0.6
|
||||||
|
- @types/node 22.19.3
|
||||||
|
- @types/react 19.2.7
|
||||||
|
- @types/react-dom 19.2.3
|
||||||
|
- @types/dagre 0.7.53
|
||||||
|
- @types/ws 8.18.1
|
||||||
|
- @types/cookie 0.6.0
|
||||||
|
- @types/cookie-parser 1.4.10
|
||||||
|
- @types/cors 2.8.19
|
||||||
|
- @types/morgan 1.9.10
|
||||||
|
|
||||||
|
**Optional Dependencies (Platform-specific):**
|
||||||
|
|
||||||
|
- lightningcss (various platforms) 1.29.2 - CSS parser (alternate to PostCSS)
|
||||||
|
- dmg-license 1.0.11 - DMG license dialog for macOS
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
**Environment:**
|
||||||
|
|
||||||
|
- `.env` and `.env.example` files in `apps/server/` and `apps/ui/`
|
||||||
|
- `dotenv` library loads variables from `.env` files
|
||||||
|
- Key env vars:
|
||||||
|
- `ANTHROPIC_API_KEY` - Claude API authentication
|
||||||
|
- `OPENAI_API_KEY` - OpenAI/Codex authentication
|
||||||
|
- `GITHUB_TOKEN` - GitHub API access
|
||||||
|
- `ANTHROPIC_BASE_URL` - Custom Claude endpoint (optional)
|
||||||
|
- `HOST` - Server bind address (default: 0.0.0.0)
|
||||||
|
- `HOSTNAME` - Hostname for URLs (default: localhost)
|
||||||
|
- `PORT` - Server port (default: 3008)
|
||||||
|
- `DATA_DIR` - Data storage directory (default: ./data)
|
||||||
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations
|
||||||
|
- `AUTOMAKER_MOCK_AGENT` - Enable mock agent for testing
|
||||||
|
- `AUTOMAKER_AUTO_LOGIN` - Skip login in dev (disabled in production)
|
||||||
|
- `VITE_HOSTNAME` - Frontend API hostname
|
||||||
|
|
||||||
|
**Build:**
|
||||||
|
|
||||||
|
- `apps/ui/electron-builder.config.json` or `apps/ui/package.json` build config
|
||||||
|
- Electron builder targets:
|
||||||
|
- macOS: DMG and ZIP
|
||||||
|
- Windows: NSIS installer
|
||||||
|
- Linux: AppImage, DEB, RPM
|
||||||
|
- Vite config: `apps/ui/vite.config.ts`, `apps/server/tsconfig.json`
|
||||||
|
- TypeScript config: `tsconfig.json` files in each package
|
||||||
|
|
||||||
|
## Platform Requirements
|
||||||
|
|
||||||
|
**Development:**
|
||||||
|
|
||||||
|
- Node.js 22.x
|
||||||
|
- npm (included with Node.js)
|
||||||
|
- Git (for worktree operations)
|
||||||
|
- Python (optional, for some dev scripts)
|
||||||
|
|
||||||
|
**Production:**
|
||||||
|
|
||||||
|
- Electron desktop app: Windows, macOS, Linux
|
||||||
|
- Web browser: Modern Chromium-based browsers
|
||||||
|
- Server: Any platform supporting Node.js 22.x
|
||||||
|
|
||||||
|
**Deployment Target:**
|
||||||
|
|
||||||
|
- Local desktop (Electron)
|
||||||
|
- Local web server (Express + Vite)
|
||||||
|
- Remote server deployment (Docker, systemd, or other orchestration)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Stack analysis: 2026-01-27_
|
||||||
340
.planning/codebase/STRUCTURE.md
Normal file
340
.planning/codebase/STRUCTURE.md
Normal file
@@ -0,0 +1,340 @@
|
|||||||
|
# Codebase Structure
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Directory Layout
|
||||||
|
|
||||||
|
```
|
||||||
|
automaker/
|
||||||
|
├── apps/ # Application packages
|
||||||
|
│ ├── ui/ # React + Electron frontend (port 3007)
|
||||||
|
│ │ ├── src/
|
||||||
|
│ │ │ ├── main.ts # Electron/Vite entry point
|
||||||
|
│ │ │ ├── app.tsx # Root React component (splash, router)
|
||||||
|
│ │ │ ├── renderer.tsx # Electron renderer entry
|
||||||
|
│ │ │ ├── routes/ # TanStack Router file-based routes
|
||||||
|
│ │ │ ├── components/ # React components (views, dialogs, UI, layout)
|
||||||
|
│ │ │ ├── store/ # Zustand state management
|
||||||
|
│ │ │ ├── hooks/ # Custom React hooks
|
||||||
|
│ │ │ ├── lib/ # Utilities (API client, electron, queries, etc.)
|
||||||
|
│ │ │ ├── electron/ # Electron main & preload process files
|
||||||
|
│ │ │ ├── config/ # UI configuration (fonts, themes, routes)
|
||||||
|
│ │ │ └── styles/ # CSS and theme files
|
||||||
|
│ │ ├── public/ # Static assets
|
||||||
|
│ │ └── tests/ # E2E Playwright tests
|
||||||
|
│ │
|
||||||
|
│ └── server/ # Express backend (port 3008)
|
||||||
|
│ ├── src/
|
||||||
|
│ │ ├── index.ts # Express app initialization, route mounting
|
||||||
|
│ │ ├── routes/ # REST API endpoints (30+ route folders)
|
||||||
|
│ │ ├── services/ # Business logic services
|
||||||
|
│ │ ├── providers/ # AI model provider implementations
|
||||||
|
│ │ ├── lib/ # Utilities (events, auth, helpers, etc.)
|
||||||
|
│ │ ├── middleware/ # Express middleware
|
||||||
|
│ │ └── types/ # Server-specific type definitions
|
||||||
|
│ └── tests/ # Unit tests (Vitest)
|
||||||
|
│
|
||||||
|
├── libs/ # Shared npm packages (@automaker/*)
|
||||||
|
│ ├── types/ # @automaker/types (no dependencies)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # Main export with all type definitions
|
||||||
|
│ │ ├── feature.ts # Feature, FeatureStatus, etc.
|
||||||
|
│ │ ├── provider.ts # Provider interfaces, model definitions
|
||||||
|
│ │ ├── settings.ts # Global and project settings types
|
||||||
|
│ │ ├── event.ts # Event types for real-time updates
|
||||||
|
│ │ ├── session.ts # AgentSession, conversation types
|
||||||
|
│ │ ├── model*.ts # Model-specific types (cursor, codex, gemini, etc.)
|
||||||
|
│ │ └── ... 20+ more type files
|
||||||
|
│ │
|
||||||
|
│ ├── utils/ # @automaker/utils (logging, errors, images, context)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── logger.ts # createLogger() with LogLevel enum
|
||||||
|
│ │ ├── errors.ts # classifyError(), error types
|
||||||
|
│ │ ├── image-utils.ts # Image processing, base64 encoding
|
||||||
|
│ │ ├── context-loader.ts # loadContextFiles() for AI prompts
|
||||||
|
│ │ └── ... more utilities
|
||||||
|
│ │
|
||||||
|
│ ├── platform/ # @automaker/platform (paths, security, OS)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # Path getters (getFeatureDir, getFeaturesDir, etc.)
|
||||||
|
│ │ ├── secure-fs.ts # Secure filesystem operations
|
||||||
|
│ │ └── config/ # Claude auth detection, allowed paths
|
||||||
|
│ │
|
||||||
|
│ ├── prompts/ # @automaker/prompts (AI prompt templates)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # Main prompts export
|
||||||
|
│ │ └── *-prompt.ts # Prompt templates for different features
|
||||||
|
│ │
|
||||||
|
│ ├── model-resolver/ # @automaker/model-resolver
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ └── index.ts # resolveModelString() for model aliases
|
||||||
|
│ │
|
||||||
|
│ ├── dependency-resolver/ # @automaker/dependency-resolver
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ └── index.ts # Resolve feature dependencies
|
||||||
|
│ │
|
||||||
|
│ ├── git-utils/ # @automaker/git-utils (git operations)
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ ├── index.ts # getGitRepositoryDiffs(), worktree management
|
||||||
|
│ │ └── ... git helpers
|
||||||
|
│ │
|
||||||
|
│ ├── spec-parser/ # @automaker/spec-parser
|
||||||
|
│ │ └── src/
|
||||||
|
│ │ └── ... spec parsing utilities
|
||||||
|
│ │
|
||||||
|
│ └── tsconfig.base.json # Base TypeScript config for all packages
|
||||||
|
│
|
||||||
|
├── .automaker/ # Project data directory (created by app)
|
||||||
|
│ ├── features/ # Feature storage
|
||||||
|
│ │ └── {featureId}/
|
||||||
|
│ │ ├── feature.json # Feature metadata and content
|
||||||
|
│ │ ├── agent-output.md # Agent execution results
|
||||||
|
│ │ └── images/ # Feature images
|
||||||
|
│ ├── context/ # Context files (CLAUDE.md, etc.)
|
||||||
|
│ ├── settings.json # Per-project settings
|
||||||
|
│ ├── spec.md # Project specification
|
||||||
|
│ └── analysis.json # Project structure analysis
|
||||||
|
│
|
||||||
|
├── data/ # Global data directory (default, configurable)
|
||||||
|
│ ├── settings.json # Global settings, profiles
|
||||||
|
│ ├── credentials.json # Encrypted API keys
|
||||||
|
│ ├── sessions-metadata.json # Chat session metadata
|
||||||
|
│ └── agent-sessions/ # Conversation histories
|
||||||
|
│
|
||||||
|
├── .planning/ # Generated documentation by GSD orchestrator
|
||||||
|
│ └── codebase/ # Codebase analysis documents
|
||||||
|
│ ├── ARCHITECTURE.md # Architecture patterns and layers
|
||||||
|
│ ├── STRUCTURE.md # This file
|
||||||
|
│ ├── STACK.md # Technology stack
|
||||||
|
│ ├── INTEGRATIONS.md # External API integrations
|
||||||
|
│ ├── CONVENTIONS.md # Code style and naming
|
||||||
|
│ ├── TESTING.md # Testing patterns
|
||||||
|
│ └── CONCERNS.md # Technical debt and issues
|
||||||
|
│
|
||||||
|
├── .github/ # GitHub Actions workflows
|
||||||
|
├── scripts/ # Build and utility scripts
|
||||||
|
├── tests/ # Test data and utilities
|
||||||
|
├── docs/ # Documentation
|
||||||
|
├── package.json # Root workspace config
|
||||||
|
├── package-lock.json # Lock file
|
||||||
|
├── CLAUDE.md # Project instructions for Claude Code
|
||||||
|
├── DEVELOPMENT_WORKFLOW.md # Development guidelines
|
||||||
|
└── README.md # Project overview
|
||||||
|
```
|
||||||
|
|
||||||
|
## Directory Purposes
|
||||||
|
|
||||||
|
**apps/ui/:**
|
||||||
|
|
||||||
|
- Purpose: React frontend for desktop (Electron) and web modes
|
||||||
|
- Build system: Vite 7 with TypeScript
|
||||||
|
- Styling: Tailwind CSS 4
|
||||||
|
- State: Zustand 5 with API persistence
|
||||||
|
- Routing: TanStack Router with file-based structure
|
||||||
|
- Desktop: Electron 39 with preload IPC bridge
|
||||||
|
|
||||||
|
**apps/server/:**
|
||||||
|
|
||||||
|
- Purpose: Express backend API and service layer
|
||||||
|
- Build system: TypeScript → JavaScript
|
||||||
|
- Runtime: Node.js 22.x (per the `.nvmrc` requirement: >=22.0.0 <23.0.0)
|
||||||
|
- WebSocket: ws library for real-time streaming
|
||||||
|
- Process management: node-pty for terminal isolation
|
||||||
|
|
||||||
|
**libs/types/:**
|
||||||
|
|
||||||
|
- Purpose: Central type definitions (no dependencies, fast import)
|
||||||
|
- Used by: All other packages and apps
|
||||||
|
- Pattern: Single namespace export from index.ts
|
||||||
|
- Build: Compiled to ESM only
|
||||||
|
|
||||||
|
**libs/utils/:**
|
||||||
|
|
||||||
|
- Purpose: Shared utilities for logging, errors, file operations, image processing
|
||||||
|
- Used by: Server, UI, other libraries
|
||||||
|
- Notable: `createLogger()`, `classifyError()`, `loadContextFiles()`, `readImageAsBase64()`
|
||||||
|
|
||||||
|
**libs/platform/:**
|
||||||
|
|
||||||
|
- Purpose: OS-agnostic path management and security enforcement
|
||||||
|
- Used by: Server services for file operations
|
||||||
|
- Notable: Path normalization, allowed directory enforcement, Claude auth detection
|
||||||
|
|
||||||
|
**libs/prompts/:**
|
||||||
|
|
||||||
|
- Purpose: AI prompt templates injected into agent context
|
||||||
|
- Used by: AgentService when executing features
|
||||||
|
- Pattern: Function exports that return prompt strings
|
||||||
|
|
||||||
|
## Key File Locations
|
||||||
|
|
||||||
|
**Entry Points:**
|
||||||
|
|
||||||
|
**Server:**
|
||||||
|
|
||||||
|
- `apps/server/src/index.ts`: Express server initialization, route mounting, WebSocket setup
|
||||||
|
|
||||||
|
**UI (Web):**
|
||||||
|
|
||||||
|
- `apps/ui/src/main.ts`: Vite entry point
|
||||||
|
- `apps/ui/src/app.tsx`: Root React component
|
||||||
|
|
||||||
|
**UI (Electron):**
|
||||||
|
|
||||||
|
- `apps/ui/src/main.ts`: Vite entry point
|
||||||
|
- `apps/ui/src/electron/main-process.ts`: Electron main process
|
||||||
|
- `apps/ui/src/preload.ts`: Electron preload script for IPC bridge
|
||||||
|
|
||||||
|
**Configuration:**
|
||||||
|
|
||||||
|
- `apps/server/src/index.ts`: PORT, HOST, HOSTNAME, DATA_DIR env vars
|
||||||
|
- `apps/ui/src/config/`: Theme options, fonts, model aliases
|
||||||
|
- `libs/types/src/settings.ts`: Settings schema
|
||||||
|
- `.env.local`: Local development overrides (git-ignored)
|
||||||
|
|
||||||
|
**Core Logic:**
|
||||||
|
|
||||||
|
**Server:**
|
||||||
|
|
||||||
|
- `apps/server/src/services/agent-service.ts`: AI agent execution engine (31KB)
|
||||||
|
- `apps/server/src/services/auto-mode-service.ts`: Feature batching and automation (216KB - largest)
|
||||||
|
- `apps/server/src/services/feature-loader.ts`: Feature persistence and loading
|
||||||
|
- `apps/server/src/services/settings-service.ts`: Settings management
|
||||||
|
- `apps/server/src/providers/provider-factory.ts`: AI provider selection
|
||||||
|
|
||||||
|
**UI:**
|
||||||
|
|
||||||
|
- `apps/ui/src/store/app-store.ts`: Global state (84KB - largest frontend file)
|
||||||
|
- `apps/ui/src/lib/http-api-client.ts`: API client with auth (92KB)
|
||||||
|
- `apps/ui/src/components/views/board-view.tsx`: Kanban board (70KB)
|
||||||
|
- `apps/ui/src/routes/__root.tsx`: Root layout with session init (32KB)
|
||||||
|
|
||||||
|
**Testing:**
|
||||||
|
|
||||||
|
**E2E Tests:**
|
||||||
|
|
||||||
|
- `apps/ui/tests/`: Playwright tests organized by feature area
|
||||||
|
- `settings/`, `features/`, `projects/`, `agent/`, `utils/`, `context/`
|
||||||
|
|
||||||
|
**Unit Tests:**
|
||||||
|
|
||||||
|
- `libs/*/tests/`: Package-specific Vitest tests
|
||||||
|
- `apps/server/src/tests/`: Server integration tests
|
||||||
|
|
||||||
|
**Test Config:**
|
||||||
|
|
||||||
|
- `vitest.config.ts`: Root Vitest configuration
|
||||||
|
- `apps/ui/playwright.config.ts`: Playwright configuration
|
||||||
|
|
||||||
|
## Naming Conventions
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
|
||||||
|
- **Components:** kebab-case.tsx (e.g., `board-view.tsx`, `session-manager.tsx`)
|
||||||
|
- **Services:** kebab-case-service.ts (e.g., `agent-service.ts`, `settings-service.ts`)
|
||||||
|
- **Hooks:** use-kebab-case.ts (e.g., `use-auto-mode.ts`, `use-settings-sync.ts`)
|
||||||
|
- **Utilities:** kebab-case.ts (e.g., `api-fetch.ts`, `log-parser.ts`)
|
||||||
|
- **Routes:** kebab-case with index.ts pattern (e.g., `routes/agent/index.ts`)
|
||||||
|
- **Tests:** `*.test.ts` or `*.spec.ts` (co-located with source)
|
||||||
|
|
||||||
|
**Directories:**
|
||||||
|
|
||||||
|
- **Feature domains:** kebab-case (e.g., `auto-mode/`, `event-history/`, `project-settings-view/`)
|
||||||
|
- **Type categories:** kebab-case plural (e.g., `types/`, `services/`, `providers/`, `routes/`)
|
||||||
|
- **Shared utilities:** kebab-case (e.g., `lib/`, `utils/`, `hooks/`)
|
||||||
|
|
||||||
|
**TypeScript:**
|
||||||
|
|
||||||
|
- **Types:** PascalCase (e.g., `Feature`, `AgentSession`, `ProviderMessage`)
|
||||||
|
- **Interfaces:** PascalCase (e.g., `EventEmitter`, `ProviderFactory`)
|
||||||
|
- **Enums:** PascalCase (e.g., `LogLevel`, `FeatureStatus`)
|
||||||
|
- **Functions:** camelCase (e.g., `createLogger()`, `classifyError()`)
|
||||||
|
- **Constants:** UPPER_SNAKE_CASE (e.g., `DEFAULT_TIMEOUT_MS`, `MAX_RETRIES`)
|
||||||
|
- **Variables:** camelCase (e.g., `featureId`, `settingsService`)
|
||||||
|
|
||||||
|
## Where to Add New Code
|
||||||
|
|
||||||
|
**New Feature (end-to-end):**
|
||||||
|
|
||||||
|
- API Route: `apps/server/src/routes/{feature-name}/index.ts`
|
||||||
|
- Service Logic: `apps/server/src/services/{feature-name}-service.ts`
|
||||||
|
- UI Route: `apps/ui/src/routes/{feature-name}.tsx` (simple) or `{feature-name}/` (complex with subdir)
|
||||||
|
- Store: `apps/ui/src/store/{feature-name}-store.ts` (if complex state)
|
||||||
|
- Tests: `apps/ui/tests/{feature-name}/` or `apps/server/src/tests/`
|
||||||
|
|
||||||
|
**New Component/Module:**
|
||||||
|
|
||||||
|
- View Components: `apps/ui/src/components/views/{component-name}/`
|
||||||
|
- Dialog Components: `apps/ui/src/components/dialogs/{dialog-name}.tsx`
|
||||||
|
- Shared Components: `apps/ui/src/components/shared/` or `components/ui/` (shadcn)
|
||||||
|
- Layout Components: `apps/ui/src/components/layout/`
|
||||||
|
|
||||||
|
**Utilities:**
|
||||||
|
|
||||||
|
- New Library: Create in `libs/{package-name}/` with package.json and tsconfig.json
|
||||||
|
- Server Utilities: `apps/server/src/lib/{utility-name}.ts`
|
||||||
|
- Shared Utilities: Extend `libs/utils/src/` or create new lib if self-contained
|
||||||
|
- UI Utilities: `apps/ui/src/lib/{utility-name}.ts`
|
||||||
|
|
||||||
|
**New Provider (AI Model):**
|
||||||
|
|
||||||
|
- Implementation: `apps/server/src/providers/{provider-name}-provider.ts`
|
||||||
|
- Types: Add to `libs/types/src/{provider-name}-models.ts`
|
||||||
|
- Model Resolver: Update `libs/model-resolver/src/index.ts` with model alias mapping
|
||||||
|
- Settings: Update `libs/types/src/settings.ts` for provider-specific config
|
||||||
|
|
||||||
|
## Special Directories
|
||||||
|
|
||||||
|
**apps/ui/electron/:**
|
||||||
|
|
||||||
|
- Purpose: Electron-specific code (main process, IPC handlers, native APIs)
|
||||||
|
- Generated: Yes (preload.ts)
|
||||||
|
- Committed: Yes
|
||||||
|
|
||||||
|
**apps/ui/public/**
|
||||||
|
|
||||||
|
- Purpose: Static assets (sounds, images, icons)
|
||||||
|
- Generated: No
|
||||||
|
- Committed: Yes
|
||||||
|
|
||||||
|
**apps/ui/dist/:**
|
||||||
|
|
||||||
|
- Purpose: Built web application
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
**apps/ui/dist-electron/:**
|
||||||
|
|
||||||
|
- Purpose: Built Electron app bundle
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
**.automaker/features/{featureId}/:**
|
||||||
|
|
||||||
|
- Purpose: Per-feature persistent storage
|
||||||
|
- Structure: feature.json, agent-output.md, images/
|
||||||
|
- Generated: Yes (at runtime)
|
||||||
|
- Committed: Yes (tracked in project git)
|
||||||
|
|
||||||
|
**data/:**
|
||||||
|
|
||||||
|
- Purpose: Global data directory (global settings, credentials, sessions)
|
||||||
|
- Generated: Yes (created at first run)
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
- Configurable: Via DATA_DIR env var
|
||||||
|
|
||||||
|
**node_modules/:**
|
||||||
|
|
||||||
|
- Purpose: Installed dependencies
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
**dist/**, **build/:**
|
||||||
|
|
||||||
|
- Purpose: Build artifacts
|
||||||
|
- Generated: Yes
|
||||||
|
- Committed: No (.gitignore)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Structure analysis: 2026-01-27_
|
||||||
389
.planning/codebase/TESTING.md
Normal file
389
.planning/codebase/TESTING.md
Normal file
@@ -0,0 +1,389 @@
|
|||||||
|
# Testing Patterns
|
||||||
|
|
||||||
|
**Analysis Date:** 2026-01-27
|
||||||
|
|
||||||
|
## Test Framework
|
||||||
|
|
||||||
|
**Runner:**
|
||||||
|
|
||||||
|
- Vitest 4.0.16 (for unit and integration tests)
|
||||||
|
- Playwright (for E2E tests)
|
||||||
|
- Config: `apps/server/vitest.config.ts`, `libs/*/vitest.config.ts`, `apps/ui/playwright.config.ts`
|
||||||
|
|
||||||
|
**Assertion Library:**
|
||||||
|
|
||||||
|
- Vitest built-in expect assertions
|
||||||
|
- API: `expect().toBe()`, `expect().toEqual()`, `expect().toHaveLength()`, `expect().toHaveProperty()`
|
||||||
|
|
||||||
|
**Run Commands:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm run test # E2E tests (Playwright, headless)
|
||||||
|
npm run test:headed # E2E tests with browser visible
|
||||||
|
npm run test:packages # All shared package unit tests (vitest)
|
||||||
|
npm run test:server # Server unit tests (vitest run)
|
||||||
|
npm run test:server:coverage # Server tests with coverage report
|
||||||
|
npm run test:all # All tests (packages + server)
|
||||||
|
npm run test:unit # Vitest run (all projects)
|
||||||
|
npm run test:unit:watch # Vitest watch mode
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test File Organization
|
||||||
|
|
||||||
|
**Location:**
|
||||||
|
|
||||||
|
- Co-located with source: `src/module.ts` has `tests/unit/module.test.ts`
|
||||||
|
- Server tests: `apps/server/tests/` (separate directory)
|
||||||
|
- Library tests: `libs/*/tests/` (each package)
|
||||||
|
- E2E tests: `apps/ui/tests/` (Playwright)
|
||||||
|
|
||||||
|
**Naming:**
|
||||||
|
|
||||||
|
- Pattern: `{moduleName}.test.ts` for unit tests
|
||||||
|
- Pattern: `{moduleName}.spec.ts` for specification tests
|
||||||
|
- Glob pattern: `tests/**/*.test.ts`, `tests/**/*.spec.ts`
|
||||||
|
|
||||||
|
**Structure:**
|
||||||
|
|
||||||
|
```
|
||||||
|
apps/server/
|
||||||
|
├── tests/
|
||||||
|
│ ├── setup.ts # Global test setup
|
||||||
|
│ ├── unit/
|
||||||
|
│ │ ├── providers/ # Provider tests
|
||||||
|
│ │ │ ├── claude-provider.test.ts
|
||||||
|
│ │ │ ├── codex-provider.test.ts
|
||||||
|
│ │ │ └── base-provider.test.ts
|
||||||
|
│ │ └── services/
|
||||||
|
│ └── utils/
|
||||||
|
│ └── helpers.ts # Test utilities
|
||||||
|
└── src/
|
||||||
|
|
||||||
|
libs/platform/
|
||||||
|
├── tests/
|
||||||
|
│ ├── paths.test.ts
|
||||||
|
│ ├── security.test.ts
|
||||||
|
│ ├── subprocess.test.ts
|
||||||
|
│ └── node-finder.test.ts
|
||||||
|
└── src/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Structure
|
||||||
|
|
||||||
|
**Suite Organization:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||||
|
import { FeatureLoader } from '@/services/feature-loader.js';
|
||||||
|
|
||||||
|
describe('feature-loader.ts', () => {
|
||||||
|
let featureLoader: FeatureLoader;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
featureLoader = new FeatureLoader();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(async () => {
|
||||||
|
// Cleanup resources
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('methodName', () => {
|
||||||
|
it('should do specific thing', () => {
|
||||||
|
expect(result).toBe(expected);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
- Setup pattern: `beforeEach()` initializes test instance, clears mocks
|
||||||
|
- Teardown pattern: `afterEach()` cleans up temp directories, removes created files
|
||||||
|
- Assertion pattern: one logical assertion per test (or multiple closely related)
|
||||||
|
- Test isolation: each test runs with fresh setup
|
||||||
|
|
||||||
|
## Mocking
|
||||||
|
|
||||||
|
**Framework:**
|
||||||
|
|
||||||
|
- Vitest `vi` module: `vi.mock()`, `vi.mocked()`, `vi.clearAllMocks()`
|
||||||
|
- Mock patterns: module mocking, function spying, return value mocking
|
||||||
|
|
||||||
|
**Patterns:**
|
||||||
|
|
||||||
|
Module mocking:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
vi.mock('@anthropic-ai/claude-agent-sdk');
|
||||||
|
// In test:
|
||||||
|
vi.mocked(sdk.query).mockReturnValue(
|
||||||
|
(async function* () {
|
||||||
|
yield { type: 'text', text: 'Response 1' };
|
||||||
|
})()
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
Async generator mocking (for streaming APIs):
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const generator = provider.executeQuery({
|
||||||
|
prompt: 'Hello',
|
||||||
|
model: 'claude-opus-4-5-20251101',
|
||||||
|
cwd: '/test',
|
||||||
|
});
|
||||||
|
const results = await collectAsyncGenerator(generator);
|
||||||
|
```
|
||||||
|
|
||||||
|
Partial mocking with spies:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const provider = new TestProvider();
|
||||||
|
const spy = vi.spyOn(provider, 'getName');
|
||||||
|
spy.mockReturnValue('mocked-name');
|
||||||
|
```
|
||||||
|
|
||||||
|
**What to Mock:**
|
||||||
|
|
||||||
|
- External APIs (Claude SDK, GitHub SDK, cloud services)
|
||||||
|
- File system operations (use temp directories instead when possible)
|
||||||
|
- Network calls
|
||||||
|
- Process execution
|
||||||
|
- Time-dependent operations
|
||||||
|
|
||||||
|
**What NOT to Mock:**
|
||||||
|
|
||||||
|
- Core business logic (test the actual implementation)
|
||||||
|
- Type definitions
|
||||||
|
- Internal module dependencies (test integration with real services)
|
||||||
|
- Standard library functions (fs, path, etc. - use fixtures instead)
|
||||||
|
|
||||||
|
## Fixtures and Factories
|
||||||
|
|
||||||
|
**Test Data:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Test helper for collecting async generator results
|
||||||
|
async function collectAsyncGenerator<T>(generator: AsyncGenerator<T>): Promise<T[]> {
|
||||||
|
const results: T[] = [];
|
||||||
|
for await (const item of generator) {
|
||||||
|
results.push(item);
|
||||||
|
}
|
||||||
|
return results;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporary directory fixture
|
||||||
|
beforeEach(async () => {
|
||||||
|
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'test-'));
|
||||||
|
projectPath = path.join(tempDir, 'test-project');
|
||||||
|
await fs.mkdir(projectPath, { recursive: true });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(async () => {
|
||||||
|
try {
|
||||||
|
await fs.rm(tempDir, { recursive: true, force: true });
|
||||||
|
} catch (error) {
|
||||||
|
// Ignore cleanup errors
|
||||||
|
}
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Location:**
|
||||||
|
|
||||||
|
- Inline in test files for simple fixtures
|
||||||
|
- `tests/utils/helpers.ts` for shared test utilities
|
||||||
|
- Factory functions for complex test objects: `createTestProvider()`, `createMockFeature()`
|
||||||
|
|
||||||
|
## Coverage
|
||||||
|
|
||||||
|
**Requirements (Server):**
|
||||||
|
|
||||||
|
- Lines: 60%
|
||||||
|
- Functions: 75%
|
||||||
|
- Branches: 55%
|
||||||
|
- Statements: 60%
|
||||||
|
- Config: `apps/server/vitest.config.ts` with thresholds
|
||||||
|
|
||||||
|
**Excluded from Coverage:**
|
||||||
|
|
||||||
|
- Route handlers: tested via integration/E2E tests
|
||||||
|
- Type re-exports
|
||||||
|
- Middleware: tested via integration tests
|
||||||
|
- Prompt templates
|
||||||
|
- MCP integration: awaits MCP SDK integration tests
|
||||||
|
- Provider CLI integrations: awaits integration tests
|
||||||
|
|
||||||
|
**View Coverage:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm run test:server:coverage # Generate coverage report
|
||||||
|
# Opens HTML report in: apps/server/coverage/index.html
|
||||||
|
```
|
||||||
|
|
||||||
|
**Coverage Tools:**
|
||||||
|
|
||||||
|
- Provider: v8
|
||||||
|
- Reporters: text, json, html, lcov
|
||||||
|
- File inclusion: `src/**/*.ts`
|
||||||
|
- File exclusion: `src/**/*.d.ts`, specific service files in thresholds
|
||||||
|
|
||||||
|
## Test Types
|
||||||
|
|
||||||
|
**Unit Tests:**
|
||||||
|
|
||||||
|
- Scope: Individual functions and methods
|
||||||
|
- Approach: Test inputs → outputs with mocked dependencies
|
||||||
|
- Location: `apps/server/tests/unit/`
|
||||||
|
- Examples:
|
||||||
|
- Provider executeQuery() with mocked SDK
|
||||||
|
- Path construction functions with assertions
|
||||||
|
- Error classification with different error types
|
||||||
|
- Config validation with various inputs
|
||||||
|
|
||||||
|
**Integration Tests:**
|
||||||
|
|
||||||
|
- Scope: Multiple modules working together
|
||||||
|
- Approach: Test actual service calls with real file system or temp directories
|
||||||
|
- Pattern: Setup data → call method → verify results
|
||||||
|
- Example: Feature loader reading/writing feature.json files
|
||||||
|
- Example: Auto-mode service coordinating with multiple services
|
||||||
|
|
||||||
|
**E2E Tests:**
|
||||||
|
|
||||||
|
- Framework: Playwright
|
||||||
|
- Scope: Full user workflows from UI
|
||||||
|
- Location: `apps/ui/tests/`
|
||||||
|
- Config: `apps/ui/playwright.config.ts`
|
||||||
|
- Setup:
|
||||||
|
- Backend server with mock agent enabled
|
||||||
|
- Frontend Vite dev server
|
||||||
|
- Sequential execution (workers: 1) to avoid auth conflicts
|
||||||
|
- Screenshots/traces on failure
|
||||||
|
- Auth: Global setup authentication in `tests/global-setup.ts`
|
||||||
|
- Fixtures: `tests/e2e-fixtures/` for test project data
|
||||||
|
|
||||||
|
## Common Patterns
|
||||||
|
|
||||||
|
**Async Testing:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
it('should execute async operation', async () => {
|
||||||
|
const result = await featureLoader.loadFeature(projectPath, featureId);
|
||||||
|
expect(result).toBeDefined();
|
||||||
|
expect(result.id).toBe(featureId);
|
||||||
|
});
|
||||||
|
|
||||||
|
// For streams/generators:
|
||||||
|
const generator = provider.executeQuery({ prompt, model, cwd });
|
||||||
|
const results = await collectAsyncGenerator(generator);
|
||||||
|
expect(results).toHaveLength(2);
|
||||||
|
```
|
||||||
|
|
||||||
|
**Error Testing:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
it('should throw error when feature not found', async () => {
|
||||||
|
await expect(featureLoader.getFeature(projectPath, 'nonexistent')).rejects.toThrow('not found');
|
||||||
|
});
|
||||||
|
|
||||||
|
// Testing error classification:
|
||||||
|
const errorInfo = classifyError(new Error('ENOENT'));
|
||||||
|
expect(errorInfo.category).toBe('FileSystem');
|
||||||
|
```
|
||||||
|
|
||||||
|
**Fixture Setup:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
it('should create feature with images', async () => {
|
||||||
|
// Setup: create temp feature directory
|
||||||
|
const featureDir = path.join(projectPath, '.automaker', 'features', featureId);
|
||||||
|
await fs.mkdir(featureDir, { recursive: true });
|
||||||
|
|
||||||
|
// Act: perform operation
|
||||||
|
const result = await featureLoader.updateFeature(projectPath, {
|
||||||
|
id: featureId,
|
||||||
|
imagePaths: ['/temp/image.png'],
|
||||||
|
});
|
||||||
|
|
||||||
|
// Assert: verify file operations
|
||||||
|
const migratedPath = path.join(featureDir, 'images', 'image.png');
|
||||||
|
expect(fs.existsSync(migratedPath)).toBe(true);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Mock Reset Pattern:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// In vitest.config.ts:
|
||||||
|
mockReset: true, // Reset all mocks before each test
|
||||||
|
restoreMocks: true, // Restore original implementations
|
||||||
|
clearMocks: true, // Clear mock call history
|
||||||
|
|
||||||
|
// In test:
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
delete process.env.ANTHROPIC_API_KEY;
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Configuration
|
||||||
|
|
||||||
|
**Vitest Config Patterns:**
|
||||||
|
|
||||||
|
Server config (`apps/server/vitest.config.ts`):
|
||||||
|
|
||||||
|
- Environment: node
|
||||||
|
- Globals: true (describe/it without imports)
|
||||||
|
- Setup files: `./tests/setup.ts`
|
||||||
|
- Alias resolution: resolves `@automaker/*` to source files for mocking
|
||||||
|
|
||||||
|
Library config:
|
||||||
|
|
||||||
|
- Simpler setup: just environment and globals
|
||||||
|
- Coverage with high thresholds (90%+ lines)
|
||||||
|
|
||||||
|
**Global Setup:**
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// tests/setup.ts
|
||||||
|
import { vi, beforeEach } from 'vitest';
|
||||||
|
|
||||||
|
process.env.NODE_ENV = 'test';
|
||||||
|
process.env.DATA_DIR = '/tmp/test-data';
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing Best Practices
|
||||||
|
|
||||||
|
**Isolation:**
|
||||||
|
|
||||||
|
- Each test is independent (no state sharing)
|
||||||
|
- Cleanup temp files in afterEach
|
||||||
|
- Reset mocks and environment variables in beforeEach
|
||||||
|
|
||||||
|
**Clarity:**
|
||||||
|
|
||||||
|
- Descriptive test names: "should do X when Y condition"
|
||||||
|
- One logical assertion per test
|
||||||
|
- Clear arrange-act-assert structure
|
||||||
|
|
||||||
|
**Speed:**
|
||||||
|
|
||||||
|
- Mock external services
|
||||||
|
- Use in-memory temp directories
|
||||||
|
- Avoid real network calls
|
||||||
|
- Sequential E2E tests to prevent conflicts
|
||||||
|
|
||||||
|
**Maintainability:**
|
||||||
|
|
||||||
|
- Use beforeEach/afterEach for common setup
|
||||||
|
- Extract test helpers to `tests/utils/`
|
||||||
|
- Keep test data simple and local
|
||||||
|
- Mock consistently across tests
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
_Testing analysis: 2026-01-27_
|
||||||
@@ -161,12 +161,16 @@ Use `resolveModelString()` from `@automaker/model-resolver` to convert model ali
|
|||||||
|
|
||||||
- `haiku` → `claude-haiku-4-5`
|
- `haiku` → `claude-haiku-4-5`
|
||||||
- `sonnet` → `claude-sonnet-4-20250514`
|
- `sonnet` → `claude-sonnet-4-20250514`
|
||||||
- `opus` → `claude-opus-4-5-20251101`
|
- `opus` → `claude-opus-4-6`
|
||||||
|
|
||||||
## Environment Variables
|
## Environment Variables
|
||||||
|
|
||||||
- `ANTHROPIC_API_KEY` - Anthropic API key (or use Claude Code CLI auth)
|
- `ANTHROPIC_API_KEY` - Anthropic API key (or use Claude Code CLI auth)
|
||||||
|
- `HOST` - Host to bind server to (default: 0.0.0.0)
|
||||||
|
- `HOSTNAME` - Hostname for user-facing URLs (default: localhost)
|
||||||
- `PORT` - Server port (default: 3008)
|
- `PORT` - Server port (default: 3008)
|
||||||
- `DATA_DIR` - Data storage directory (default: ./data)
|
- `DATA_DIR` - Data storage directory (default: ./data)
|
||||||
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
||||||
- `AUTOMAKER_MOCK_AGENT=true` - Enable mock agent mode for CI testing
|
- `AUTOMAKER_MOCK_AGENT=true` - Enable mock agent mode for CI testing
|
||||||
|
- `AUTOMAKER_AUTO_LOGIN=true` - Skip login prompt in development (disabled when NODE_ENV=production)
|
||||||
|
- `VITE_HOSTNAME` - Hostname for frontend API URLs (default: localhost)
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ For complete details on contribution terms and rights assignment, please review
|
|||||||
- [Development Setup](#development-setup)
|
- [Development Setup](#development-setup)
|
||||||
- [Project Structure](#project-structure)
|
- [Project Structure](#project-structure)
|
||||||
- [Pull Request Process](#pull-request-process)
|
- [Pull Request Process](#pull-request-process)
|
||||||
|
- [Branching Strategy (RC Branches)](#branching-strategy-rc-branches)
|
||||||
- [Branch Naming Convention](#branch-naming-convention)
|
- [Branch Naming Convention](#branch-naming-convention)
|
||||||
- [Commit Message Format](#commit-message-format)
|
- [Commit Message Format](#commit-message-format)
|
||||||
- [Submitting a Pull Request](#submitting-a-pull-request)
|
- [Submitting a Pull Request](#submitting-a-pull-request)
|
||||||
@@ -186,6 +187,59 @@ automaker/
|
|||||||
|
|
||||||
This section covers everything you need to know about contributing changes through pull requests, from creating your branch to getting your code merged.
|
This section covers everything you need to know about contributing changes through pull requests, from creating your branch to getting your code merged.
|
||||||
|
|
||||||
|
### Branching Strategy (RC Branches)
|
||||||
|
|
||||||
|
Automaker uses **Release Candidate (RC) branches** for all development work. Understanding this workflow is essential before contributing.
|
||||||
|
|
||||||
|
**How it works:**
|
||||||
|
|
||||||
|
1. **All development happens on RC branches** - We maintain version-specific RC branches (e.g., `v0.10.0rc`, `v0.11.0rc`) where all active development occurs
|
||||||
|
2. **RC branches are eventually merged to main** - Once an RC branch is stable and ready for release, it gets merged into `main`
|
||||||
|
3. **Main branch is for releases only** - The `main` branch contains only released, stable code
|
||||||
|
|
||||||
|
**Before creating a PR:**
|
||||||
|
|
||||||
|
1. **Check for the latest RC branch** - Before starting work, check the repository for the current RC branch:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git fetch upstream
|
||||||
|
git branch -r | grep rc
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Base your work on the RC branch** - Create your feature branch from the latest RC branch, not from `main`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Find the latest RC branch (e.g., v0.11.0rc)
|
||||||
|
git checkout upstream/v0.11.0rc
|
||||||
|
git checkout -b feature/your-feature-name
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Target the RC branch in your PR** - When opening your pull request, set the base branch to the current RC branch, not `main`
|
||||||
|
|
||||||
|
**Example workflow:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Fetch latest changes
|
||||||
|
git fetch upstream
|
||||||
|
|
||||||
|
# 2. Check for RC branches
|
||||||
|
git branch -r | grep rc
|
||||||
|
# Output: upstream/v0.11.0rc
|
||||||
|
|
||||||
|
# 3. Create your branch from the RC
|
||||||
|
git checkout -b feature/add-dark-mode upstream/v0.11.0rc
|
||||||
|
|
||||||
|
# 4. Make your changes and commit
|
||||||
|
git commit -m "feat: Add dark mode support"
|
||||||
|
|
||||||
|
# 5. Push to your fork
|
||||||
|
git push origin feature/add-dark-mode
|
||||||
|
|
||||||
|
# 6. Open PR targeting the RC branch (v0.11.0rc), NOT main
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important:** PRs opened directly against `main` will be asked to retarget to the current RC branch.
|
||||||
|
|
||||||
### Branch Naming Convention
|
### Branch Naming Convention
|
||||||
|
|
||||||
We use a consistent branch naming pattern to keep our repository organized:
|
We use a consistent branch naming pattern to keep our repository organized:
|
||||||
@@ -275,14 +329,14 @@ Follow these steps to submit your contribution:
|
|||||||
|
|
||||||
#### 1. Prepare Your Changes
|
#### 1. Prepare Your Changes
|
||||||
|
|
||||||
Ensure you've synced with the latest upstream changes:
|
Ensure you've synced with the latest upstream changes from the RC branch:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Fetch latest changes from upstream
|
# Fetch latest changes from upstream
|
||||||
git fetch upstream
|
git fetch upstream
|
||||||
|
|
||||||
# Rebase your branch on main (if needed)
|
# Rebase your branch on the current RC branch (if needed)
|
||||||
git rebase upstream/main
|
git rebase upstream/v0.11.0rc # Use the current RC branch name
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 2. Run Pre-submission Checks
|
#### 2. Run Pre-submission Checks
|
||||||
@@ -314,18 +368,19 @@ git push origin feature/your-feature-name
|
|||||||
|
|
||||||
1. Go to your fork on GitHub
|
1. Go to your fork on GitHub
|
||||||
2. Click "Compare & pull request" for your branch
|
2. Click "Compare & pull request" for your branch
|
||||||
3. Ensure the base repository is `AutoMaker-Org/automaker` and base branch is `main`
|
3. **Important:** Set the base repository to `AutoMaker-Org/automaker` and the base branch to the **current RC branch** (e.g., `v0.11.0rc`), not `main`
|
||||||
4. Fill out the PR template completely
|
4. Fill out the PR template completely
|
||||||
|
|
||||||
#### PR Requirements Checklist
|
#### PR Requirements Checklist
|
||||||
|
|
||||||
Your PR should include:
|
Your PR should include:
|
||||||
|
|
||||||
|
- [ ] **Targets the current RC branch** (not `main`) - see [Branching Strategy](#branching-strategy-rc-branches)
|
||||||
- [ ] **Clear title** describing the change (use conventional commit format)
|
- [ ] **Clear title** describing the change (use conventional commit format)
|
||||||
- [ ] **Description** explaining what changed and why
|
- [ ] **Description** explaining what changed and why
|
||||||
- [ ] **Link to related issue** (if applicable): `Closes #123` or `Fixes #456`
|
- [ ] **Link to related issue** (if applicable): `Closes #123` or `Fixes #456`
|
||||||
- [ ] **All CI checks passing** (format, lint, build, tests)
|
- [ ] **All CI checks passing** (format, lint, build, tests)
|
||||||
- [ ] **No merge conflicts** with main branch
|
- [ ] **No merge conflicts** with the RC branch
|
||||||
- [ ] **Tests included** for new functionality
|
- [ ] **Tests included** for new functionality
|
||||||
- [ ] **Documentation updated** if adding/changing public APIs
|
- [ ] **Documentation updated** if adding/changing public APIs
|
||||||
|
|
||||||
|
|||||||
37
Dockerfile
37
Dockerfile
@@ -25,9 +25,11 @@ COPY libs/types/package*.json ./libs/types/
|
|||||||
COPY libs/utils/package*.json ./libs/utils/
|
COPY libs/utils/package*.json ./libs/utils/
|
||||||
COPY libs/prompts/package*.json ./libs/prompts/
|
COPY libs/prompts/package*.json ./libs/prompts/
|
||||||
COPY libs/platform/package*.json ./libs/platform/
|
COPY libs/platform/package*.json ./libs/platform/
|
||||||
|
COPY libs/spec-parser/package*.json ./libs/spec-parser/
|
||||||
COPY libs/model-resolver/package*.json ./libs/model-resolver/
|
COPY libs/model-resolver/package*.json ./libs/model-resolver/
|
||||||
COPY libs/dependency-resolver/package*.json ./libs/dependency-resolver/
|
COPY libs/dependency-resolver/package*.json ./libs/dependency-resolver/
|
||||||
COPY libs/git-utils/package*.json ./libs/git-utils/
|
COPY libs/git-utils/package*.json ./libs/git-utils/
|
||||||
|
COPY libs/spec-parser/package*.json ./libs/spec-parser/
|
||||||
|
|
||||||
# Copy scripts (needed by npm workspace)
|
# Copy scripts (needed by npm workspace)
|
||||||
COPY scripts ./scripts
|
COPY scripts ./scripts
|
||||||
@@ -59,9 +61,22 @@ FROM node:22-slim AS server
|
|||||||
ARG GIT_COMMIT_SHA=unknown
|
ARG GIT_COMMIT_SHA=unknown
|
||||||
LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
|
LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
|
||||||
|
|
||||||
|
# Build arguments for user ID matching (allows matching host user for mounted volumes)
|
||||||
|
# Override at build time: docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) ...
|
||||||
|
ARG UID=1001
|
||||||
|
ARG GID=1001
|
||||||
|
|
||||||
# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
|
# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
|
||||||
|
# Also install Playwright/Chromium system dependencies (aligns with playwright install-deps on Debian/Ubuntu)
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
git curl bash gosu ca-certificates openssh-client \
|
git curl bash gosu ca-certificates openssh-client \
|
||||||
|
# Playwright/Chromium dependencies
|
||||||
|
libglib2.0-0 libnss3 libnspr4 libdbus-1-3 libatk1.0-0 libatk-bridge2.0-0 \
|
||||||
|
libcups2 libdrm2 libxkbcommon0 libatspi2.0-0 libxcomposite1 libxdamage1 \
|
||||||
|
libxfixes3 libxrandr2 libgbm1 libasound2 libpango-1.0-0 libcairo2 \
|
||||||
|
libx11-6 libx11-xcb1 libxcb1 libxext6 libxrender1 libxss1 libxtst6 \
|
||||||
|
libxshmfence1 libgtk-3-0 libexpat1 libfontconfig1 fonts-liberation \
|
||||||
|
xdg-utils libpangocairo-1.0-0 libpangoft2-1.0-0 libu2f-udev libvulkan1 \
|
||||||
&& GH_VERSION="2.63.2" \
|
&& GH_VERSION="2.63.2" \
|
||||||
&& ARCH=$(uname -m) \
|
&& ARCH=$(uname -m) \
|
||||||
&& case "$ARCH" in \
|
&& case "$ARCH" in \
|
||||||
@@ -79,8 +94,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
RUN npm install -g @anthropic-ai/claude-code
|
RUN npm install -g @anthropic-ai/claude-code
|
||||||
|
|
||||||
# Create non-root user with home directory BEFORE installing Cursor CLI
|
# Create non-root user with home directory BEFORE installing Cursor CLI
|
||||||
RUN groupadd -g 1001 automaker && \
|
# Uses UID/GID build args to match host user for mounted volume permissions
|
||||||
useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
|
||||||
|
RUN groupadd -o -g ${GID} automaker && \
|
||||||
|
useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
||||||
mkdir -p /home/automaker/.local/bin && \
|
mkdir -p /home/automaker/.local/bin && \
|
||||||
mkdir -p /home/automaker/.cursor && \
|
mkdir -p /home/automaker/.cursor && \
|
||||||
chown -R automaker:automaker /home/automaker && \
|
chown -R automaker:automaker /home/automaker && \
|
||||||
@@ -95,6 +112,13 @@ RUN curl https://cursor.com/install -fsS | bash && \
|
|||||||
ls -la /home/automaker/.local/bin/ && \
|
ls -la /home/automaker/.local/bin/ && \
|
||||||
echo "=== PATH is: $PATH ===" && \
|
echo "=== PATH is: $PATH ===" && \
|
||||||
(which cursor-agent && cursor-agent --version) || echo "cursor-agent installed (may need auth setup)"
|
(which cursor-agent && cursor-agent --version) || echo "cursor-agent installed (may need auth setup)"
|
||||||
|
|
||||||
|
# Install OpenCode CLI (for multi-provider AI model access)
|
||||||
|
RUN curl -fsSL https://opencode.ai/install | bash && \
|
||||||
|
echo "=== Checking OpenCode CLI installation ===" && \
|
||||||
|
ls -la /home/automaker/.local/bin/ && \
|
||||||
|
(which opencode && opencode --version) || echo "opencode installed (may need auth setup)"
|
||||||
|
|
||||||
USER root
|
USER root
|
||||||
|
|
||||||
# Add PATH to profile so it's available in all interactive shells (for login shells)
|
# Add PATH to profile so it's available in all interactive shells (for login shells)
|
||||||
@@ -124,6 +148,15 @@ COPY --from=server-builder /app/apps/server/package*.json ./apps/server/
|
|||||||
# Copy node_modules (includes symlinks to libs)
|
# Copy node_modules (includes symlinks to libs)
|
||||||
COPY --from=server-builder /app/node_modules ./node_modules
|
COPY --from=server-builder /app/node_modules ./node_modules
|
||||||
|
|
||||||
|
# Install Playwright Chromium browser for AI agent verification tests
|
||||||
|
# This adds ~300MB to the image but enables automated testing mode out of the box
|
||||||
|
# Using the locally installed playwright ensures we use the pinned version from package-lock.json
|
||||||
|
USER automaker
|
||||||
|
RUN ./node_modules/.bin/playwright install chromium && \
|
||||||
|
echo "=== Playwright Chromium installed ===" && \
|
||||||
|
ls -la /home/automaker/.cache/ms-playwright/
|
||||||
|
USER root
|
||||||
|
|
||||||
# Create data and projects directories
|
# Create data and projects directories
|
||||||
RUN mkdir -p /data /projects && chown automaker:automaker /data /projects
|
RUN mkdir -p /data /projects && chown automaker:automaker /data /projects
|
||||||
|
|
||||||
|
|||||||
@@ -8,9 +8,17 @@
|
|||||||
FROM node:22-slim
|
FROM node:22-slim
|
||||||
|
|
||||||
# Install build dependencies for native modules (node-pty) and runtime tools
|
# Install build dependencies for native modules (node-pty) and runtime tools
|
||||||
|
# Also install Playwright/Chromium system dependencies (aligns with playwright install-deps on Debian/Ubuntu)
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
python3 make g++ \
|
python3 make g++ \
|
||||||
git curl bash gosu ca-certificates openssh-client \
|
git curl bash gosu ca-certificates openssh-client \
|
||||||
|
# Playwright/Chromium dependencies
|
||||||
|
libglib2.0-0 libnss3 libnspr4 libdbus-1-3 libatk1.0-0 libatk-bridge2.0-0 \
|
||||||
|
libcups2 libdrm2 libxkbcommon0 libatspi2.0-0 libxcomposite1 libxdamage1 \
|
||||||
|
libxfixes3 libxrandr2 libgbm1 libasound2 libpango-1.0-0 libcairo2 \
|
||||||
|
libx11-6 libx11-xcb1 libxcb1 libxext6 libxrender1 libxss1 libxtst6 \
|
||||||
|
libxshmfence1 libgtk-3-0 libexpat1 libfontconfig1 fonts-liberation \
|
||||||
|
xdg-utils libpangocairo-1.0-0 libpangoft2-1.0-0 libu2f-udev libvulkan1 \
|
||||||
&& GH_VERSION="2.63.2" \
|
&& GH_VERSION="2.63.2" \
|
||||||
&& ARCH=$(uname -m) \
|
&& ARCH=$(uname -m) \
|
||||||
&& case "$ARCH" in \
|
&& case "$ARCH" in \
|
||||||
@@ -27,9 +35,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
# Install Claude CLI globally
|
# Install Claude CLI globally
|
||||||
RUN npm install -g @anthropic-ai/claude-code
|
RUN npm install -g @anthropic-ai/claude-code
|
||||||
|
|
||||||
# Create non-root user
|
# Build arguments for user ID matching (allows matching host user for mounted volumes)
|
||||||
RUN groupadd -g 1001 automaker && \
|
# Override at build time: docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g)
|
||||||
useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
ARG UID=1001
|
||||||
|
ARG GID=1001
|
||||||
|
|
||||||
|
# Create non-root user with configurable UID/GID
|
||||||
|
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
|
||||||
|
RUN groupadd -o -g ${GID} automaker && \
|
||||||
|
useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
|
||||||
mkdir -p /home/automaker/.local/bin && \
|
mkdir -p /home/automaker/.local/bin && \
|
||||||
mkdir -p /home/automaker/.cursor && \
|
mkdir -p /home/automaker/.cursor && \
|
||||||
chown -R automaker:automaker /home/automaker && \
|
chown -R automaker:automaker /home/automaker && \
|
||||||
|
|||||||
158
LICENSE
158
LICENSE
@@ -1,141 +1,27 @@
|
|||||||
AUTOMAKER LICENSE AGREEMENT
|
## Project Status
|
||||||
|
|
||||||
This License Agreement ("Agreement") is entered into between you ("Licensee") and the copyright holders of Automaker ("Licensor"). By using, copying, modifying, downloading, cloning, or distributing the Software (as defined below), you agree to be bound by the terms of this Agreement.
|
**This project is no longer actively maintained.** The codebase is provided as-is for those who wish to use, study, or fork it. No bug fixes, security updates, or new features are being developed. Community contributions may still be accepted, but there is no guarantee of review or merge.
|
||||||
|
|
||||||
1. DEFINITIONS
|
|
||||||
|
|
||||||
"Software" means the Automaker software, including all source code, object code, documentation, and related materials.
|
|
||||||
|
|
||||||
"Generated Files" means files created by the Software during normal operation to store internal state, configuration, or working data, including but not limited to app_spec.txt, feature.json, and similar files generated by the Software. Generated Files are not considered part of the Software for the purposes of this license and are not subject to the restrictions herein.
|
|
||||||
|
|
||||||
"Derivative Work" means any work that is based on, derived from, or incorporates the Software or any substantial portion of it, including but not limited to modifications, forks, adaptations, translations, or any altered version of the Software.
|
|
||||||
|
|
||||||
"Monetization" means any activity that generates revenue, income, or commercial benefit from the Software itself or any Derivative Work, including but not limited to:
|
|
||||||
|
|
||||||
- Reselling, redistributing, or sublicensing the Software, any Derivative Work, or any substantial portion thereof
|
|
||||||
- Including the Software, any Derivative Work, or substantial portions thereof in a product or service that you sell or distribute
|
|
||||||
- Offering the Software, any Derivative Work, or substantial portions thereof as a standalone product or service for sale
|
|
||||||
- Hosting the Software or any Derivative Work as a service (whether free or paid) for use by others, including cloud hosting, Software-as-a-Service (SaaS), or any other form of hosted access for third parties
|
|
||||||
- Extracting, reselling, redistributing, or sublicensing any prompts, context, or other instructional content bundled within the Software
|
|
||||||
- Creating, distributing, or selling modified versions, forks, or Derivative Works of the Software
|
|
||||||
|
|
||||||
Monetization does NOT include:
|
|
||||||
|
|
||||||
- Using the Software internally within your organization, regardless of whether your organization is for-profit
|
|
||||||
- Using the Software to build products or services that generate revenue, as long as you are not reselling or redistributing the Software itself
|
|
||||||
- Using the Software to provide services for which fees are charged, as long as the Software itself is not being resold or redistributed
|
|
||||||
- Hosting the Software anywhere for personal use by a single developer, as long as the Software is not made accessible to others
|
|
||||||
|
|
||||||
"Core Contributors" means the following individuals who are granted perpetual, royalty-free licenses:
|
|
||||||
|
|
||||||
- Cody Seibert (webdevcody)
|
|
||||||
- SuperComboGamer (SCG)
|
|
||||||
- Kacper Lachowicz (Shironex, Shirone)
|
|
||||||
- Ben Scott (trueheads)
|
|
||||||
|
|
||||||
2. GRANT OF LICENSE
|
|
||||||
|
|
||||||
Subject to the terms and conditions of this Agreement, Licensor hereby grants to Licensee a non-exclusive, non-transferable license to use, copy, modify, and distribute the Software, provided that:
|
|
||||||
|
|
||||||
a) Licensee may freely clone, install, and use the Software locally or within an organization for the purpose of building, developing, and maintaining other products, software, or services. There are no restrictions on the products you build _using_ the Software.
|
|
||||||
|
|
||||||
b) Licensee may run the Software on personal or organizational infrastructure for internal use.
|
|
||||||
|
|
||||||
c) Core Contributors are each individually granted a perpetual, worldwide, royalty-free, non-exclusive license to use, copy, modify, distribute, and sublicense the Software for any purpose, including Monetization, without payment of any fees or royalties. Each Core Contributor may exercise these rights independently and does not require permission, consent, or approval from any other Core Contributor to Monetize the Software in any way they see fit.
|
|
||||||
|
|
||||||
d) Commercial licenses for the Software may be discussed and issued to external parties or companies seeking to use the Software for financial gain or Monetization purposes. Core Contributors already have full rights under section 2(c) and do not require commercial licenses. Any commercial license issued to external parties shall require a unanimous vote by all Core Contributors and shall be granted in writing and signed by all Core Contributors.
|
|
||||||
|
|
||||||
e) The list of individuals defined as "Core Contributors" in Section 1 shall be amended to reflect any revocation or reinstatement of status made under this section.
|
|
||||||
|
|
||||||
3. RESTRICTIONS
|
|
||||||
|
|
||||||
Licensee may NOT:
|
|
||||||
|
|
||||||
- Engage in any Monetization of the Software or any Derivative Work without explicit written permission from all Core Contributors
|
|
||||||
- Resell, redistribute, or sublicense the Software, any Derivative Work, or any substantial portion thereof
|
|
||||||
- Create, distribute, or sell modified versions, forks, or Derivative Works of the Software for any commercial purpose
|
|
||||||
- Include the Software, any Derivative Work, or substantial portions thereof in a product or service that you sell or distribute
|
|
||||||
- Offer the Software, any Derivative Work, or substantial portions thereof as a standalone product or service for sale
|
|
||||||
- Extract, resell, redistribute, or sublicense any prompts, context, or other instructional content bundled within the Software
|
|
||||||
- Host the Software or any Derivative Work as a service (whether free or paid) for use by others (except Core Contributors)
|
|
||||||
- Remove or alter any copyright notices or license terms
|
|
||||||
- Use the Software in any manner that violates applicable laws or regulations
|
|
||||||
|
|
||||||
Licensee MAY:
|
|
||||||
|
|
||||||
- Use the Software internally within their organization (commercial or non-profit)
|
|
||||||
- Use the Software to build other commercial products (products that do NOT contain the Software or Derivative Works)
|
|
||||||
- Modify the Software for internal use within their organization (commercial or non-profit)
|
|
||||||
|
|
||||||
4. CORE CONTRIBUTOR STATUS MANAGEMENT
|
|
||||||
|
|
||||||
a) Core Contributor status may be revoked indefinitely by the remaining Core Contributors if:
|
|
||||||
|
|
||||||
- A Core Contributor cannot be reached for a period of one (1) month through reasonable means of communication (including but not limited to email, Discord, GitHub, or other project communication channels)
|
|
||||||
- AND the Core Contributor has not contributed to the project during that one-month period. For purposes of this section, "contributed" means at least one of the following activities:
|
|
||||||
- Discussing the Software through project communication channels
|
|
||||||
- Committing code changes to the project repository
|
|
||||||
- Submitting bug fixes or patches
|
|
||||||
- Participating in project-related discussions or decision-making
|
|
||||||
|
|
||||||
b) Revocation of Core Contributor status requires a unanimous vote by all other Core Contributors (excluding the Core Contributor whose status is being considered for revocation).
|
|
||||||
|
|
||||||
c) Upon revocation of Core Contributor status, the individual shall no longer be considered a Core Contributor and shall lose the rights granted under section 2(c) of this Agreement. However, any Contributions made prior to revocation shall remain subject to the terms of section 5 (CONTRIBUTIONS AND RIGHTS ASSIGNMENT).
|
|
||||||
|
|
||||||
d) A revoked Core Contributor may be reinstated to Core Contributor status with a unanimous vote by all current Core Contributors. Upon reinstatement, the individual shall regain all rights granted under section 2(c) of this Agreement.
|
|
||||||
|
|
||||||
5. CONTRIBUTIONS AND RIGHTS ASSIGNMENT
|
|
||||||
|
|
||||||
By submitting, pushing, or contributing any code, documentation, pull requests, issues, or other materials ("Contributions") to the Automaker project, you agree to the following terms without reservation:
|
|
||||||
|
|
||||||
a) **Full Ownership Transfer & Rights Grant:** You hereby assign to the Core Contributors all right, title, and interest in and to your Contributions, including all copyrights, patents, and other intellectual property rights. If such assignment is not effective under applicable law, you grant the Core Contributors an unrestricted, perpetual, worldwide, non-exclusive, royalty-free, fully paid-up, irrevocable, sublicensable, and transferable license to use, reproduce, modify, adapt, publish, translate, create derivative works from, distribute, perform, display, and otherwise exploit your Contributions in any manner they see fit, including for any commercial purpose or Monetization.
|
|
||||||
|
|
||||||
b) **No Take-Backs:** You understand and agree that this grant of rights is irrevocable ("no take-backs"). You cannot revoke, rescind, or terminate this grant of rights once your Contribution has been submitted.
|
|
||||||
|
|
||||||
c) **Waiver of Moral Rights:** You waive any "moral rights" or other rights with respect to attribution of authorship or integrity of materials regarding your Contributions that you may have under any applicable law.
|
|
||||||
|
|
||||||
d) **Right to Contribute:** You represent and warrant that you are the original author of the Contributions, or that you have sufficient rights to grant the rights conveyed by this section, and that your Contributions do not infringe upon the rights of any third party.
|
|
||||||
|
|
||||||
6. TERMINATION
|
|
||||||
|
|
||||||
This license will terminate automatically if Licensee breaches any term of this Agreement. Upon termination, Licensee must immediately cease all use of the Software and destroy all copies in their possession.
|
|
||||||
|
|
||||||
7. HIGH RISK DISCLAIMER AND LIMITATION OF LIABILITY
|
|
||||||
|
|
||||||
a) **AI RISKS:** THE SOFTWARE UTILIZES ARTIFICIAL INTELLIGENCE TO GENERATE CODE, EXECUTE COMMANDS, AND INTERACT WITH YOUR FILE SYSTEM. YOU ACKNOWLEDGE THAT AI SYSTEMS CAN BE UNPREDICTABLE, MAY GENERATE INCORRECT, INSECURE, OR DESTRUCTIVE CODE, AND MAY TAKE ACTIONS THAT COULD DAMAGE YOUR SYSTEM, FILES, OR HARDWARE.
|
|
||||||
|
|
||||||
b) **USE AT YOUR OWN RISK:** YOU AGREE THAT YOUR USE OF THE SOFTWARE IS SOLELY AT YOUR OWN RISK. THE CORE CONTRIBUTORS AND LICENSOR DO NOT GUARANTEE THAT THE SOFTWARE OR ANY CODE GENERATED BY IT WILL BE SAFE, BUG-FREE, OR FUNCTIONAL.
|
|
||||||
|
|
||||||
c) **NO WARRANTY:** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT.
|
|
||||||
|
|
||||||
d) **LIMITATION OF LIABILITY:** IN NO EVENT SHALL THE CORE CONTRIBUTORS, LICENSORS, OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE, INCLUDING BUT NOT LIMITED TO:
|
|
||||||
|
|
||||||
- DAMAGE TO HARDWARE OR COMPUTER SYSTEMS
|
|
||||||
- DATA LOSS OR CORRUPTION
|
|
||||||
- GENERATION OF BAD, VULNERABLE, OR MALICIOUS CODE
|
|
||||||
- FINANCIAL LOSSES
|
|
||||||
- BUSINESS INTERRUPTION
|
|
||||||
|
|
||||||
8. LICENSE AMENDMENTS
|
|
||||||
|
|
||||||
Any amendment, modification, or update to this License Agreement must be agreed upon unanimously by all Core Contributors. No changes to this Agreement shall be effective unless all Core Contributors have provided their written consent or approval through a unanimous vote.
|
|
||||||
|
|
||||||
9. CONTACT
|
|
||||||
|
|
||||||
For inquiries regarding this license or permissions for Monetization, please contact the Core Contributors through the official project channels:
|
|
||||||
|
|
||||||
- Agentic Jumpstart Discord: https://discord.gg/JUDWZDN3VT
|
|
||||||
- Website: https://automaker.app
|
|
||||||
- Email: automakerapp@gmail.com
|
|
||||||
|
|
||||||
Any permission for Monetization requires the unanimous written consent of all Core Contributors.
|
|
||||||
|
|
||||||
10. GOVERNING LAW
|
|
||||||
|
|
||||||
This Agreement shall be governed by and construed in accordance with the laws of the State of Tennessee, USA, without regard to conflict of law principles.
|
|
||||||
|
|
||||||
By using the Software, you acknowledge that you have read this Agreement, understand it, and agree to be bound by its terms and conditions.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2025 Automaker Core Contributors
|
Copyright (c) 2025 Automaker Core Contributors
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|||||||
2
OPENCODE_CONFIG_CONTENT
Normal file
2
OPENCODE_CONFIG_CONTENT
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://opencode.ai/config.json",}
|
||||||
247
README.md
247
README.md
@@ -28,6 +28,7 @@
|
|||||||
- [Quick Start](#quick-start)
|
- [Quick Start](#quick-start)
|
||||||
- [How to Run](#how-to-run)
|
- [How to Run](#how-to-run)
|
||||||
- [Development Mode](#development-mode)
|
- [Development Mode](#development-mode)
|
||||||
|
- [Interactive TUI Launcher](#interactive-tui-launcher-recommended-for-new-users)
|
||||||
- [Building for Production](#building-for-production)
|
- [Building for Production](#building-for-production)
|
||||||
- [Testing](#testing)
|
- [Testing](#testing)
|
||||||
- [Linting](#linting)
|
- [Linting](#linting)
|
||||||
@@ -101,11 +102,9 @@ In the Discord, you can:
|
|||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
|
|
||||||
- **Node.js 18+** (tested with Node.js 22)
|
- **Node.js 22+** (required: >=22.0.0 <23.0.0)
|
||||||
- **npm** (comes with Node.js)
|
- **npm** (comes with Node.js)
|
||||||
- **Authentication** (choose one):
|
- **[Claude Code CLI](https://code.claude.com/docs/en/overview)** - Install and authenticate with your Anthropic subscription. Automaker integrates with your authenticated Claude Code CLI to access Claude models.
|
||||||
- **[Claude Code CLI](https://code.claude.com/docs/en/overview)** (recommended) - Install and authenticate, credentials used automatically
|
|
||||||
- **Anthropic API Key** - Direct API key for Claude Agent SDK ([get one here](https://console.anthropic.com/))
|
|
||||||
|
|
||||||
### Quick Start
|
### Quick Start
|
||||||
|
|
||||||
@@ -117,30 +116,14 @@ cd automaker
|
|||||||
# 2. Install dependencies
|
# 2. Install dependencies
|
||||||
npm install
|
npm install
|
||||||
|
|
||||||
# 3. Build shared packages (can be skipped - npm run dev does it automatically)
|
# 3. Start Automaker
|
||||||
npm run build:packages
|
|
||||||
|
|
||||||
# 4. Start Automaker
|
|
||||||
npm run dev
|
npm run dev
|
||||||
# Choose between:
|
# Choose between:
|
||||||
# 1. Web Application (browser at localhost:3007)
|
# 1. Web Application (browser at localhost:3007)
|
||||||
# 2. Desktop Application (Electron - recommended)
|
# 2. Desktop Application (Electron - recommended)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Authentication Setup:** On first run, Automaker will automatically show a setup wizard where you can configure authentication. You can choose to:
|
**Authentication:** Automaker integrates with your authenticated Claude Code CLI. Make sure you have [installed and authenticated](https://code.claude.com/docs/en/quickstart) the Claude Code CLI before running Automaker. Your CLI credentials will be detected automatically.
|
||||||
|
|
||||||
- Use **Claude Code CLI** (recommended) - Automaker will detect your CLI credentials automatically
|
|
||||||
- Enter an **API key** directly in the wizard
|
|
||||||
|
|
||||||
If you prefer to set up authentication before running (e.g., for headless deployments or CI/CD), you can set it manually:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Option A: Environment variable
|
|
||||||
export ANTHROPIC_API_KEY="sk-ant-..."
|
|
||||||
|
|
||||||
# Option B: Create .env file in project root
|
|
||||||
echo "ANTHROPIC_API_KEY=sk-ant-..." > .env
|
|
||||||
```
|
|
||||||
|
|
||||||
**For Development:** `npm run dev` starts the development server with Vite live reload and hot module replacement for fast refresh and instant updates as you make changes.
|
**For Development:** `npm run dev` starts the development server with Vite live reload and hot module replacement for fast refresh and instant updates as you make changes.
|
||||||
|
|
||||||
@@ -179,6 +162,40 @@ npm run dev:electron:wsl:gpu
|
|||||||
npm run dev:web
|
npm run dev:web
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Interactive TUI Launcher (Recommended for New Users)
|
||||||
|
|
||||||
|
For a user-friendly interactive menu, use the built-in TUI launcher script:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Show interactive menu with all launch options
|
||||||
|
./start-automaker.sh
|
||||||
|
|
||||||
|
# Or launch directly without menu
|
||||||
|
./start-automaker.sh web # Web browser
|
||||||
|
./start-automaker.sh electron # Desktop app
|
||||||
|
./start-automaker.sh electron-debug # Desktop + DevTools
|
||||||
|
|
||||||
|
# Additional options
|
||||||
|
./start-automaker.sh --help # Show all available options
|
||||||
|
./start-automaker.sh --version # Show version information
|
||||||
|
./start-automaker.sh --check-deps # Verify project dependencies
|
||||||
|
./start-automaker.sh --no-colors # Disable colored output
|
||||||
|
./start-automaker.sh --no-history # Don't remember last choice
|
||||||
|
```
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
|
||||||
|
- 🎨 Beautiful terminal UI with gradient colors and ASCII art
|
||||||
|
- ⌨️ Interactive menu (press 1-3 to select, Q to exit)
|
||||||
|
- 💾 Remembers your last choice
|
||||||
|
- ✅ Pre-flight checks (validates Node.js, npm, dependencies)
|
||||||
|
- 📏 Responsive layout (adapts to terminal size)
|
||||||
|
- ⏱️ 30-second timeout for hands-free selection
|
||||||
|
- 🌐 Cross-shell compatible (bash/zsh)
|
||||||
|
|
||||||
|
**History File:**
|
||||||
|
Your last selected mode is saved in `~/.automaker_launcher_history` for quick re-runs.
|
||||||
|
|
||||||
### Building for Production
|
### Building for Production
|
||||||
|
|
||||||
#### Web Application
|
#### Web Application
|
||||||
@@ -197,11 +214,30 @@ npm run build:electron
|
|||||||
# Platform-specific builds
|
# Platform-specific builds
|
||||||
npm run build:electron:mac # macOS (DMG + ZIP, x64 + arm64)
|
npm run build:electron:mac # macOS (DMG + ZIP, x64 + arm64)
|
||||||
npm run build:electron:win # Windows (NSIS installer, x64)
|
npm run build:electron:win # Windows (NSIS installer, x64)
|
||||||
npm run build:electron:linux # Linux (AppImage + DEB, x64)
|
npm run build:electron:linux # Linux (AppImage + DEB + RPM, x64)
|
||||||
|
|
||||||
# Output directory: apps/ui/release/
|
# Output directory: apps/ui/release/
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Linux Distribution Packages:**
|
||||||
|
|
||||||
|
- **AppImage**: Universal format, works on any Linux distribution
|
||||||
|
- **DEB**: Ubuntu, Debian, Linux Mint, Pop!\_OS
|
||||||
|
- **RPM**: Fedora, RHEL, Rocky Linux, AlmaLinux, openSUSE
|
||||||
|
|
||||||
|
**Installing on Fedora/RHEL:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Download the RPM package
|
||||||
|
wget https://github.com/AutoMaker-Org/automaker/releases/latest/download/Automaker-<version>-x86_64.rpm
|
||||||
|
|
||||||
|
# Install with dnf (Fedora)
|
||||||
|
sudo dnf install ./Automaker-<version>-x86_64.rpm
|
||||||
|
|
||||||
|
# Or with yum (RHEL/CentOS)
|
||||||
|
sudo yum localinstall ./Automaker-<version>-x86_64.rpm
|
||||||
|
```
|
||||||
|
|
||||||
#### Docker Deployment
|
#### Docker Deployment
|
||||||
|
|
||||||
Docker provides the most secure way to run Automaker by isolating it from your host filesystem.
|
Docker provides the most secure way to run Automaker by isolating it from your host filesystem.
|
||||||
@@ -220,16 +256,9 @@ docker-compose logs -f
|
|||||||
docker-compose down
|
docker-compose down
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Configuration
|
##### Authentication
|
||||||
|
|
||||||
Create a `.env` file in the project root if using API key authentication:
|
Automaker integrates with your authenticated Claude Code CLI. To use CLI authentication in Docker, mount your Claude CLI config directory (see [Claude CLI Authentication](#claude-cli-authentication) below).
|
||||||
|
|
||||||
```bash
|
|
||||||
# Optional: Anthropic API key (not needed if using Claude CLI authentication)
|
|
||||||
ANTHROPIC_API_KEY=sk-ant-...
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** Most users authenticate via Claude CLI instead of API keys. See [Claude CLI Authentication](#claude-cli-authentication-optional) below.
|
|
||||||
|
|
||||||
##### Working with Projects (Host Directory Access)
|
##### Working with Projects (Host Directory Access)
|
||||||
|
|
||||||
@@ -243,9 +272,9 @@ services:
|
|||||||
- /path/to/your/project:/projects/your-project
|
- /path/to/your/project:/projects/your-project
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Claude CLI Authentication (Optional)
|
##### Claude CLI Authentication
|
||||||
|
|
||||||
To use Claude Code CLI authentication instead of an API key, mount your Claude CLI config directory:
|
Mount your Claude CLI config directory to use your authenticated CLI credentials:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
services:
|
services:
|
||||||
@@ -259,6 +288,31 @@ services:
|
|||||||
|
|
||||||
**Note:** The Claude CLI config must be writable (do not use `:ro` flag) as the CLI writes debug files.
|
**Note:** The Claude CLI config must be writable (do not use `:ro` flag) as the CLI writes debug files.
|
||||||
|
|
||||||
|
> **⚠️ Important: Linux/WSL Users**
|
||||||
|
>
|
||||||
|
> The container runs as UID 1001 by default. If your host user has a different UID (common on Linux/WSL where the first user is UID 1000), you must create a `.env` file to match your host user:
|
||||||
|
>
|
||||||
|
> ```bash
|
||||||
|
> # Check your UID/GID
|
||||||
|
> id -u # outputs your UID (e.g., 1000)
|
||||||
|
> id -g # outputs your GID (e.g., 1000)
|
||||||
|
> ```
|
||||||
|
>
|
||||||
|
> Create a `.env` file in the automaker directory:
|
||||||
|
>
|
||||||
|
> ```
|
||||||
|
> UID=1000
|
||||||
|
> GID=1000
|
||||||
|
> ```
|
||||||
|
>
|
||||||
|
> Then rebuild the images:
|
||||||
|
>
|
||||||
|
> ```bash
|
||||||
|
> docker compose build
|
||||||
|
> ```
|
||||||
|
>
|
||||||
|
> Without this, files written by the container will be inaccessible to your host user.
|
||||||
|
|
||||||
##### GitHub CLI Authentication (For Git Push/PR Operations)
|
##### GitHub CLI Authentication (For Git Push/PR Operations)
|
||||||
|
|
||||||
To enable git push and GitHub CLI operations inside the container:
|
To enable git push and GitHub CLI operations inside the container:
|
||||||
@@ -309,6 +363,42 @@ services:
|
|||||||
|
|
||||||
The Docker image supports both AMD64 and ARM64 architectures. The GitHub CLI and Claude CLI are automatically downloaded for the correct architecture during build.
|
The Docker image supports both AMD64 and ARM64 architectures. The GitHub CLI and Claude CLI are automatically downloaded for the correct architecture during build.
|
||||||
|
|
||||||
|
##### Playwright for Automated Testing
|
||||||
|
|
||||||
|
The Docker image includes **Playwright Chromium pre-installed** for AI agent verification tests. When agents implement features in automated testing mode, they use Playwright to verify the implementation works correctly.
|
||||||
|
|
||||||
|
**No additional setup required** - Playwright verification works out of the box.
|
||||||
|
|
||||||
|
#### Optional: Persist browsers for manual updates
|
||||||
|
|
||||||
|
By default, Playwright Chromium is pre-installed in the Docker image. If you need to manually update browsers or want to persist browser installations across container restarts (not image rebuilds), you can mount a volume.
|
||||||
|
|
||||||
|
**Important:** When you first add this volume mount to an existing setup, the empty volume will override the pre-installed browsers. You must re-install them:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# After adding the volume mount for the first time
|
||||||
|
docker exec --user automaker -w /app automaker-server npx playwright install chromium
|
||||||
|
```
|
||||||
|
|
||||||
|
Add this to your `docker-compose.override.yml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
server:
|
||||||
|
volumes:
|
||||||
|
- playwright-cache:/home/automaker/.cache/ms-playwright
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
playwright-cache:
|
||||||
|
name: automaker-playwright-cache
|
||||||
|
```
|
||||||
|
|
||||||
|
**Updating browsers manually:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec --user automaker -w /app automaker-server npx playwright install chromium
|
||||||
|
```
|
||||||
|
|
||||||
### Testing
|
### Testing
|
||||||
|
|
||||||
#### End-to-End Tests (Playwright)
|
#### End-to-End Tests (Playwright)
|
||||||
@@ -343,10 +433,6 @@ npm run lint
|
|||||||
|
|
||||||
### Environment Configuration
|
### Environment Configuration
|
||||||
|
|
||||||
#### Authentication (if not using Claude Code CLI)
|
|
||||||
|
|
||||||
- `ANTHROPIC_API_KEY` - Your Anthropic API key for Claude Agent SDK (not needed if using Claude Code CLI)
|
|
||||||
|
|
||||||
#### Optional - Server
|
#### Optional - Server
|
||||||
|
|
||||||
- `PORT` - Server port (default: 3008)
|
- `PORT` - Server port (default: 3008)
|
||||||
@@ -357,49 +443,23 @@ npm run lint
|
|||||||
|
|
||||||
- `AUTOMAKER_API_KEY` - Optional API authentication for the server
|
- `AUTOMAKER_API_KEY` - Optional API authentication for the server
|
||||||
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
||||||
- `CORS_ORIGIN` - CORS policy (default: \*)
|
- `CORS_ORIGIN` - CORS allowed origins (comma-separated list; defaults to localhost only)
|
||||||
|
|
||||||
#### Optional - Development
|
#### Optional - Development
|
||||||
|
|
||||||
- `VITE_SKIP_ELECTRON` - Skip Electron in dev mode
|
- `VITE_SKIP_ELECTRON` - Skip Electron in dev mode
|
||||||
- `OPEN_DEVTOOLS` - Auto-open DevTools in Electron
|
- `OPEN_DEVTOOLS` - Auto-open DevTools in Electron
|
||||||
|
- `AUTOMAKER_SKIP_SANDBOX_WARNING` - Skip sandbox warning dialog (useful for dev/CI)
|
||||||
|
- `AUTOMAKER_AUTO_LOGIN=true` - Skip login prompt in development (ignored when NODE_ENV=production)
|
||||||
|
|
||||||
### Authentication Setup
|
### Authentication Setup
|
||||||
|
|
||||||
#### Option 1: Claude Code CLI (Recommended)
|
Automaker integrates with your authenticated Claude Code CLI and uses your Anthropic subscription.
|
||||||
|
|
||||||
Install and authenticate the Claude Code CLI following the [official quickstart guide](https://code.claude.com/docs/en/quickstart).
|
Install and authenticate the Claude Code CLI following the [official quickstart guide](https://code.claude.com/docs/en/quickstart).
|
||||||
|
|
||||||
Once authenticated, Automaker will automatically detect and use your CLI credentials. No additional configuration needed!
|
Once authenticated, Automaker will automatically detect and use your CLI credentials. No additional configuration needed!
|
||||||
|
|
||||||
#### Option 2: Direct API Key
|
|
||||||
|
|
||||||
If you prefer not to use the CLI, you can provide an Anthropic API key directly using one of these methods:
|
|
||||||
|
|
||||||
##### 2a. Shell Configuration
|
|
||||||
|
|
||||||
Add to your `~/.bashrc` or `~/.zshrc`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
export ANTHROPIC_API_KEY="sk-ant-..."
|
|
||||||
```
|
|
||||||
|
|
||||||
Then restart your terminal or run `source ~/.bashrc` (or `source ~/.zshrc`).
|
|
||||||
|
|
||||||
##### 2b. .env File
|
|
||||||
|
|
||||||
Create a `.env` file in the project root (gitignored):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ANTHROPIC_API_KEY=sk-ant-...
|
|
||||||
PORT=3008
|
|
||||||
DATA_DIR=./data
|
|
||||||
```
|
|
||||||
|
|
||||||
##### 2c. In-App Storage
|
|
||||||
|
|
||||||
The application can store your API key securely in the settings UI. The key is persisted in the `DATA_DIR` directory.
|
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
### Core Workflow
|
### Core Workflow
|
||||||
@@ -508,20 +568,24 @@ Automaker provides several specialized views accessible via the sidebar or keybo
|
|||||||
| **Agent** | `A` | Interactive chat sessions with AI agents for exploratory work and questions |
|
| **Agent** | `A` | Interactive chat sessions with AI agents for exploratory work and questions |
|
||||||
| **Spec** | `D` | Project specification editor with AI-powered generation and feature suggestions |
|
| **Spec** | `D` | Project specification editor with AI-powered generation and feature suggestions |
|
||||||
| **Context** | `C` | Manage context files (markdown, images) that AI agents automatically reference |
|
| **Context** | `C` | Manage context files (markdown, images) that AI agents automatically reference |
|
||||||
| **Profiles** | `M` | Create and manage AI agent profiles with custom prompts and configurations |
|
|
||||||
| **Settings** | `S` | Configure themes, shortcuts, defaults, authentication, and more |
|
| **Settings** | `S` | Configure themes, shortcuts, defaults, authentication, and more |
|
||||||
| **Terminal** | `T` | Integrated terminal with tabs, splits, and persistent sessions |
|
| **Terminal** | `T` | Integrated terminal with tabs, splits, and persistent sessions |
|
||||||
| **GitHub Issues** | - | Import and validate GitHub issues, convert to tasks |
|
| **Graph** | `H` | Visualize feature dependencies with interactive graph visualization |
|
||||||
|
| **Ideation** | `I` | Brainstorm and generate ideas with AI assistance |
|
||||||
|
| **Memory** | `Y` | View and manage agent memory and conversation history |
|
||||||
|
| **GitHub Issues** | `G` | Import and validate GitHub issues, convert to tasks |
|
||||||
|
| **GitHub PRs** | `R` | View and manage GitHub pull requests |
|
||||||
| **Running Agents** | - | View all active agents across projects with status and progress |
|
| **Running Agents** | - | View all active agents across projects with status and progress |
|
||||||
|
|
||||||
### Keyboard Navigation
|
### Keyboard Navigation
|
||||||
|
|
||||||
All shortcuts are customizable in Settings. Default shortcuts:
|
All shortcuts are customizable in Settings. Default shortcuts:
|
||||||
|
|
||||||
- **Navigation:** `K` (Board), `A` (Agent), `D` (Spec), `C` (Context), `S` (Settings), `M` (Profiles), `T` (Terminal)
|
- **Navigation:** `K` (Board), `A` (Agent), `D` (Spec), `C` (Context), `S` (Settings), `T` (Terminal), `H` (Graph), `I` (Ideation), `Y` (Memory), `G` (GitHub Issues), `R` (GitHub PRs)
|
||||||
- **UI:** `` ` `` (Toggle sidebar)
|
- **UI:** `` ` `` (Toggle sidebar)
|
||||||
- **Actions:** `N` (New item in current view), `G` (Start next features), `O` (Open project), `P` (Project picker)
|
- **Actions:** `N` (New item in current view), `O` (Open project), `P` (Project picker)
|
||||||
- **Projects:** `Q`/`E` (Cycle previous/next project)
|
- **Projects:** `Q`/`E` (Cycle previous/next project)
|
||||||
|
- **Terminal:** `Alt+D` (Split right), `Alt+S` (Split down), `Alt+W` (Close), `Alt+T` (New tab)
|
||||||
|
|
||||||
## Architecture
|
## Architecture
|
||||||
|
|
||||||
@@ -586,10 +650,16 @@ Stored in `{projectPath}/.automaker/`:
|
|||||||
│ ├── agent-output.md # AI agent output log
|
│ ├── agent-output.md # AI agent output log
|
||||||
│ └── images/ # Attached images
|
│ └── images/ # Attached images
|
||||||
├── context/ # Context files for AI agents
|
├── context/ # Context files for AI agents
|
||||||
|
├── worktrees/ # Git worktree metadata
|
||||||
|
├── validations/ # GitHub issue validation results
|
||||||
|
├── ideation/ # Brainstorming and analysis data
|
||||||
|
│ └── analysis.json # Project structure analysis
|
||||||
|
├── board/ # Board-related data
|
||||||
|
├── images/ # Project-level images
|
||||||
├── settings.json # Project-specific settings
|
├── settings.json # Project-specific settings
|
||||||
├── spec.md # Project specification
|
├── app_spec.txt # Project specification (XML format)
|
||||||
├── analysis.json # Project structure analysis
|
├── active-branches.json # Active git branches tracking
|
||||||
└── feature-suggestions.json # AI-generated suggestions
|
└── execution-state.json # Auto-mode execution state
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Global Data
|
#### Global Data
|
||||||
@@ -627,7 +697,6 @@ data/
|
|||||||
|
|
||||||
- [Contributing Guide](./CONTRIBUTING.md) - How to contribute to Automaker
|
- [Contributing Guide](./CONTRIBUTING.md) - How to contribute to Automaker
|
||||||
- [Project Documentation](./docs/) - Architecture guides, patterns, and developer docs
|
- [Project Documentation](./docs/) - Architecture guides, patterns, and developer docs
|
||||||
- [Docker Isolation Guide](./docs/docker-isolation.md) - Security-focused Docker deployment
|
|
||||||
- [Shared Packages Guide](./docs/llm-shared-packages.md) - Using monorepo packages
|
- [Shared Packages Guide](./docs/llm-shared-packages.md) - Using monorepo packages
|
||||||
|
|
||||||
### Community
|
### Community
|
||||||
@@ -636,26 +705,10 @@ Join the **Agentic Jumpstart** Discord to connect with other builders exploring
|
|||||||
|
|
||||||
👉 [Agentic Jumpstart Discord](https://discord.gg/jjem7aEDKU)
|
👉 [Agentic Jumpstart Discord](https://discord.gg/jjem7aEDKU)
|
||||||
|
|
||||||
|
## Project Status
|
||||||
|
|
||||||
|
**This project is no longer actively maintained.** The codebase is provided as-is for those who wish to use, study, or fork it. No bug fixes, security updates, or new features are being developed. Community contributions may still be accepted, but there is no guarantee of review or merge.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
This project is licensed under the **Automaker License Agreement**. See [LICENSE](LICENSE) for the full text.
|
This project is licensed under the **MIT License**. See [LICENSE](LICENSE) for the full text.
|
||||||
|
|
||||||
**Summary of Terms:**
|
|
||||||
|
|
||||||
- **Allowed:**
|
|
||||||
- **Build Anything:** You can clone and use Automaker locally or in your organization to build ANY product (commercial or free).
|
|
||||||
- **Internal Use:** You can use it internally within your company (commercial or non-profit) without restriction.
|
|
||||||
- **Modify:** You can modify the code for internal use within your organization (commercial or non-profit).
|
|
||||||
|
|
||||||
- **Restricted (The "No Monetization of the Tool" Rule):**
|
|
||||||
- **No Resale:** You cannot resell Automaker itself.
|
|
||||||
- **No SaaS:** You cannot host Automaker as a service for others.
|
|
||||||
- **No Monetizing Mods:** You cannot distribute modified versions of Automaker for money.
|
|
||||||
|
|
||||||
- **Liability:**
|
|
||||||
- **Use at Own Risk:** This tool uses AI. We are **NOT** responsible if it breaks your computer, deletes your files, or generates bad code. You assume all risk.
|
|
||||||
|
|
||||||
- **Contributing:**
|
|
||||||
- By contributing to this repository, you grant the Core Contributors full, irrevocable rights to your code (copyright assignment).
|
|
||||||
|
|
||||||
**Core Contributors** (Cody Seibert (webdevcody), SuperComboGamer (SCG), Kacper Lachowicz (Shironex, Shirone), and Ben Scott (trueheads)) are granted perpetual, royalty-free licenses for any use, including monetization.
|
|
||||||
|
|||||||
17
TODO.md
17
TODO.md
@@ -1,17 +0,0 @@
|
|||||||
# Bugs
|
|
||||||
|
|
||||||
- Setting the default model does not seem like it works.
|
|
||||||
|
|
||||||
# UX
|
|
||||||
|
|
||||||
- Consolidate all models to a single place in the settings instead of having AI profiles and all this other stuff
|
|
||||||
- Simplify the create feature modal. It should just be one page. I don't need nessa tabs and all these nested buttons. It's too complex.
|
|
||||||
- added to do's list checkbox directly into the card so as it's going through if there's any to do items we can see those update live
|
|
||||||
- When the feature is done, I want to see a summary of the LLM. That's the first thing I should see when I double click the card.
|
|
||||||
- I went away to mass edit all my features. For example, when I created a new project, it added auto testing on every single feature card. Now I have to manually go through one by one and change those. Have a way to mass edit those, the configuration of all them.
|
|
||||||
- Double check and debug if there's memory leaks. It seems like the memory of automaker grows like 3 gigabytes. It's 5gb right now and I'm running three different cursor cli features implementing at the same time.
|
|
||||||
- Typing in the text area of the plan mode was super laggy.
|
|
||||||
- When I have a bunch of features running at the same time, it seems like I cannot edit the features in the backlog. Like they don't persist their file changes and I think this is because of the secure FS file has an internal queue to prevent hitting that file open write limit. We may have to reconsider refactoring away from file system and do Postgres or SQLite or something.
|
|
||||||
- modals are not scrollable if height of the screen is small enough
|
|
||||||
- and the Agent Runner add an archival button for the new sessions.
|
|
||||||
- investigate a potential issue with the feature cards not refreshing. I see a lock icon on the feature card But it doesn't go away until I open the card and edit it and I turn the testing mode off. I think there's like a refresh sync issue.
|
|
||||||
@@ -44,6 +44,11 @@ CORS_ORIGIN=http://localhost:3007
|
|||||||
# OPTIONAL - Server
|
# OPTIONAL - Server
|
||||||
# ============================================
|
# ============================================
|
||||||
|
|
||||||
|
# Host to bind the server to (default: 0.0.0.0)
|
||||||
|
# Use 0.0.0.0 to listen on all interfaces (recommended for Docker/remote access)
|
||||||
|
# Use 127.0.0.1 or localhost to restrict to local connections only
|
||||||
|
HOST=0.0.0.0
|
||||||
|
|
||||||
# Port to run the server on
|
# Port to run the server on
|
||||||
PORT=3008
|
PORT=3008
|
||||||
|
|
||||||
@@ -63,6 +68,14 @@ TERMINAL_PASSWORD=
|
|||||||
|
|
||||||
ENABLE_REQUEST_LOGGING=false
|
ENABLE_REQUEST_LOGGING=false
|
||||||
|
|
||||||
|
# ============================================
|
||||||
|
# OPTIONAL - UI Behavior
|
||||||
|
# ============================================
|
||||||
|
|
||||||
|
# Skip the sandbox warning dialog on startup (default: false)
|
||||||
|
# Set to "true" to disable the warning entirely (useful for dev/CI environments)
|
||||||
|
AUTOMAKER_SKIP_SANDBOX_WARNING=false
|
||||||
|
|
||||||
# ============================================
|
# ============================================
|
||||||
# OPTIONAL - Debugging
|
# OPTIONAL - Debugging
|
||||||
# ============================================
|
# ============================================
|
||||||
|
|||||||
74
apps/server/eslint.config.mjs
Normal file
74
apps/server/eslint.config.mjs
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
import { defineConfig, globalIgnores } from 'eslint/config';
|
||||||
|
import js from '@eslint/js';
|
||||||
|
import ts from '@typescript-eslint/eslint-plugin';
|
||||||
|
import tsParser from '@typescript-eslint/parser';
|
||||||
|
|
||||||
|
const eslintConfig = defineConfig([
|
||||||
|
js.configs.recommended,
|
||||||
|
{
|
||||||
|
files: ['**/*.ts'],
|
||||||
|
languageOptions: {
|
||||||
|
parser: tsParser,
|
||||||
|
parserOptions: {
|
||||||
|
ecmaVersion: 'latest',
|
||||||
|
sourceType: 'module',
|
||||||
|
},
|
||||||
|
globals: {
|
||||||
|
// Node.js globals
|
||||||
|
console: 'readonly',
|
||||||
|
process: 'readonly',
|
||||||
|
Buffer: 'readonly',
|
||||||
|
__dirname: 'readonly',
|
||||||
|
__filename: 'readonly',
|
||||||
|
URL: 'readonly',
|
||||||
|
URLSearchParams: 'readonly',
|
||||||
|
AbortController: 'readonly',
|
||||||
|
AbortSignal: 'readonly',
|
||||||
|
fetch: 'readonly',
|
||||||
|
Response: 'readonly',
|
||||||
|
Request: 'readonly',
|
||||||
|
Headers: 'readonly',
|
||||||
|
FormData: 'readonly',
|
||||||
|
RequestInit: 'readonly',
|
||||||
|
// Timers
|
||||||
|
setTimeout: 'readonly',
|
||||||
|
setInterval: 'readonly',
|
||||||
|
clearTimeout: 'readonly',
|
||||||
|
clearInterval: 'readonly',
|
||||||
|
setImmediate: 'readonly',
|
||||||
|
clearImmediate: 'readonly',
|
||||||
|
queueMicrotask: 'readonly',
|
||||||
|
// Node.js types
|
||||||
|
NodeJS: 'readonly',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
plugins: {
|
||||||
|
'@typescript-eslint': ts,
|
||||||
|
},
|
||||||
|
rules: {
|
||||||
|
...ts.configs.recommended.rules,
|
||||||
|
'@typescript-eslint/no-unused-vars': [
|
||||||
|
'warn',
|
||||||
|
{
|
||||||
|
argsIgnorePattern: '^_',
|
||||||
|
varsIgnorePattern: '^_',
|
||||||
|
caughtErrorsIgnorePattern: '^_',
|
||||||
|
ignoreRestSiblings: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
'@typescript-eslint/no-explicit-any': 'warn',
|
||||||
|
// Server code frequently works with terminal output containing ANSI escape codes
|
||||||
|
'no-control-regex': 'off',
|
||||||
|
'@typescript-eslint/ban-ts-comment': [
|
||||||
|
'error',
|
||||||
|
{
|
||||||
|
'ts-nocheck': 'allow-with-description',
|
||||||
|
minimumDescriptionLength: 10,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
globalIgnores(['dist/**', 'node_modules/**']),
|
||||||
|
]);
|
||||||
|
|
||||||
|
export default eslintConfig;
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "@automaker/server",
|
"name": "@automaker/server",
|
||||||
"version": "0.9.0",
|
"version": "0.13.0",
|
||||||
"description": "Backend server for Automaker - provides API for both web and Electron modes",
|
"description": "Backend server for Automaker - provides API for both web and Electron modes",
|
||||||
"author": "AutoMaker Team",
|
"author": "AutoMaker Team",
|
||||||
"license": "SEE LICENSE IN LICENSE",
|
"license": "SEE LICENSE IN LICENSE",
|
||||||
@@ -24,7 +24,7 @@
|
|||||||
"test:unit": "vitest run tests/unit"
|
"test:unit": "vitest run tests/unit"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@anthropic-ai/claude-agent-sdk": "0.1.76",
|
"@anthropic-ai/claude-agent-sdk": "0.2.32",
|
||||||
"@automaker/dependency-resolver": "1.0.0",
|
"@automaker/dependency-resolver": "1.0.0",
|
||||||
"@automaker/git-utils": "1.0.0",
|
"@automaker/git-utils": "1.0.0",
|
||||||
"@automaker/model-resolver": "1.0.0",
|
"@automaker/model-resolver": "1.0.0",
|
||||||
@@ -32,17 +32,20 @@
|
|||||||
"@automaker/prompts": "1.0.0",
|
"@automaker/prompts": "1.0.0",
|
||||||
"@automaker/types": "1.0.0",
|
"@automaker/types": "1.0.0",
|
||||||
"@automaker/utils": "1.0.0",
|
"@automaker/utils": "1.0.0",
|
||||||
"@modelcontextprotocol/sdk": "1.25.1",
|
"@github/copilot-sdk": "^0.1.16",
|
||||||
"@openai/codex-sdk": "^0.77.0",
|
"@modelcontextprotocol/sdk": "1.25.2",
|
||||||
|
"@openai/codex-sdk": "^0.98.0",
|
||||||
"cookie-parser": "1.4.7",
|
"cookie-parser": "1.4.7",
|
||||||
"cors": "2.8.5",
|
"cors": "2.8.5",
|
||||||
"dotenv": "17.2.3",
|
"dotenv": "17.2.3",
|
||||||
"express": "5.2.1",
|
"express": "5.2.1",
|
||||||
"morgan": "1.10.1",
|
"morgan": "1.10.1",
|
||||||
"node-pty": "1.1.0-beta41",
|
"node-pty": "1.1.0-beta41",
|
||||||
"ws": "8.18.3"
|
"ws": "8.18.3",
|
||||||
|
"yaml": "2.7.0"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
|
"@playwright/test": "1.57.0",
|
||||||
"@types/cookie": "0.6.0",
|
"@types/cookie": "0.6.0",
|
||||||
"@types/cookie-parser": "1.4.10",
|
"@types/cookie-parser": "1.4.10",
|
||||||
"@types/cors": "2.8.19",
|
"@types/cors": "2.8.19",
|
||||||
|
|||||||
@@ -16,10 +16,20 @@ import { createServer } from 'http';
|
|||||||
import dotenv from 'dotenv';
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
import { createEventEmitter, type EventEmitter } from './lib/events.js';
|
import { createEventEmitter, type EventEmitter } from './lib/events.js';
|
||||||
import { initAllowedPaths } from '@automaker/platform';
|
import { initAllowedPaths, getClaudeAuthIndicators } from '@automaker/platform';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger, setLogLevel, LogLevel } from '@automaker/utils';
|
||||||
|
|
||||||
const logger = createLogger('Server');
|
const logger = createLogger('Server');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Map server log level string to LogLevel enum
|
||||||
|
*/
|
||||||
|
const LOG_LEVEL_MAP: Record<string, LogLevel> = {
|
||||||
|
error: LogLevel.ERROR,
|
||||||
|
warn: LogLevel.WARN,
|
||||||
|
info: LogLevel.INFO,
|
||||||
|
debug: LogLevel.DEBUG,
|
||||||
|
};
|
||||||
import { authMiddleware, validateWsConnectionToken, checkRawAuthentication } from './lib/auth.js';
|
import { authMiddleware, validateWsConnectionToken, checkRawAuthentication } from './lib/auth.js';
|
||||||
import { requireJsonContentType } from './middleware/require-json-content-type.js';
|
import { requireJsonContentType } from './middleware/require-json-content-type.js';
|
||||||
import { createAuthRoutes } from './routes/auth/index.js';
|
import { createAuthRoutes } from './routes/auth/index.js';
|
||||||
@@ -33,7 +43,6 @@ import { createEnhancePromptRoutes } from './routes/enhance-prompt/index.js';
|
|||||||
import { createWorktreeRoutes } from './routes/worktree/index.js';
|
import { createWorktreeRoutes } from './routes/worktree/index.js';
|
||||||
import { createGitRoutes } from './routes/git/index.js';
|
import { createGitRoutes } from './routes/git/index.js';
|
||||||
import { createSetupRoutes } from './routes/setup/index.js';
|
import { createSetupRoutes } from './routes/setup/index.js';
|
||||||
import { createSuggestionsRoutes } from './routes/suggestions/index.js';
|
|
||||||
import { createModelsRoutes } from './routes/models/index.js';
|
import { createModelsRoutes } from './routes/models/index.js';
|
||||||
import { createRunningAgentsRoutes } from './routes/running-agents/index.js';
|
import { createRunningAgentsRoutes } from './routes/running-agents/index.js';
|
||||||
import { createWorkspaceRoutes } from './routes/workspace/index.js';
|
import { createWorkspaceRoutes } from './routes/workspace/index.js';
|
||||||
@@ -47,7 +56,7 @@ import {
|
|||||||
import { createSettingsRoutes } from './routes/settings/index.js';
|
import { createSettingsRoutes } from './routes/settings/index.js';
|
||||||
import { AgentService } from './services/agent-service.js';
|
import { AgentService } from './services/agent-service.js';
|
||||||
import { FeatureLoader } from './services/feature-loader.js';
|
import { FeatureLoader } from './services/feature-loader.js';
|
||||||
import { AutoModeService } from './services/auto-mode-service.js';
|
import { AutoModeServiceCompat } from './services/auto-mode/index.js';
|
||||||
import { getTerminalService } from './services/terminal-service.js';
|
import { getTerminalService } from './services/terminal-service.js';
|
||||||
import { SettingsService } from './services/settings-service.js';
|
import { SettingsService } from './services/settings-service.js';
|
||||||
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
|
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
|
||||||
@@ -55,6 +64,12 @@ import { createClaudeRoutes } from './routes/claude/index.js';
|
|||||||
import { ClaudeUsageService } from './services/claude-usage-service.js';
|
import { ClaudeUsageService } from './services/claude-usage-service.js';
|
||||||
import { createCodexRoutes } from './routes/codex/index.js';
|
import { createCodexRoutes } from './routes/codex/index.js';
|
||||||
import { CodexUsageService } from './services/codex-usage-service.js';
|
import { CodexUsageService } from './services/codex-usage-service.js';
|
||||||
|
import { CodexAppServerService } from './services/codex-app-server-service.js';
|
||||||
|
import { CodexModelCacheService } from './services/codex-model-cache-service.js';
|
||||||
|
import { createZaiRoutes } from './routes/zai/index.js';
|
||||||
|
import { ZaiUsageService } from './services/zai-usage-service.js';
|
||||||
|
import { createGeminiRoutes } from './routes/gemini/index.js';
|
||||||
|
import { GeminiUsageService } from './services/gemini-usage-service.js';
|
||||||
import { createGitHubRoutes } from './routes/github/index.js';
|
import { createGitHubRoutes } from './routes/github/index.js';
|
||||||
import { createContextRoutes } from './routes/context/index.js';
|
import { createContextRoutes } from './routes/context/index.js';
|
||||||
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
|
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
|
||||||
@@ -65,34 +80,168 @@ import { createPipelineRoutes } from './routes/pipeline/index.js';
|
|||||||
import { pipelineService } from './services/pipeline-service.js';
|
import { pipelineService } from './services/pipeline-service.js';
|
||||||
import { createIdeationRoutes } from './routes/ideation/index.js';
|
import { createIdeationRoutes } from './routes/ideation/index.js';
|
||||||
import { IdeationService } from './services/ideation-service.js';
|
import { IdeationService } from './services/ideation-service.js';
|
||||||
|
import { getDevServerService } from './services/dev-server-service.js';
|
||||||
|
import { eventHookService } from './services/event-hook-service.js';
|
||||||
|
import { createNotificationsRoutes } from './routes/notifications/index.js';
|
||||||
|
import { getNotificationService } from './services/notification-service.js';
|
||||||
|
import { createEventHistoryRoutes } from './routes/event-history/index.js';
|
||||||
|
import { getEventHistoryService } from './services/event-history-service.js';
|
||||||
|
import { getTestRunnerService } from './services/test-runner-service.js';
|
||||||
|
import { createProjectsRoutes } from './routes/projects/index.js';
|
||||||
|
|
||||||
// Load environment variables
|
// Load environment variables
|
||||||
dotenv.config();
|
dotenv.config();
|
||||||
|
|
||||||
const PORT = parseInt(process.env.PORT || '3008', 10);
|
const PORT = parseInt(process.env.PORT || '3008', 10);
|
||||||
|
const HOST = process.env.HOST || '0.0.0.0';
|
||||||
|
const HOSTNAME = process.env.HOSTNAME || 'localhost';
|
||||||
const DATA_DIR = process.env.DATA_DIR || './data';
|
const DATA_DIR = process.env.DATA_DIR || './data';
|
||||||
const ENABLE_REQUEST_LOGGING = process.env.ENABLE_REQUEST_LOGGING !== 'false'; // Default to true
|
logger.info('[SERVER_STARTUP] process.env.DATA_DIR:', process.env.DATA_DIR);
|
||||||
|
logger.info('[SERVER_STARTUP] Resolved DATA_DIR:', DATA_DIR);
|
||||||
|
logger.info('[SERVER_STARTUP] process.cwd():', process.cwd());
|
||||||
|
const ENABLE_REQUEST_LOGGING_DEFAULT = process.env.ENABLE_REQUEST_LOGGING !== 'false'; // Default to true
|
||||||
|
|
||||||
// Check for required environment variables
|
// Runtime-configurable request logging flag (can be changed via settings)
|
||||||
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
|
let requestLoggingEnabled = ENABLE_REQUEST_LOGGING_DEFAULT;
|
||||||
|
|
||||||
if (!hasAnthropicKey) {
|
/**
|
||||||
logger.warn(`
|
* Enable or disable HTTP request logging at runtime
|
||||||
╔═══════════════════════════════════════════════════════════════════════╗
|
*/
|
||||||
║ ⚠️ WARNING: No Claude authentication configured ║
|
export function setRequestLoggingEnabled(enabled: boolean): void {
|
||||||
║ ║
|
requestLoggingEnabled = enabled;
|
||||||
║ The Claude Agent SDK requires authentication to function. ║
|
|
||||||
║ ║
|
|
||||||
║ Set your Anthropic API key: ║
|
|
||||||
║ export ANTHROPIC_API_KEY="sk-ant-..." ║
|
|
||||||
║ ║
|
|
||||||
║ Or use the setup wizard in Settings to configure authentication. ║
|
|
||||||
╚═══════════════════════════════════════════════════════════════════════╝
|
|
||||||
`);
|
|
||||||
} else {
|
|
||||||
logger.info('✓ ANTHROPIC_API_KEY detected (API key auth)');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current request logging state
|
||||||
|
*/
|
||||||
|
export function isRequestLoggingEnabled(): boolean {
|
||||||
|
return requestLoggingEnabled;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Width for log box content (excluding borders)
|
||||||
|
const BOX_CONTENT_WIDTH = 67;
|
||||||
|
|
||||||
|
// Check for Claude authentication (async - runs in background)
|
||||||
|
// The Claude Agent SDK can use either ANTHROPIC_API_KEY or Claude Code CLI authentication
|
||||||
|
(async () => {
|
||||||
|
const hasAnthropicKey = !!process.env.ANTHROPIC_API_KEY;
|
||||||
|
const hasEnvOAuthToken = !!process.env.CLAUDE_CODE_OAUTH_TOKEN;
|
||||||
|
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Starting credential detection...');
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Environment variables:', {
|
||||||
|
hasAnthropicKey,
|
||||||
|
hasEnvOAuthToken,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (hasAnthropicKey) {
|
||||||
|
logger.info('✓ ANTHROPIC_API_KEY detected');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hasEnvOAuthToken) {
|
||||||
|
logger.info('✓ CLAUDE_CODE_OAUTH_TOKEN detected');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for Claude Code CLI authentication
|
||||||
|
// Store indicators outside the try block so we can use them in the warning message
|
||||||
|
let cliAuthIndicators: Awaited<ReturnType<typeof getClaudeAuthIndicators>> | null = null;
|
||||||
|
|
||||||
|
try {
|
||||||
|
cliAuthIndicators = await getClaudeAuthIndicators();
|
||||||
|
const indicators = cliAuthIndicators;
|
||||||
|
|
||||||
|
// Log detailed credential detection results
|
||||||
|
const { checks, ...indicatorSummary } = indicators;
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Claude CLI auth indicators:', indicatorSummary);
|
||||||
|
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] File check details:', checks);
|
||||||
|
|
||||||
|
const hasCliAuth =
|
||||||
|
indicators.hasStatsCacheWithActivity ||
|
||||||
|
(indicators.hasSettingsFile && indicators.hasProjectsSessions) ||
|
||||||
|
(indicators.hasCredentialsFile &&
|
||||||
|
(indicators.credentials?.hasOAuthToken || indicators.credentials?.hasApiKey));
|
||||||
|
|
||||||
|
logger.debug('[CREDENTIAL_CHECK] Auth determination:', {
|
||||||
|
hasCliAuth,
|
||||||
|
reason: hasCliAuth
|
||||||
|
? indicators.hasStatsCacheWithActivity
|
||||||
|
? 'stats cache with activity'
|
||||||
|
: indicators.hasSettingsFile && indicators.hasProjectsSessions
|
||||||
|
? 'settings file + project sessions'
|
||||||
|
: indicators.credentials?.hasOAuthToken
|
||||||
|
? 'credentials file with OAuth token'
|
||||||
|
: 'credentials file with API key'
|
||||||
|
: 'no valid credentials found',
|
||||||
|
});
|
||||||
|
|
||||||
|
if (hasCliAuth) {
|
||||||
|
logger.info('✓ Claude Code CLI authentication detected');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// Ignore errors checking CLI auth - will fall through to warning
|
||||||
|
logger.warn('Error checking for Claude Code CLI authentication:', error);
|
||||||
|
}
|
||||||
|
|
||||||
|
// No authentication found - show warning with paths that were checked
|
||||||
|
const wHeader = '⚠️ WARNING: No Claude authentication configured'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const w1 = 'The Claude Agent SDK requires authentication to function.'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const w2 = 'Options:'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const w3 = '1. Install Claude Code CLI and authenticate with subscription'.padEnd(
|
||||||
|
BOX_CONTENT_WIDTH
|
||||||
|
);
|
||||||
|
const w4 = '2. Set your Anthropic API key:'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const w5 = ' export ANTHROPIC_API_KEY="sk-ant-..."'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const w6 = '3. Use the setup wizard in Settings to configure authentication.'.padEnd(
|
||||||
|
BOX_CONTENT_WIDTH
|
||||||
|
);
|
||||||
|
|
||||||
|
// Build paths checked summary from the indicators (if available)
|
||||||
|
let pathsCheckedInfo = '';
|
||||||
|
if (cliAuthIndicators) {
|
||||||
|
const pathsChecked: string[] = [];
|
||||||
|
|
||||||
|
// Collect paths that were checked (paths are always populated strings)
|
||||||
|
pathsChecked.push(`Settings: ${cliAuthIndicators.checks.settingsFile.path}`);
|
||||||
|
pathsChecked.push(`Stats cache: ${cliAuthIndicators.checks.statsCache.path}`);
|
||||||
|
pathsChecked.push(`Projects dir: ${cliAuthIndicators.checks.projectsDir.path}`);
|
||||||
|
for (const credFile of cliAuthIndicators.checks.credentialFiles) {
|
||||||
|
pathsChecked.push(`Credentials: ${credFile.path}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pathsChecked.length > 0) {
|
||||||
|
pathsCheckedInfo = `
|
||||||
|
║ ║
|
||||||
|
║ ${'Paths checked:'.padEnd(BOX_CONTENT_WIDTH)}║
|
||||||
|
${pathsChecked
|
||||||
|
.map((p) => {
|
||||||
|
const maxLen = BOX_CONTENT_WIDTH - 4;
|
||||||
|
const display = p.length > maxLen ? '...' + p.slice(-(maxLen - 3)) : p;
|
||||||
|
return `║ ${display.padEnd(maxLen)} ║`;
|
||||||
|
})
|
||||||
|
.join('\n')}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.warn(`
|
||||||
|
╔═════════════════════════════════════════════════════════════════════╗
|
||||||
|
║ ${wHeader}║
|
||||||
|
╠═════════════════════════════════════════════════════════════════════╣
|
||||||
|
║ ║
|
||||||
|
║ ${w1}║
|
||||||
|
║ ║
|
||||||
|
║ ${w2}║
|
||||||
|
║ ${w3}║
|
||||||
|
║ ${w4}║
|
||||||
|
║ ${w5}║
|
||||||
|
║ ${w6}║${pathsCheckedInfo}
|
||||||
|
║ ║
|
||||||
|
╚═════════════════════════════════════════════════════════════════════╝
|
||||||
|
`);
|
||||||
|
})();
|
||||||
|
|
||||||
// Initialize security
|
// Initialize security
|
||||||
initAllowedPaths();
|
initAllowedPaths();
|
||||||
|
|
||||||
@@ -100,22 +249,21 @@ initAllowedPaths();
|
|||||||
const app = express();
|
const app = express();
|
||||||
|
|
||||||
// Middleware
|
// Middleware
|
||||||
// Custom colored logger showing only endpoint and status code (configurable via ENABLE_REQUEST_LOGGING env var)
|
// Custom colored logger showing only endpoint and status code (dynamically configurable)
|
||||||
if (ENABLE_REQUEST_LOGGING) {
|
morgan.token('status-colored', (_req, res) => {
|
||||||
morgan.token('status-colored', (_req, res) => {
|
|
||||||
const status = res.statusCode;
|
const status = res.statusCode;
|
||||||
if (status >= 500) return `\x1b[31m${status}\x1b[0m`; // Red for server errors
|
if (status >= 500) return `\x1b[31m${status}\x1b[0m`; // Red for server errors
|
||||||
if (status >= 400) return `\x1b[33m${status}\x1b[0m`; // Yellow for client errors
|
if (status >= 400) return `\x1b[33m${status}\x1b[0m`; // Yellow for client errors
|
||||||
if (status >= 300) return `\x1b[36m${status}\x1b[0m`; // Cyan for redirects
|
if (status >= 300) return `\x1b[36m${status}\x1b[0m`; // Cyan for redirects
|
||||||
return `\x1b[32m${status}\x1b[0m`; // Green for success
|
return `\x1b[32m${status}\x1b[0m`; // Green for success
|
||||||
});
|
});
|
||||||
|
|
||||||
app.use(
|
app.use(
|
||||||
morgan(':method :url :status-colored', {
|
morgan(':method :url :status-colored', {
|
||||||
skip: (req) => req.url === '/api/health', // Skip health check logs
|
// Skip when request logging is disabled or for health check endpoints
|
||||||
|
skip: (req) => !requestLoggingEnabled || req.url === '/api/health',
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
}
|
|
||||||
// CORS configuration
|
// CORS configuration
|
||||||
// When using credentials (cookies), origin cannot be '*'
|
// When using credentials (cookies), origin cannot be '*'
|
||||||
// We dynamically allow the requesting origin for local development
|
// We dynamically allow the requesting origin for local development
|
||||||
@@ -139,15 +287,26 @@ app.use(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// For local development, allow localhost origins
|
// For local development, allow all localhost/loopback origins (any port)
|
||||||
|
try {
|
||||||
|
const url = new URL(origin);
|
||||||
|
const hostname = url.hostname;
|
||||||
|
|
||||||
if (
|
if (
|
||||||
origin.startsWith('http://localhost:') ||
|
hostname === 'localhost' ||
|
||||||
origin.startsWith('http://127.0.0.1:') ||
|
hostname === '127.0.0.1' ||
|
||||||
origin.startsWith('http://[::1]:')
|
hostname === '::1' ||
|
||||||
|
hostname === '0.0.0.0' ||
|
||||||
|
hostname.startsWith('192.168.') ||
|
||||||
|
hostname.startsWith('10.') ||
|
||||||
|
hostname.startsWith('172.')
|
||||||
) {
|
) {
|
||||||
callback(null, origin);
|
callback(null, origin);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
} catch {
|
||||||
|
// Ignore URL parsing errors
|
||||||
|
}
|
||||||
|
|
||||||
// Reject other origins by default for security
|
// Reject other origins by default for security
|
||||||
callback(new Error('Not allowed by CORS'));
|
callback(new Error('Not allowed by CORS'));
|
||||||
@@ -166,16 +325,130 @@ const events: EventEmitter = createEventEmitter();
|
|||||||
const settingsService = new SettingsService(DATA_DIR);
|
const settingsService = new SettingsService(DATA_DIR);
|
||||||
const agentService = new AgentService(DATA_DIR, events, settingsService);
|
const agentService = new AgentService(DATA_DIR, events, settingsService);
|
||||||
const featureLoader = new FeatureLoader();
|
const featureLoader = new FeatureLoader();
|
||||||
const autoModeService = new AutoModeService(events, settingsService);
|
|
||||||
|
// Auto-mode services: compatibility layer provides old interface while using new architecture
|
||||||
|
const autoModeService = new AutoModeServiceCompat(events, settingsService, featureLoader);
|
||||||
const claudeUsageService = new ClaudeUsageService();
|
const claudeUsageService = new ClaudeUsageService();
|
||||||
const codexUsageService = new CodexUsageService();
|
const codexAppServerService = new CodexAppServerService();
|
||||||
|
const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
|
||||||
|
const codexUsageService = new CodexUsageService(codexAppServerService);
|
||||||
|
const zaiUsageService = new ZaiUsageService();
|
||||||
|
const geminiUsageService = new GeminiUsageService();
|
||||||
const mcpTestService = new MCPTestService(settingsService);
|
const mcpTestService = new MCPTestService(settingsService);
|
||||||
const ideationService = new IdeationService(events, settingsService, featureLoader);
|
const ideationService = new IdeationService(events, settingsService, featureLoader);
|
||||||
|
|
||||||
|
// Initialize DevServerService with event emitter for real-time log streaming
|
||||||
|
const devServerService = getDevServerService();
|
||||||
|
devServerService.setEventEmitter(events);
|
||||||
|
|
||||||
|
// Initialize Notification Service with event emitter for real-time updates
|
||||||
|
const notificationService = getNotificationService();
|
||||||
|
notificationService.setEventEmitter(events);
|
||||||
|
|
||||||
|
// Initialize Event History Service
|
||||||
|
const eventHistoryService = getEventHistoryService();
|
||||||
|
|
||||||
|
// Initialize Test Runner Service with event emitter for real-time test output streaming
|
||||||
|
const testRunnerService = getTestRunnerService();
|
||||||
|
testRunnerService.setEventEmitter(events);
|
||||||
|
|
||||||
|
// Initialize Event Hook Service for custom event triggers (with history storage)
|
||||||
|
eventHookService.initialize(events, settingsService, eventHistoryService, featureLoader);
|
||||||
|
|
||||||
// Initialize services
|
// Initialize services
|
||||||
(async () => {
|
(async () => {
|
||||||
|
// Migrate settings from legacy Electron userData location if needed
|
||||||
|
// This handles users upgrading from versions that stored settings in ~/.config/Automaker (Linux),
|
||||||
|
// ~/Library/Application Support/Automaker (macOS), or %APPDATA%\Automaker (Windows)
|
||||||
|
// to the new shared ./data directory
|
||||||
|
try {
|
||||||
|
const migrationResult = await settingsService.migrateFromLegacyElectronPath();
|
||||||
|
if (migrationResult.migrated) {
|
||||||
|
logger.info(`Settings migrated from legacy location: ${migrationResult.legacyPath}`);
|
||||||
|
logger.info(`Migrated files: ${migrationResult.migratedFiles.join(', ')}`);
|
||||||
|
}
|
||||||
|
if (migrationResult.errors.length > 0) {
|
||||||
|
logger.warn('Migration errors:', migrationResult.errors);
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
logger.warn('Failed to check for legacy settings migration:', err);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch global settings once and reuse for logging config and feature reconciliation
|
||||||
|
let globalSettings: Awaited<ReturnType<typeof settingsService.getGlobalSettings>> | null = null;
|
||||||
|
try {
|
||||||
|
globalSettings = await settingsService.getGlobalSettings();
|
||||||
|
} catch {
|
||||||
|
logger.warn('Failed to load global settings, using defaults');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply logging settings from saved settings
|
||||||
|
if (globalSettings) {
|
||||||
|
try {
|
||||||
|
if (
|
||||||
|
globalSettings.serverLogLevel &&
|
||||||
|
LOG_LEVEL_MAP[globalSettings.serverLogLevel] !== undefined
|
||||||
|
) {
|
||||||
|
setLogLevel(LOG_LEVEL_MAP[globalSettings.serverLogLevel]);
|
||||||
|
logger.info(`Server log level set to: ${globalSettings.serverLogLevel}`);
|
||||||
|
}
|
||||||
|
// Apply request logging setting (default true if not set)
|
||||||
|
const enableRequestLog = globalSettings.enableRequestLogging ?? true;
|
||||||
|
setRequestLoggingEnabled(enableRequestLog);
|
||||||
|
logger.info(`HTTP request logging: ${enableRequestLog ? 'enabled' : 'disabled'}`);
|
||||||
|
} catch {
|
||||||
|
logger.warn('Failed to apply logging settings, using defaults');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
await agentService.initialize();
|
await agentService.initialize();
|
||||||
logger.info('Agent service initialized');
|
logger.info('Agent service initialized');
|
||||||
|
|
||||||
|
// Reconcile feature states on startup
|
||||||
|
// After any type of restart (clean, forced, crash), features may be stuck in
|
||||||
|
// transient states (in_progress, interrupted, pipeline_*) that don't match reality.
|
||||||
|
// Reconcile them back to resting states before the UI is served.
|
||||||
|
if (globalSettings) {
|
||||||
|
try {
|
||||||
|
if (globalSettings.projects && globalSettings.projects.length > 0) {
|
||||||
|
let totalReconciled = 0;
|
||||||
|
for (const project of globalSettings.projects) {
|
||||||
|
const count = await autoModeService.reconcileFeatureStates(project.path);
|
||||||
|
totalReconciled += count;
|
||||||
|
}
|
||||||
|
if (totalReconciled > 0) {
|
||||||
|
logger.info(
|
||||||
|
`[STARTUP] Reconciled ${totalReconciled} feature(s) across ${globalSettings.projects.length} project(s)`
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
logger.info('[STARTUP] Feature state reconciliation complete - no stale states found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume interrupted features in the background after reconciliation.
|
||||||
|
// This uses the saved execution state to identify features that were running
|
||||||
|
// before the restart (their statuses have been reset to ready/backlog by
|
||||||
|
// reconciliation above). Running in background so it doesn't block startup.
|
||||||
|
if (totalReconciled > 0) {
|
||||||
|
for (const project of globalSettings.projects) {
|
||||||
|
autoModeService.resumeInterruptedFeatures(project.path).catch((err) => {
|
||||||
|
logger.warn(
|
||||||
|
`[STARTUP] Failed to resume interrupted features for ${project.path}:`,
|
||||||
|
err
|
||||||
|
);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
logger.info('[STARTUP] Initiated background resume of interrupted features');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
logger.warn('[STARTUP] Failed to reconcile feature states:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bootstrap Codex model cache in background (don't block server startup)
|
||||||
|
void codexModelCacheService.getModels().catch((err) => {
|
||||||
|
logger.error('Failed to bootstrap Codex model cache:', err);
|
||||||
|
});
|
||||||
})();
|
})();
|
||||||
|
|
||||||
// Run stale validation cleanup every hour to prevent memory leaks from crashed validations
|
// Run stale validation cleanup every hour to prevent memory leaks from crashed validations
|
||||||
@@ -205,12 +478,14 @@ app.get('/api/health/detailed', createDetailedHandler());
|
|||||||
app.use('/api/fs', createFsRoutes(events));
|
app.use('/api/fs', createFsRoutes(events));
|
||||||
app.use('/api/agent', createAgentRoutes(agentService, events));
|
app.use('/api/agent', createAgentRoutes(agentService, events));
|
||||||
app.use('/api/sessions', createSessionsRoutes(agentService));
|
app.use('/api/sessions', createSessionsRoutes(agentService));
|
||||||
app.use('/api/features', createFeaturesRoutes(featureLoader));
|
app.use(
|
||||||
|
'/api/features',
|
||||||
|
createFeaturesRoutes(featureLoader, settingsService, events, autoModeService)
|
||||||
|
);
|
||||||
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
|
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
|
||||||
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
|
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
|
||||||
app.use('/api/worktree', createWorktreeRoutes());
|
app.use('/api/worktree', createWorktreeRoutes(events, settingsService));
|
||||||
app.use('/api/git', createGitRoutes());
|
app.use('/api/git', createGitRoutes());
|
||||||
app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
|
|
||||||
app.use('/api/models', createModelsRoutes());
|
app.use('/api/models', createModelsRoutes());
|
||||||
app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events, settingsService));
|
app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events, settingsService));
|
||||||
app.use('/api/running-agents', createRunningAgentsRoutes(autoModeService));
|
app.use('/api/running-agents', createRunningAgentsRoutes(autoModeService));
|
||||||
@@ -219,13 +494,21 @@ app.use('/api/templates', createTemplatesRoutes());
|
|||||||
app.use('/api/terminal', createTerminalRoutes());
|
app.use('/api/terminal', createTerminalRoutes());
|
||||||
app.use('/api/settings', createSettingsRoutes(settingsService));
|
app.use('/api/settings', createSettingsRoutes(settingsService));
|
||||||
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
|
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
|
||||||
app.use('/api/codex', createCodexRoutes(codexUsageService));
|
app.use('/api/codex', createCodexRoutes(codexUsageService, codexModelCacheService));
|
||||||
|
app.use('/api/zai', createZaiRoutes(zaiUsageService, settingsService));
|
||||||
|
app.use('/api/gemini', createGeminiRoutes(geminiUsageService, events));
|
||||||
app.use('/api/github', createGitHubRoutes(events, settingsService));
|
app.use('/api/github', createGitHubRoutes(events, settingsService));
|
||||||
app.use('/api/context', createContextRoutes(settingsService));
|
app.use('/api/context', createContextRoutes(settingsService));
|
||||||
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
|
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
|
||||||
app.use('/api/mcp', createMCPRoutes(mcpTestService));
|
app.use('/api/mcp', createMCPRoutes(mcpTestService));
|
||||||
app.use('/api/pipeline', createPipelineRoutes(pipelineService));
|
app.use('/api/pipeline', createPipelineRoutes(pipelineService));
|
||||||
app.use('/api/ideation', createIdeationRoutes(events, ideationService, featureLoader));
|
app.use('/api/ideation', createIdeationRoutes(events, ideationService, featureLoader));
|
||||||
|
app.use('/api/notifications', createNotificationsRoutes(notificationService));
|
||||||
|
app.use('/api/event-history', createEventHistoryRoutes(eventHistoryService, settingsService));
|
||||||
|
app.use(
|
||||||
|
'/api/projects',
|
||||||
|
createProjectsRoutes(featureLoader, autoModeService, settingsService, notificationService)
|
||||||
|
);
|
||||||
|
|
||||||
// Create HTTP server
|
// Create HTTP server
|
||||||
const server = createServer(app);
|
const server = createServer(app);
|
||||||
@@ -233,7 +516,7 @@ const server = createServer(app);
|
|||||||
// WebSocket servers using noServer mode for proper multi-path support
|
// WebSocket servers using noServer mode for proper multi-path support
|
||||||
const wss = new WebSocketServer({ noServer: true });
|
const wss = new WebSocketServer({ noServer: true });
|
||||||
const terminalWss = new WebSocketServer({ noServer: true });
|
const terminalWss = new WebSocketServer({ noServer: true });
|
||||||
const terminalService = getTerminalService();
|
const terminalService = getTerminalService(settingsService);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Authenticate WebSocket upgrade requests
|
* Authenticate WebSocket upgrade requests
|
||||||
@@ -316,7 +599,7 @@ wss.on('connection', (ws: WebSocket) => {
|
|||||||
logger.info('Sending event to client:', {
|
logger.info('Sending event to client:', {
|
||||||
type,
|
type,
|
||||||
messageLength: message.length,
|
messageLength: message.length,
|
||||||
sessionId: (payload as any)?.sessionId,
|
sessionId: (payload as Record<string, unknown>)?.sessionId,
|
||||||
});
|
});
|
||||||
ws.send(message);
|
ws.send(message);
|
||||||
} else {
|
} else {
|
||||||
@@ -382,8 +665,15 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
|
|||||||
// Check if session exists
|
// Check if session exists
|
||||||
const session = terminalService.getSession(sessionId);
|
const session = terminalService.getSession(sessionId);
|
||||||
if (!session) {
|
if (!session) {
|
||||||
logger.info(`Session ${sessionId} not found`);
|
logger.warn(
|
||||||
ws.close(4004, 'Session not found');
|
`Terminal session ${sessionId} not found. ` +
|
||||||
|
`The session may have exited, been deleted, or was never created. ` +
|
||||||
|
`Active terminal sessions: ${terminalService.getSessionCount()}`
|
||||||
|
);
|
||||||
|
ws.close(
|
||||||
|
4004,
|
||||||
|
'Session not found. The terminal session may have expired or been closed. Please create a new terminal.'
|
||||||
|
);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -537,46 +827,81 @@ terminalWss.on('connection', (ws: WebSocket, req: import('http').IncomingMessage
|
|||||||
});
|
});
|
||||||
|
|
||||||
// Start server with error handling for port conflicts
|
// Start server with error handling for port conflicts
|
||||||
const startServer = (port: number) => {
|
const startServer = (port: number, host: string) => {
|
||||||
server.listen(port, () => {
|
server.listen(port, host, () => {
|
||||||
const terminalStatus = isTerminalEnabled()
|
const terminalStatus = isTerminalEnabled()
|
||||||
? isTerminalPasswordRequired()
|
? isTerminalPasswordRequired()
|
||||||
? 'enabled (password protected)'
|
? 'enabled (password protected)'
|
||||||
: 'enabled'
|
: 'enabled'
|
||||||
: 'disabled';
|
: 'disabled';
|
||||||
const portStr = port.toString().padEnd(4);
|
|
||||||
|
// Build URLs for display
|
||||||
|
const listenAddr = `${host}:${port}`;
|
||||||
|
const httpUrl = `http://${HOSTNAME}:${port}`;
|
||||||
|
const wsEventsUrl = `ws://${HOSTNAME}:${port}/api/events`;
|
||||||
|
const wsTerminalUrl = `ws://${HOSTNAME}:${port}/api/terminal/ws`;
|
||||||
|
const healthUrl = `http://${HOSTNAME}:${port}/api/health`;
|
||||||
|
|
||||||
|
const sHeader = '🚀 Automaker Backend Server'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const s1 = `Listening: ${listenAddr}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const s2 = `HTTP API: ${httpUrl}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const s3 = `WebSocket: ${wsEventsUrl}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const s4 = `Terminal WS: ${wsTerminalUrl}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const s5 = `Health: ${healthUrl}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const s6 = `Terminal: ${terminalStatus}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
|
||||||
logger.info(`
|
logger.info(`
|
||||||
╔═══════════════════════════════════════════════════════╗
|
╔═════════════════════════════════════════════════════════════════════╗
|
||||||
║ Automaker Backend Server ║
|
║ ${sHeader}║
|
||||||
╠═══════════════════════════════════════════════════════╣
|
╠═════════════════════════════════════════════════════════════════════╣
|
||||||
║ HTTP API: http://localhost:${portStr} ║
|
║ ║
|
||||||
║ WebSocket: ws://localhost:${portStr}/api/events ║
|
║ ${s1}║
|
||||||
║ Terminal: ws://localhost:${portStr}/api/terminal/ws ║
|
║ ${s2}║
|
||||||
║ Health: http://localhost:${portStr}/api/health ║
|
║ ${s3}║
|
||||||
║ Terminal: ${terminalStatus.padEnd(37)}║
|
║ ${s4}║
|
||||||
╚═══════════════════════════════════════════════════════╝
|
║ ${s5}║
|
||||||
|
║ ${s6}║
|
||||||
|
║ ║
|
||||||
|
╚═════════════════════════════════════════════════════════════════════╝
|
||||||
`);
|
`);
|
||||||
});
|
});
|
||||||
|
|
||||||
server.on('error', (error: NodeJS.ErrnoException) => {
|
server.on('error', (error: NodeJS.ErrnoException) => {
|
||||||
if (error.code === 'EADDRINUSE') {
|
if (error.code === 'EADDRINUSE') {
|
||||||
|
const portStr = port.toString();
|
||||||
|
const nextPortStr = (port + 1).toString();
|
||||||
|
const killCmd = `lsof -ti:${portStr} | xargs kill -9`;
|
||||||
|
const altCmd = `PORT=${nextPortStr} npm run dev:server`;
|
||||||
|
|
||||||
|
const eHeader = `❌ ERROR: Port ${portStr} is already in use`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e1 = 'Another process is using this port.'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e2 = 'To fix this, try one of:'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e3 = '1. Kill the process using the port:'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e4 = ` ${killCmd}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e5 = '2. Use a different port:'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e6 = ` ${altCmd}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e7 = '3. Use the init.sh script which handles this:'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const e8 = ' ./init.sh'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
|
||||||
logger.error(`
|
logger.error(`
|
||||||
╔═══════════════════════════════════════════════════════╗
|
╔═════════════════════════════════════════════════════════════════════╗
|
||||||
║ ❌ ERROR: Port ${port} is already in use ║
|
║ ${eHeader}║
|
||||||
╠═══════════════════════════════════════════════════════╣
|
╠═════════════════════════════════════════════════════════════════════╣
|
||||||
║ Another process is using this port. ║
|
|
||||||
║ ║
|
║ ║
|
||||||
║ To fix this, try one of: ║
|
║ ${e1}║
|
||||||
║ ║
|
║ ║
|
||||||
║ 1. Kill the process using the port: ║
|
║ ${e2}║
|
||||||
║ lsof -ti:${port} | xargs kill -9 ║
|
|
||||||
║ ║
|
║ ║
|
||||||
║ 2. Use a different port: ║
|
║ ${e3}║
|
||||||
║ PORT=${port + 1} npm run dev:server ║
|
║ ${e4}║
|
||||||
║ ║
|
║ ║
|
||||||
║ 3. Use the init.sh script which handles this: ║
|
║ ${e5}║
|
||||||
║ ./init.sh ║
|
║ ${e6}║
|
||||||
╚═══════════════════════════════════════════════════════╝
|
║ ║
|
||||||
|
║ ${e7}║
|
||||||
|
║ ${e8}║
|
||||||
|
║ ║
|
||||||
|
╚═════════════════════════════════════════════════════════════════════╝
|
||||||
`);
|
`);
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
} else {
|
} else {
|
||||||
@@ -586,23 +911,58 @@ const startServer = (port: number) => {
|
|||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
startServer(PORT);
|
startServer(PORT, HOST);
|
||||||
|
|
||||||
|
// Global error handlers to prevent crashes from uncaught errors
|
||||||
|
process.on('unhandledRejection', (reason: unknown, _promise: Promise<unknown>) => {
|
||||||
|
logger.error('Unhandled Promise Rejection:', {
|
||||||
|
reason: reason instanceof Error ? reason.message : String(reason),
|
||||||
|
stack: reason instanceof Error ? reason.stack : undefined,
|
||||||
|
});
|
||||||
|
// Don't exit - log the error and continue running
|
||||||
|
// This prevents the server from crashing due to unhandled rejections
|
||||||
|
});
|
||||||
|
|
||||||
|
process.on('uncaughtException', (error: Error) => {
|
||||||
|
logger.error('Uncaught Exception:', {
|
||||||
|
message: error.message,
|
||||||
|
stack: error.stack,
|
||||||
|
});
|
||||||
|
// Exit on uncaught exceptions to prevent undefined behavior
|
||||||
|
// The process is in an unknown state after an uncaught exception
|
||||||
|
process.exit(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Graceful shutdown timeout (30 seconds)
|
||||||
|
const SHUTDOWN_TIMEOUT_MS = 30000;
|
||||||
|
|
||||||
|
// Graceful shutdown helper
|
||||||
|
const gracefulShutdown = async (signal: string) => {
|
||||||
|
logger.info(`${signal} received, shutting down...`);
|
||||||
|
|
||||||
|
// Set up a force-exit timeout to prevent hanging
|
||||||
|
const forceExitTimeout = setTimeout(() => {
|
||||||
|
logger.error(`Shutdown timed out after ${SHUTDOWN_TIMEOUT_MS}ms, forcing exit`);
|
||||||
|
process.exit(1);
|
||||||
|
}, SHUTDOWN_TIMEOUT_MS);
|
||||||
|
|
||||||
|
// Mark all running features as interrupted before shutdown
|
||||||
|
// This ensures they can be resumed when the server restarts
|
||||||
|
// Note: markAllRunningFeaturesInterrupted handles errors internally and never rejects
|
||||||
|
await autoModeService.markAllRunningFeaturesInterrupted(`${signal} signal received`);
|
||||||
|
|
||||||
// Graceful shutdown
|
|
||||||
process.on('SIGTERM', () => {
|
|
||||||
logger.info('SIGTERM received, shutting down...');
|
|
||||||
terminalService.cleanup();
|
terminalService.cleanup();
|
||||||
server.close(() => {
|
server.close(() => {
|
||||||
|
clearTimeout(forceExitTimeout);
|
||||||
logger.info('Server closed');
|
logger.info('Server closed');
|
||||||
process.exit(0);
|
process.exit(0);
|
||||||
});
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
process.on('SIGTERM', () => {
|
||||||
|
gracefulShutdown('SIGTERM');
|
||||||
});
|
});
|
||||||
|
|
||||||
process.on('SIGINT', () => {
|
process.on('SIGINT', () => {
|
||||||
logger.info('SIGINT received, shutting down...');
|
gracefulShutdown('SIGINT');
|
||||||
terminalService.cleanup();
|
|
||||||
server.close(() => {
|
|
||||||
logger.info('Server closed');
|
|
||||||
process.exit(0);
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -11,8 +11,12 @@ export { specOutputSchema } from '@automaker/types';
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Escape special XML characters
|
* Escape special XML characters
|
||||||
|
* Handles undefined/null values by converting them to empty strings
|
||||||
*/
|
*/
|
||||||
function escapeXml(str: string): string {
|
export function escapeXml(str: string | undefined | null): string {
|
||||||
|
if (str == null) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
return str
|
return str
|
||||||
.replace(/&/g, '&')
|
.replace(/&/g, '&')
|
||||||
.replace(/</g, '<')
|
.replace(/</g, '<')
|
||||||
|
|||||||
@@ -23,6 +23,13 @@ const SESSION_COOKIE_NAME = 'automaker_session';
|
|||||||
const SESSION_MAX_AGE_MS = 30 * 24 * 60 * 60 * 1000; // 30 days
|
const SESSION_MAX_AGE_MS = 30 * 24 * 60 * 60 * 1000; // 30 days
|
||||||
const WS_TOKEN_MAX_AGE_MS = 5 * 60 * 1000; // 5 minutes for WebSocket connection tokens
|
const WS_TOKEN_MAX_AGE_MS = 5 * 60 * 1000; // 5 minutes for WebSocket connection tokens
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if an environment variable is set to 'true'
|
||||||
|
*/
|
||||||
|
function isEnvTrue(envVar: string | undefined): boolean {
|
||||||
|
return envVar === 'true';
|
||||||
|
}
|
||||||
|
|
||||||
// Session store - persisted to file for survival across server restarts
|
// Session store - persisted to file for survival across server restarts
|
||||||
const validSessions = new Map<string, { createdAt: number; expiresAt: number }>();
|
const validSessions = new Map<string, { createdAt: number; expiresAt: number }>();
|
||||||
|
|
||||||
@@ -130,19 +137,47 @@ function ensureApiKey(): string {
|
|||||||
// API key - always generated/loaded on startup for CSRF protection
|
// API key - always generated/loaded on startup for CSRF protection
|
||||||
const API_KEY = ensureApiKey();
|
const API_KEY = ensureApiKey();
|
||||||
|
|
||||||
|
// Width for log box content (excluding borders)
|
||||||
|
const BOX_CONTENT_WIDTH = 67;
|
||||||
|
|
||||||
// Print API key to console for web mode users (unless suppressed for production logging)
|
// Print API key to console for web mode users (unless suppressed for production logging)
|
||||||
if (process.env.AUTOMAKER_HIDE_API_KEY !== 'true') {
|
if (!isEnvTrue(process.env.AUTOMAKER_HIDE_API_KEY)) {
|
||||||
|
const autoLoginEnabled = isEnvTrue(process.env.AUTOMAKER_AUTO_LOGIN);
|
||||||
|
const autoLoginStatus = autoLoginEnabled ? 'enabled (auto-login active)' : 'disabled';
|
||||||
|
|
||||||
|
// Build box lines with exact padding
|
||||||
|
const header = '🔐 API Key for Web Mode Authentication'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const line1 = "When accessing via browser, you'll be prompted to enter this key:".padEnd(
|
||||||
|
BOX_CONTENT_WIDTH
|
||||||
|
);
|
||||||
|
const line2 = API_KEY.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const line3 = 'In Electron mode, authentication is handled automatically.'.padEnd(
|
||||||
|
BOX_CONTENT_WIDTH
|
||||||
|
);
|
||||||
|
const line4 = `Auto-login (AUTOMAKER_AUTO_LOGIN): ${autoLoginStatus}`.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const tipHeader = '💡 Tips'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const line5 = 'Set AUTOMAKER_API_KEY env var to use a fixed key'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
const line6 = 'Set AUTOMAKER_AUTO_LOGIN=true to skip the login prompt'.padEnd(BOX_CONTENT_WIDTH);
|
||||||
|
|
||||||
logger.info(`
|
logger.info(`
|
||||||
╔═══════════════════════════════════════════════════════════════════════╗
|
╔═════════════════════════════════════════════════════════════════════╗
|
||||||
║ 🔐 API Key for Web Mode Authentication ║
|
║ ${header}║
|
||||||
╠═══════════════════════════════════════════════════════════════════════╣
|
╠═════════════════════════════════════════════════════════════════════╣
|
||||||
║ ║
|
║ ║
|
||||||
║ When accessing via browser, you'll be prompted to enter this key: ║
|
║ ${line1}║
|
||||||
║ ║
|
║ ║
|
||||||
║ ${API_KEY}
|
║ ${line2}║
|
||||||
║ ║
|
║ ║
|
||||||
║ In Electron mode, authentication is handled automatically. ║
|
║ ${line3}║
|
||||||
╚═══════════════════════════════════════════════════════════════════════╝
|
║ ║
|
||||||
|
║ ${line4}║
|
||||||
|
║ ║
|
||||||
|
╠═════════════════════════════════════════════════════════════════════╣
|
||||||
|
║ ${tipHeader}║
|
||||||
|
╠═════════════════════════════════════════════════════════════════════╣
|
||||||
|
║ ${line5}║
|
||||||
|
║ ${line6}║
|
||||||
|
╚═════════════════════════════════════════════════════════════════════╝
|
||||||
`);
|
`);
|
||||||
} else {
|
} else {
|
||||||
logger.info('API key banner hidden (AUTOMAKER_HIDE_API_KEY=true)');
|
logger.info('API key banner hidden (AUTOMAKER_HIDE_API_KEY=true)');
|
||||||
@@ -318,6 +353,15 @@ function checkAuthentication(
|
|||||||
return { authenticated: false, errorType: 'invalid_api_key' };
|
return { authenticated: false, errorType: 'invalid_api_key' };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check for session token in query parameter (web mode - needed for image loads)
|
||||||
|
const queryToken = query.token;
|
||||||
|
if (queryToken) {
|
||||||
|
if (validateSession(queryToken)) {
|
||||||
|
return { authenticated: true };
|
||||||
|
}
|
||||||
|
return { authenticated: false, errorType: 'invalid_session' };
|
||||||
|
}
|
||||||
|
|
||||||
// Check for session cookie (web mode)
|
// Check for session cookie (web mode)
|
||||||
const sessionToken = cookies[SESSION_COOKIE_NAME];
|
const sessionToken = cookies[SESSION_COOKIE_NAME];
|
||||||
if (sessionToken && validateSession(sessionToken)) {
|
if (sessionToken && validateSession(sessionToken)) {
|
||||||
@@ -333,10 +377,17 @@ function checkAuthentication(
|
|||||||
* Accepts either:
|
* Accepts either:
|
||||||
* 1. X-API-Key header (for Electron mode)
|
* 1. X-API-Key header (for Electron mode)
|
||||||
* 2. X-Session-Token header (for web mode with explicit token)
|
* 2. X-Session-Token header (for web mode with explicit token)
|
||||||
* 3. apiKey query parameter (fallback for cases where headers can't be set)
|
* 3. apiKey query parameter (fallback for Electron, cases where headers can't be set)
|
||||||
* 4. Session cookie (for web mode)
|
* 4. token query parameter (fallback for web mode, needed for image loads via CSS/img tags)
|
||||||
|
* 5. Session cookie (for web mode)
|
||||||
*/
|
*/
|
||||||
export function authMiddleware(req: Request, res: Response, next: NextFunction): void {
|
export function authMiddleware(req: Request, res: Response, next: NextFunction): void {
|
||||||
|
// Allow disabling auth for local/trusted networks
|
||||||
|
if (isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH)) {
|
||||||
|
next();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
const result = checkAuthentication(
|
const result = checkAuthentication(
|
||||||
req.headers as Record<string, string | string[] | undefined>,
|
req.headers as Record<string, string | string[] | undefined>,
|
||||||
req.query as Record<string, string | undefined>,
|
req.query as Record<string, string | undefined>,
|
||||||
@@ -382,9 +433,10 @@ export function isAuthEnabled(): boolean {
|
|||||||
* Get authentication status for health endpoint
|
* Get authentication status for health endpoint
|
||||||
*/
|
*/
|
||||||
export function getAuthStatus(): { enabled: boolean; method: string } {
|
export function getAuthStatus(): { enabled: boolean; method: string } {
|
||||||
|
const disabled = isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH);
|
||||||
return {
|
return {
|
||||||
enabled: true,
|
enabled: !disabled,
|
||||||
method: 'api_key_or_session',
|
method: disabled ? 'disabled' : 'api_key_or_session',
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -392,6 +444,7 @@ export function getAuthStatus(): { enabled: boolean; method: string } {
|
|||||||
* Check if a request is authenticated (for status endpoint)
|
* Check if a request is authenticated (for status endpoint)
|
||||||
*/
|
*/
|
||||||
export function isRequestAuthenticated(req: Request): boolean {
|
export function isRequestAuthenticated(req: Request): boolean {
|
||||||
|
if (isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH)) return true;
|
||||||
const result = checkAuthentication(
|
const result = checkAuthentication(
|
||||||
req.headers as Record<string, string | string[] | undefined>,
|
req.headers as Record<string, string | string[] | undefined>,
|
||||||
req.query as Record<string, string | undefined>,
|
req.query as Record<string, string | undefined>,
|
||||||
@@ -409,5 +462,6 @@ export function checkRawAuthentication(
|
|||||||
query: Record<string, string | undefined>,
|
query: Record<string, string | undefined>,
|
||||||
cookies: Record<string, string | undefined>
|
cookies: Record<string, string | undefined>
|
||||||
): boolean {
|
): boolean {
|
||||||
|
if (isEnvTrue(process.env.AUTOMAKER_DISABLE_AUTH)) return true;
|
||||||
return checkAuthentication(headers, query, cookies).authenticated;
|
return checkAuthentication(headers, query, cookies).authenticated;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,9 +8,6 @@ import { spawn, execSync } from 'child_process';
|
|||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import * as os from 'os';
|
import * as os from 'os';
|
||||||
import { createLogger } from '@automaker/utils';
|
|
||||||
|
|
||||||
const logger = createLogger('CliDetection');
|
|
||||||
|
|
||||||
export interface CliInfo {
|
export interface CliInfo {
|
||||||
name: string;
|
name: string;
|
||||||
@@ -86,7 +83,7 @@ export async function detectCli(
|
|||||||
options: CliDetectionOptions = {}
|
options: CliDetectionOptions = {}
|
||||||
): Promise<CliDetectionResult> {
|
): Promise<CliDetectionResult> {
|
||||||
const config = CLI_CONFIGS[provider];
|
const config = CLI_CONFIGS[provider];
|
||||||
const { timeout = 5000, includeWsl = false, wslDistribution } = options;
|
const { timeout = 5000 } = options;
|
||||||
const issues: string[] = [];
|
const issues: string[] = [];
|
||||||
|
|
||||||
const cliInfo: CliInfo = {
|
const cliInfo: CliInfo = {
|
||||||
|
|||||||
@@ -5,9 +5,11 @@
|
|||||||
* Never assumes authenticated - only returns true if CLI confirms.
|
* Never assumes authenticated - only returns true if CLI confirms.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { spawnProcess, getCodexAuthPath } from '@automaker/platform';
|
import { spawnProcess } from '@automaker/platform';
|
||||||
import { findCodexCliPath } from '@automaker/platform';
|
import { findCodexCliPath } from '@automaker/platform';
|
||||||
import * as fs from 'fs';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
|
const logger = createLogger('CodexAuth');
|
||||||
|
|
||||||
const CODEX_COMMAND = 'codex';
|
const CODEX_COMMAND = 'codex';
|
||||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||||
@@ -26,36 +28,16 @@ export interface CodexAuthCheckResult {
|
|||||||
export async function checkCodexAuthentication(
|
export async function checkCodexAuthentication(
|
||||||
cliPath?: string | null
|
cliPath?: string | null
|
||||||
): Promise<CodexAuthCheckResult> {
|
): Promise<CodexAuthCheckResult> {
|
||||||
console.log('[CodexAuth] checkCodexAuthentication called with cliPath:', cliPath);
|
|
||||||
|
|
||||||
const resolvedCliPath = cliPath || (await findCodexCliPath());
|
const resolvedCliPath = cliPath || (await findCodexCliPath());
|
||||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
||||||
|
|
||||||
console.log('[CodexAuth] resolvedCliPath:', resolvedCliPath);
|
|
||||||
console.log('[CodexAuth] hasApiKey:', hasApiKey);
|
|
||||||
|
|
||||||
// Debug: Check auth file
|
|
||||||
const authFilePath = getCodexAuthPath();
|
|
||||||
console.log('[CodexAuth] Auth file path:', authFilePath);
|
|
||||||
try {
|
|
||||||
const authFileExists = fs.existsSync(authFilePath);
|
|
||||||
console.log('[CodexAuth] Auth file exists:', authFileExists);
|
|
||||||
if (authFileExists) {
|
|
||||||
const authContent = fs.readFileSync(authFilePath, 'utf-8');
|
|
||||||
console.log('[CodexAuth] Auth file content:', authContent.substring(0, 500)); // First 500 chars
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.log('[CodexAuth] Error reading auth file:', error);
|
|
||||||
}
|
|
||||||
|
|
||||||
// If CLI is not installed, cannot be authenticated
|
// If CLI is not installed, cannot be authenticated
|
||||||
if (!resolvedCliPath) {
|
if (!resolvedCliPath) {
|
||||||
console.log('[CodexAuth] No CLI path found, returning not authenticated');
|
logger.info('CLI not found');
|
||||||
return { authenticated: false, method: 'none' };
|
return { authenticated: false, method: 'none' };
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
console.log('[CodexAuth] Running: ' + resolvedCliPath + ' login status');
|
|
||||||
const result = await spawnProcess({
|
const result = await spawnProcess({
|
||||||
command: resolvedCliPath || CODEX_COMMAND,
|
command: resolvedCliPath || CODEX_COMMAND,
|
||||||
args: ['login', 'status'],
|
args: ['login', 'status'],
|
||||||
@@ -66,33 +48,21 @@ export async function checkCodexAuthentication(
|
|||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
console.log('[CodexAuth] Command result:');
|
|
||||||
console.log('[CodexAuth] exitCode:', result.exitCode);
|
|
||||||
console.log('[CodexAuth] stdout:', JSON.stringify(result.stdout));
|
|
||||||
console.log('[CodexAuth] stderr:', JSON.stringify(result.stderr));
|
|
||||||
|
|
||||||
// Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
|
// Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
|
||||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||||
const isLoggedIn = combinedOutput.includes('logged in');
|
const isLoggedIn = combinedOutput.includes('logged in');
|
||||||
console.log('[CodexAuth] isLoggedIn (contains "logged in" in stdout or stderr):', isLoggedIn);
|
|
||||||
|
|
||||||
if (result.exitCode === 0 && isLoggedIn) {
|
if (result.exitCode === 0 && isLoggedIn) {
|
||||||
// Determine auth method based on what we know
|
// Determine auth method based on what we know
|
||||||
const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
|
const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
|
||||||
console.log('[CodexAuth] Authenticated! method:', method);
|
logger.info(`✓ Authenticated (${method})`);
|
||||||
return { authenticated: true, method };
|
return { authenticated: true, method };
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log(
|
logger.info('Not authenticated');
|
||||||
'[CodexAuth] Not authenticated. exitCode:',
|
|
||||||
result.exitCode,
|
|
||||||
'isLoggedIn:',
|
|
||||||
isLoggedIn
|
|
||||||
);
|
|
||||||
} catch (error) {
|
|
||||||
console.log('[CodexAuth] Error running command:', error);
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log('[CodexAuth] Returning not authenticated');
|
|
||||||
return { authenticated: false, method: 'none' };
|
return { authenticated: false, method: 'none' };
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Failed to check authentication:', error);
|
||||||
|
return { authenticated: false, method: 'none' };
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ export interface ErrorClassification {
|
|||||||
suggestedAction?: string;
|
suggestedAction?: string;
|
||||||
retryable: boolean;
|
retryable: boolean;
|
||||||
provider?: string;
|
provider?: string;
|
||||||
context?: Record<string, any>;
|
context?: Record<string, unknown>;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface ErrorPattern {
|
export interface ErrorPattern {
|
||||||
@@ -180,7 +180,7 @@ const ERROR_PATTERNS: ErrorPattern[] = [
|
|||||||
export function classifyError(
|
export function classifyError(
|
||||||
error: unknown,
|
error: unknown,
|
||||||
provider?: string,
|
provider?: string,
|
||||||
context?: Record<string, any>
|
context?: Record<string, unknown>
|
||||||
): ErrorClassification {
|
): ErrorClassification {
|
||||||
const errorText = getErrorText(error);
|
const errorText = getErrorText(error);
|
||||||
|
|
||||||
@@ -281,18 +281,19 @@ function getErrorText(error: unknown): string {
|
|||||||
|
|
||||||
if (typeof error === 'object' && error !== null) {
|
if (typeof error === 'object' && error !== null) {
|
||||||
// Handle structured error objects
|
// Handle structured error objects
|
||||||
const errorObj = error as any;
|
const errorObj = error as Record<string, unknown>;
|
||||||
|
|
||||||
if (errorObj.message) {
|
if (typeof errorObj.message === 'string') {
|
||||||
return errorObj.message;
|
return errorObj.message;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (errorObj.error?.message) {
|
const nestedError = errorObj.error;
|
||||||
return errorObj.error.message;
|
if (typeof nestedError === 'object' && nestedError !== null && 'message' in nestedError) {
|
||||||
|
return String((nestedError as Record<string, unknown>).message);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (errorObj.error) {
|
if (nestedError) {
|
||||||
return typeof errorObj.error === 'string' ? errorObj.error : JSON.stringify(errorObj.error);
|
return typeof nestedError === 'string' ? nestedError : JSON.stringify(nestedError);
|
||||||
}
|
}
|
||||||
|
|
||||||
return JSON.stringify(error);
|
return JSON.stringify(error);
|
||||||
@@ -307,7 +308,7 @@ function getErrorText(error: unknown): string {
|
|||||||
export function createErrorResponse(
|
export function createErrorResponse(
|
||||||
error: unknown,
|
error: unknown,
|
||||||
provider?: string,
|
provider?: string,
|
||||||
context?: Record<string, any>
|
context?: Record<string, unknown>
|
||||||
): {
|
): {
|
||||||
success: false;
|
success: false;
|
||||||
error: string;
|
error: string;
|
||||||
@@ -335,7 +336,7 @@ export function logError(
|
|||||||
error: unknown,
|
error: unknown,
|
||||||
provider?: string,
|
provider?: string,
|
||||||
operation?: string,
|
operation?: string,
|
||||||
additionalContext?: Record<string, any>
|
additionalContext?: Record<string, unknown>
|
||||||
): void {
|
): void {
|
||||||
const classification = classifyError(error, provider, {
|
const classification = classifyError(error, provider, {
|
||||||
operation,
|
operation,
|
||||||
|
|||||||
62
apps/server/src/lib/git-log-parser.ts
Normal file
62
apps/server/src/lib/git-log-parser.ts
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
export interface CommitFields {
|
||||||
|
hash: string;
|
||||||
|
shortHash: string;
|
||||||
|
author: string;
|
||||||
|
authorEmail: string;
|
||||||
|
date: string;
|
||||||
|
subject: string;
|
||||||
|
body: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function parseGitLogOutput(output: string): CommitFields[] {
|
||||||
|
const commits: CommitFields[] = [];
|
||||||
|
|
||||||
|
// Split by NUL character to separate commits
|
||||||
|
const commitBlocks = output.split('\0').filter((block) => block.trim());
|
||||||
|
|
||||||
|
for (const block of commitBlocks) {
|
||||||
|
const allLines = block.split('\n');
|
||||||
|
|
||||||
|
// Skip leading empty lines that may appear at block boundaries
|
||||||
|
let startIndex = 0;
|
||||||
|
while (startIndex < allLines.length && allLines[startIndex].trim() === '') {
|
||||||
|
startIndex++;
|
||||||
|
}
|
||||||
|
const fields = allLines.slice(startIndex);
|
||||||
|
|
||||||
|
// Validate we have all expected fields (at least hash, shortHash, author, authorEmail, date, subject)
|
||||||
|
if (fields.length < 6) {
|
||||||
|
continue; // Skip malformed blocks
|
||||||
|
}
|
||||||
|
|
||||||
|
const commit: CommitFields = {
|
||||||
|
hash: fields[0].trim(),
|
||||||
|
shortHash: fields[1].trim(),
|
||||||
|
author: fields[2].trim(),
|
||||||
|
authorEmail: fields[3].trim(),
|
||||||
|
date: fields[4].trim(),
|
||||||
|
subject: fields[5].trim(),
|
||||||
|
body: fields.slice(6).join('\n').trim(),
|
||||||
|
};
|
||||||
|
|
||||||
|
commits.push(commit);
|
||||||
|
}
|
||||||
|
|
||||||
|
return commits;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a commit object from parsed fields, matching the expected API response format
|
||||||
|
*/
|
||||||
|
export function createCommitFromFields(fields: CommitFields, files?: string[]) {
|
||||||
|
return {
|
||||||
|
hash: fields.hash,
|
||||||
|
shortHash: fields.shortHash,
|
||||||
|
author: fields.author,
|
||||||
|
authorEmail: fields.authorEmail,
|
||||||
|
date: fields.date,
|
||||||
|
subject: fields.subject,
|
||||||
|
body: fields.body,
|
||||||
|
files: files || [],
|
||||||
|
};
|
||||||
|
}
|
||||||
208
apps/server/src/lib/git.ts
Normal file
208
apps/server/src/lib/git.ts
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
/**
|
||||||
|
* Shared git command execution utilities.
|
||||||
|
*
|
||||||
|
* This module provides the canonical `execGitCommand` helper and common
|
||||||
|
* git utilities used across services and routes. All consumers should
|
||||||
|
* import from here rather than defining their own copy.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs/promises';
|
||||||
|
import path from 'path';
|
||||||
|
import { spawnProcess } from '@automaker/platform';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
|
const logger = createLogger('GitLib');
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Secure Command Execution
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute git command with array arguments to prevent command injection.
|
||||||
|
* Uses spawnProcess from @automaker/platform for secure, cross-platform execution.
|
||||||
|
*
|
||||||
|
* @param args - Array of git command arguments (e.g., ['worktree', 'add', path])
|
||||||
|
* @param cwd - Working directory to execute the command in
|
||||||
|
* @param env - Optional additional environment variables to pass to the git process.
|
||||||
|
* These are merged on top of the current process environment. Pass
|
||||||
|
* `{ LC_ALL: 'C' }` to force git to emit English output regardless of the
|
||||||
|
* system locale so that text-based output parsing remains reliable.
|
||||||
|
* @param abortController - Optional AbortController to cancel the git process.
|
||||||
|
* When the controller is aborted the underlying process is sent SIGTERM and
|
||||||
|
* the returned promise rejects with an Error whose message is 'Process aborted'.
|
||||||
|
* @returns Promise resolving to stdout output
|
||||||
|
* @throws Error with stderr/stdout message if command fails. The thrown error
|
||||||
|
* also has `stdout` and `stderr` string properties for structured access.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* // Safe: no injection possible
|
||||||
|
* await execGitCommand(['branch', '-D', branchName], projectPath);
|
||||||
|
*
|
||||||
|
* // Force English output for reliable text parsing:
|
||||||
|
* await execGitCommand(['rebase', '--', 'main'], worktreePath, { LC_ALL: 'C' });
|
||||||
|
*
|
||||||
|
* // With a process-level timeout:
|
||||||
|
* const controller = new AbortController();
|
||||||
|
* const timerId = setTimeout(() => controller.abort(), 30_000);
|
||||||
|
* try {
|
||||||
|
* await execGitCommand(['fetch', '--all', '--quiet'], cwd, undefined, controller);
|
||||||
|
* } finally {
|
||||||
|
* clearTimeout(timerId);
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* // Instead of unsafe:
|
||||||
|
* // await execAsync(`git branch -D ${branchName}`, { cwd });
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
export async function execGitCommand(
|
||||||
|
args: string[],
|
||||||
|
cwd: string,
|
||||||
|
env?: Record<string, string>,
|
||||||
|
abortController?: AbortController
|
||||||
|
): Promise<string> {
|
||||||
|
const result = await spawnProcess({
|
||||||
|
command: 'git',
|
||||||
|
args,
|
||||||
|
cwd,
|
||||||
|
...(env !== undefined ? { env } : {}),
|
||||||
|
...(abortController !== undefined ? { abortController } : {}),
|
||||||
|
});
|
||||||
|
|
||||||
|
// spawnProcess returns { stdout, stderr, exitCode }
|
||||||
|
if (result.exitCode === 0) {
|
||||||
|
return result.stdout;
|
||||||
|
} else {
|
||||||
|
const errorMessage =
|
||||||
|
result.stderr || result.stdout || `Git command failed with code ${result.exitCode}`;
|
||||||
|
throw Object.assign(new Error(errorMessage), {
|
||||||
|
stdout: result.stdout,
|
||||||
|
stderr: result.stderr,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Common Git Utilities
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the current branch name for the given worktree.
|
||||||
|
*
|
||||||
|
* This is the canonical implementation shared across services. Services
|
||||||
|
* should import this rather than duplicating the logic locally.
|
||||||
|
*
|
||||||
|
* @param worktreePath - Path to the git worktree
|
||||||
|
* @returns The current branch name (trimmed)
|
||||||
|
*/
|
||||||
|
export async function getCurrentBranch(worktreePath: string): Promise<string> {
|
||||||
|
const branchOutput = await execGitCommand(['rev-parse', '--abbrev-ref', 'HEAD'], worktreePath);
|
||||||
|
return branchOutput.trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// Index Lock Recovery
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check whether an error message indicates a stale git index lock file.
|
||||||
|
*
|
||||||
|
* Git operations that write to the index (e.g. `git stash push`) will fail
|
||||||
|
* with "could not write index" or "Unable to create ... .lock" when a
|
||||||
|
* `.git/index.lock` file exists from a previously interrupted operation.
|
||||||
|
*
|
||||||
|
* @param errorMessage - The error string from a failed git command
|
||||||
|
* @returns true if the error looks like a stale index lock issue
|
||||||
|
*/
|
||||||
|
export function isIndexLockError(errorMessage: string): boolean {
|
||||||
|
const lower = errorMessage.toLowerCase();
|
||||||
|
return (
|
||||||
|
lower.includes('could not write index') ||
|
||||||
|
(lower.includes('unable to create') && lower.includes('index.lock')) ||
|
||||||
|
lower.includes('index.lock')
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Attempt to remove a stale `.git/index.lock` file for the given worktree.
|
||||||
|
*
|
||||||
|
* Uses `git rev-parse --git-dir` to locate the correct `.git` directory,
|
||||||
|
* which works for both regular repositories and linked worktrees.
|
||||||
|
*
|
||||||
|
* @param worktreePath - Path to the git worktree (or main repo)
|
||||||
|
* @returns true if a lock file was found and removed, false otherwise
|
||||||
|
*/
|
||||||
|
export async function removeStaleIndexLock(worktreePath: string): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
// Resolve the .git directory (handles worktrees correctly)
|
||||||
|
const gitDirRaw = await execGitCommand(['rev-parse', '--git-dir'], worktreePath);
|
||||||
|
const gitDir = path.resolve(worktreePath, gitDirRaw.trim());
|
||||||
|
const lockFilePath = path.join(gitDir, 'index.lock');
|
||||||
|
|
||||||
|
// Check if the lock file exists
|
||||||
|
try {
|
||||||
|
await fs.access(lockFilePath);
|
||||||
|
} catch {
|
||||||
|
// Lock file does not exist — nothing to remove
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the stale lock file
|
||||||
|
await fs.unlink(lockFilePath);
|
||||||
|
logger.info('Removed stale index.lock file', { worktreePath, lockFilePath });
|
||||||
|
return true;
|
||||||
|
} catch (err) {
|
||||||
|
logger.warn('Failed to remove stale index.lock file', {
|
||||||
|
worktreePath,
|
||||||
|
error: err instanceof Error ? err.message : String(err),
|
||||||
|
});
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a git command with automatic retry when a stale index.lock is detected.
|
||||||
|
*
|
||||||
|
* If the command fails with an error indicating a locked index file, this
|
||||||
|
* helper will attempt to remove the stale `.git/index.lock` and retry the
|
||||||
|
* command exactly once.
|
||||||
|
*
|
||||||
|
* This is particularly useful for `git stash push` which writes to the
|
||||||
|
* index and commonly fails when a previous git operation was interrupted.
|
||||||
|
*
|
||||||
|
* @param args - Array of git command arguments
|
||||||
|
* @param cwd - Working directory to execute the command in
|
||||||
|
* @param env - Optional additional environment variables
|
||||||
|
* @returns Promise resolving to stdout output
|
||||||
|
* @throws The original error if retry also fails, or a non-lock error
|
||||||
|
*/
|
||||||
|
export async function execGitCommandWithLockRetry(
|
||||||
|
args: string[],
|
||||||
|
cwd: string,
|
||||||
|
env?: Record<string, string>
|
||||||
|
): Promise<string> {
|
||||||
|
try {
|
||||||
|
return await execGitCommand(args, cwd, env);
|
||||||
|
} catch (error: unknown) {
|
||||||
|
const err = error as { message?: string; stderr?: string };
|
||||||
|
const errorMessage = err.stderr || err.message || '';
|
||||||
|
|
||||||
|
if (!isIndexLockError(errorMessage)) {
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info('Git command failed due to index lock, attempting cleanup and retry', {
|
||||||
|
cwd,
|
||||||
|
args: args.join(' '),
|
||||||
|
});
|
||||||
|
|
||||||
|
const removed = await removeStaleIndexLock(cwd);
|
||||||
|
if (!removed) {
|
||||||
|
// Could not remove the lock file — re-throw the original error
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry the command once after removing the lock file
|
||||||
|
return await execGitCommand(args, cwd, env);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -12,11 +12,18 @@ export interface PermissionCheckResult {
|
|||||||
reason?: string;
|
reason?: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Minimal shape of a Cursor tool call used for permission checking */
|
||||||
|
interface CursorToolCall {
|
||||||
|
shellToolCall?: { args?: { command: string } };
|
||||||
|
readToolCall?: { args?: { path: string } };
|
||||||
|
writeToolCall?: { args?: { path: string } };
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if a tool call is allowed based on permissions
|
* Check if a tool call is allowed based on permissions
|
||||||
*/
|
*/
|
||||||
export function checkToolCallPermission(
|
export function checkToolCallPermission(
|
||||||
toolCall: any,
|
toolCall: CursorToolCall,
|
||||||
permissions: CursorCliConfigFile | null
|
permissions: CursorCliConfigFile | null
|
||||||
): PermissionCheckResult {
|
): PermissionCheckResult {
|
||||||
if (!permissions || !permissions.permissions) {
|
if (!permissions || !permissions.permissions) {
|
||||||
@@ -152,7 +159,11 @@ function matchesRule(toolName: string, rule: string): boolean {
|
|||||||
/**
|
/**
|
||||||
* Log permission violations
|
* Log permission violations
|
||||||
*/
|
*/
|
||||||
export function logPermissionViolation(toolCall: any, reason: string, sessionId?: string): void {
|
export function logPermissionViolation(
|
||||||
|
toolCall: CursorToolCall,
|
||||||
|
reason: string,
|
||||||
|
sessionId?: string
|
||||||
|
): void {
|
||||||
const sessionIdStr = sessionId ? ` [${sessionId}]` : '';
|
const sessionIdStr = sessionId ? ` [${sessionId}]` : '';
|
||||||
|
|
||||||
if (toolCall.shellToolCall?.args?.command) {
|
if (toolCall.shellToolCall?.args?.command) {
|
||||||
|
|||||||
@@ -129,10 +129,30 @@ export const TOOL_PRESETS = {
|
|||||||
specGeneration: ['Read', 'Glob', 'Grep'] as const,
|
specGeneration: ['Read', 'Glob', 'Grep'] as const,
|
||||||
|
|
||||||
/** Full tool access for feature implementation */
|
/** Full tool access for feature implementation */
|
||||||
fullAccess: ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'] as const,
|
fullAccess: [
|
||||||
|
'Read',
|
||||||
|
'Write',
|
||||||
|
'Edit',
|
||||||
|
'Glob',
|
||||||
|
'Grep',
|
||||||
|
'Bash',
|
||||||
|
'WebSearch',
|
||||||
|
'WebFetch',
|
||||||
|
'TodoWrite',
|
||||||
|
] as const,
|
||||||
|
|
||||||
/** Tools for chat/interactive mode */
|
/** Tools for chat/interactive mode */
|
||||||
chat: ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'] as const,
|
chat: [
|
||||||
|
'Read',
|
||||||
|
'Write',
|
||||||
|
'Edit',
|
||||||
|
'Glob',
|
||||||
|
'Grep',
|
||||||
|
'Bash',
|
||||||
|
'WebSearch',
|
||||||
|
'WebFetch',
|
||||||
|
'TodoWrite',
|
||||||
|
] as const,
|
||||||
} as const;
|
} as const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -233,11 +253,27 @@ function buildMcpOptions(config: CreateSdkOptionsConfig): McpOptions {
|
|||||||
/**
|
/**
|
||||||
* Build thinking options for SDK configuration.
|
* Build thinking options for SDK configuration.
|
||||||
* Converts ThinkingLevel to maxThinkingTokens for the Claude SDK.
|
* Converts ThinkingLevel to maxThinkingTokens for the Claude SDK.
|
||||||
|
* For adaptive thinking (Opus 4.6), omits maxThinkingTokens to let the model
|
||||||
|
* decide its own reasoning depth.
|
||||||
*
|
*
|
||||||
* @param thinkingLevel - The thinking level to convert
|
* @param thinkingLevel - The thinking level to convert
|
||||||
* @returns Object with maxThinkingTokens if thinking is enabled
|
* @returns Object with maxThinkingTokens if thinking is enabled with a budget
|
||||||
*/
|
*/
|
||||||
function buildThinkingOptions(thinkingLevel?: ThinkingLevel): Partial<Options> {
|
function buildThinkingOptions(thinkingLevel?: ThinkingLevel): Partial<Options> {
|
||||||
|
if (!thinkingLevel || thinkingLevel === 'none') {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adaptive thinking (Opus 4.6): don't set maxThinkingTokens
|
||||||
|
// The model will use adaptive thinking by default
|
||||||
|
if (thinkingLevel === 'adaptive') {
|
||||||
|
logger.debug(
|
||||||
|
`buildThinkingOptions: thinkingLevel="adaptive" -> no maxThinkingTokens (model decides)`
|
||||||
|
);
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manual budget-based thinking for Haiku/Sonnet
|
||||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
||||||
logger.debug(
|
logger.debug(
|
||||||
`buildThinkingOptions: thinkingLevel="${thinkingLevel}" -> maxThinkingTokens=${maxThinkingTokens}`
|
`buildThinkingOptions: thinkingLevel="${thinkingLevel}" -> maxThinkingTokens=${maxThinkingTokens}`
|
||||||
|
|||||||
@@ -5,12 +5,30 @@
|
|||||||
import type { SettingsService } from '../services/settings-service.js';
|
import type { SettingsService } from '../services/settings-service.js';
|
||||||
import type { ContextFilesResult, ContextFileInfo } from '@automaker/utils';
|
import type { ContextFilesResult, ContextFileInfo } from '@automaker/utils';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import type { MCPServerConfig, McpServerConfig, PromptCustomization } from '@automaker/types';
|
import type {
|
||||||
|
MCPServerConfig,
|
||||||
|
McpServerConfig,
|
||||||
|
PromptCustomization,
|
||||||
|
ClaudeApiProfile,
|
||||||
|
ClaudeCompatibleProvider,
|
||||||
|
PhaseModelKey,
|
||||||
|
PhaseModelEntry,
|
||||||
|
Credentials,
|
||||||
|
} from '@automaker/types';
|
||||||
|
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
|
||||||
import {
|
import {
|
||||||
mergeAutoModePrompts,
|
mergeAutoModePrompts,
|
||||||
mergeAgentPrompts,
|
mergeAgentPrompts,
|
||||||
mergeBacklogPlanPrompts,
|
mergeBacklogPlanPrompts,
|
||||||
mergeEnhancementPrompts,
|
mergeEnhancementPrompts,
|
||||||
|
mergeCommitMessagePrompts,
|
||||||
|
mergeTitleGenerationPrompts,
|
||||||
|
mergeIssueValidationPrompts,
|
||||||
|
mergeIdeationPrompts,
|
||||||
|
mergeAppSpecPrompts,
|
||||||
|
mergeContextDescriptionPrompts,
|
||||||
|
mergeSuggestionsPrompts,
|
||||||
|
mergeTaskExecutionPrompts,
|
||||||
} from '@automaker/prompts';
|
} from '@automaker/prompts';
|
||||||
|
|
||||||
const logger = createLogger('SettingsHelper');
|
const logger = createLogger('SettingsHelper');
|
||||||
@@ -218,6 +236,14 @@ export async function getPromptCustomization(
|
|||||||
agent: ReturnType<typeof mergeAgentPrompts>;
|
agent: ReturnType<typeof mergeAgentPrompts>;
|
||||||
backlogPlan: ReturnType<typeof mergeBacklogPlanPrompts>;
|
backlogPlan: ReturnType<typeof mergeBacklogPlanPrompts>;
|
||||||
enhancement: ReturnType<typeof mergeEnhancementPrompts>;
|
enhancement: ReturnType<typeof mergeEnhancementPrompts>;
|
||||||
|
commitMessage: ReturnType<typeof mergeCommitMessagePrompts>;
|
||||||
|
titleGeneration: ReturnType<typeof mergeTitleGenerationPrompts>;
|
||||||
|
issueValidation: ReturnType<typeof mergeIssueValidationPrompts>;
|
||||||
|
ideation: ReturnType<typeof mergeIdeationPrompts>;
|
||||||
|
appSpec: ReturnType<typeof mergeAppSpecPrompts>;
|
||||||
|
contextDescription: ReturnType<typeof mergeContextDescriptionPrompts>;
|
||||||
|
suggestions: ReturnType<typeof mergeSuggestionsPrompts>;
|
||||||
|
taskExecution: ReturnType<typeof mergeTaskExecutionPrompts>;
|
||||||
}> {
|
}> {
|
||||||
let customization: PromptCustomization = {};
|
let customization: PromptCustomization = {};
|
||||||
|
|
||||||
@@ -239,6 +265,14 @@ export async function getPromptCustomization(
|
|||||||
agent: mergeAgentPrompts(customization.agent),
|
agent: mergeAgentPrompts(customization.agent),
|
||||||
backlogPlan: mergeBacklogPlanPrompts(customization.backlogPlan),
|
backlogPlan: mergeBacklogPlanPrompts(customization.backlogPlan),
|
||||||
enhancement: mergeEnhancementPrompts(customization.enhancement),
|
enhancement: mergeEnhancementPrompts(customization.enhancement),
|
||||||
|
commitMessage: mergeCommitMessagePrompts(customization.commitMessage),
|
||||||
|
titleGeneration: mergeTitleGenerationPrompts(customization.titleGeneration),
|
||||||
|
issueValidation: mergeIssueValidationPrompts(customization.issueValidation),
|
||||||
|
ideation: mergeIdeationPrompts(customization.ideation),
|
||||||
|
appSpec: mergeAppSpecPrompts(customization.appSpec),
|
||||||
|
contextDescription: mergeContextDescriptionPrompts(customization.contextDescription),
|
||||||
|
suggestions: mergeSuggestionsPrompts(customization.suggestions),
|
||||||
|
taskExecution: mergeTaskExecutionPrompts(customization.taskExecution),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -321,3 +355,376 @@ export async function getCustomSubagents(
|
|||||||
|
|
||||||
return Object.keys(merged).length > 0 ? merged : undefined;
|
return Object.keys(merged).length > 0 ? merged : undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Result from getActiveClaudeApiProfile */
|
||||||
|
export interface ActiveClaudeApiProfileResult {
|
||||||
|
/** The active profile, or undefined if using direct Anthropic API */
|
||||||
|
profile: ClaudeApiProfile | undefined;
|
||||||
|
/** Credentials for resolving 'credentials' apiKeySource */
|
||||||
|
credentials: import('@automaker/types').Credentials | undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the active Claude API profile and credentials from settings.
|
||||||
|
* Checks project settings first for per-project overrides, then falls back to global settings.
|
||||||
|
* Returns both the profile and credentials for resolving 'credentials' apiKeySource.
|
||||||
|
*
|
||||||
|
* @deprecated Use getProviderById and getPhaseModelWithOverrides instead for the new provider system.
|
||||||
|
* This function is kept for backward compatibility during migration.
|
||||||
|
*
|
||||||
|
* @param settingsService - Optional settings service instance
|
||||||
|
* @param logPrefix - Prefix for log messages (e.g., '[AgentService]')
|
||||||
|
* @param projectPath - Optional project path for per-project override
|
||||||
|
* @returns Promise resolving to object with profile and credentials
|
||||||
|
*/
|
||||||
|
export async function getActiveClaudeApiProfile(
|
||||||
|
settingsService?: SettingsService | null,
|
||||||
|
logPrefix = '[SettingsHelper]',
|
||||||
|
projectPath?: string
|
||||||
|
): Promise<ActiveClaudeApiProfileResult> {
|
||||||
|
if (!settingsService) {
|
||||||
|
return { profile: undefined, credentials: undefined };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const globalSettings = await settingsService.getGlobalSettings();
|
||||||
|
const credentials = await settingsService.getCredentials();
|
||||||
|
const profiles = globalSettings.claudeApiProfiles || [];
|
||||||
|
|
||||||
|
// Check for project-level override first
|
||||||
|
let activeProfileId: string | null | undefined;
|
||||||
|
let isProjectOverride = false;
|
||||||
|
|
||||||
|
if (projectPath) {
|
||||||
|
const projectSettings = await settingsService.getProjectSettings(projectPath);
|
||||||
|
// undefined = use global, null = explicit no profile, string = specific profile
|
||||||
|
if (projectSettings.activeClaudeApiProfileId !== undefined) {
|
||||||
|
activeProfileId = projectSettings.activeClaudeApiProfileId;
|
||||||
|
isProjectOverride = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to global if project doesn't specify
|
||||||
|
if (activeProfileId === undefined && !isProjectOverride) {
|
||||||
|
activeProfileId = globalSettings.activeClaudeApiProfileId;
|
||||||
|
}
|
||||||
|
|
||||||
|
// No active profile selected - use direct Anthropic API
|
||||||
|
if (!activeProfileId) {
|
||||||
|
if (isProjectOverride && activeProfileId === null) {
|
||||||
|
logger.info(`${logPrefix} Project explicitly using Direct Anthropic API`);
|
||||||
|
}
|
||||||
|
return { profile: undefined, credentials };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the active profile by ID
|
||||||
|
const activeProfile = profiles.find((p) => p.id === activeProfileId);
|
||||||
|
|
||||||
|
if (activeProfile) {
|
||||||
|
const overrideSuffix = isProjectOverride ? ' (project override)' : '';
|
||||||
|
logger.info(`${logPrefix} Using Claude API profile: ${activeProfile.name}${overrideSuffix}`);
|
||||||
|
return { profile: activeProfile, credentials };
|
||||||
|
} else {
|
||||||
|
logger.warn(
|
||||||
|
`${logPrefix} Active profile ID "${activeProfileId}" not found, falling back to direct Anthropic API`
|
||||||
|
);
|
||||||
|
return { profile: undefined, credentials };
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`${logPrefix} Failed to load Claude API profile:`, error);
|
||||||
|
return { profile: undefined, credentials: undefined };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// New Provider System Helpers
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
/** Result from getProviderById */
export interface ProviderByIdResult {
  /** The matched provider, or undefined if no provider has the requested ID (or loading failed) */
  provider: ClaudeCompatibleProvider | undefined;
  /** Stored credentials for resolving the 'credentials' apiKeySource; undefined when settings could not be read */
  credentials: Credentials | undefined;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a ClaudeCompatibleProvider by its ID.
|
||||||
|
* Returns the provider configuration and credentials for API key resolution.
|
||||||
|
*
|
||||||
|
* @param providerId - The provider ID to look up
|
||||||
|
* @param settingsService - Settings service instance
|
||||||
|
* @param logPrefix - Prefix for log messages
|
||||||
|
* @returns Promise resolving to object with provider and credentials
|
||||||
|
*/
|
||||||
|
export async function getProviderById(
|
||||||
|
providerId: string,
|
||||||
|
settingsService: SettingsService,
|
||||||
|
logPrefix = '[SettingsHelper]'
|
||||||
|
): Promise<ProviderByIdResult> {
|
||||||
|
try {
|
||||||
|
const globalSettings = await settingsService.getGlobalSettings();
|
||||||
|
const credentials = await settingsService.getCredentials();
|
||||||
|
const providers = globalSettings.claudeCompatibleProviders || [];
|
||||||
|
|
||||||
|
const provider = providers.find((p) => p.id === providerId);
|
||||||
|
|
||||||
|
if (provider) {
|
||||||
|
if (provider.enabled === false) {
|
||||||
|
logger.warn(`${logPrefix} Provider "${provider.name}" (${providerId}) is disabled`);
|
||||||
|
} else {
|
||||||
|
logger.debug(`${logPrefix} Found provider: ${provider.name}`);
|
||||||
|
}
|
||||||
|
return { provider, credentials };
|
||||||
|
} else {
|
||||||
|
logger.warn(`${logPrefix} Provider not found: ${providerId}`);
|
||||||
|
return { provider: undefined, credentials };
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`${logPrefix} Failed to load provider by ID:`, error);
|
||||||
|
return { provider: undefined, credentials: undefined };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Result from getPhaseModelWithOverrides */
export interface PhaseModelWithOverridesResult {
  /** The resolved phase model entry (project override, global setting, or built-in default) */
  phaseModel: PhaseModelEntry;
  /** Whether a project-level override was applied instead of the global setting */
  isProjectOverride: boolean;
  /** The provider when phaseModel.providerId is set, found, and enabled; undefined otherwise */
  provider: ClaudeCompatibleProvider | undefined;
  /** Credentials for API key resolution; undefined when settings could not be read */
  credentials: Credentials | undefined;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the phase model configuration for a specific phase, applying project overrides if available.
|
||||||
|
* Also resolves the provider if the phase model has a providerId.
|
||||||
|
*
|
||||||
|
* @param phase - The phase key (e.g., 'enhancementModel', 'specGenerationModel')
|
||||||
|
* @param settingsService - Optional settings service instance (returns defaults if undefined)
|
||||||
|
* @param projectPath - Optional project path for checking overrides
|
||||||
|
* @param logPrefix - Prefix for log messages
|
||||||
|
* @returns Promise resolving to phase model with provider info
|
||||||
|
*/
|
||||||
|
export async function getPhaseModelWithOverrides(
|
||||||
|
phase: PhaseModelKey,
|
||||||
|
settingsService?: SettingsService | null,
|
||||||
|
projectPath?: string,
|
||||||
|
logPrefix = '[SettingsHelper]'
|
||||||
|
): Promise<PhaseModelWithOverridesResult> {
|
||||||
|
// Handle undefined settingsService gracefully
|
||||||
|
if (!settingsService) {
|
||||||
|
logger.info(`${logPrefix} SettingsService not available, using default for ${phase}`);
|
||||||
|
return {
|
||||||
|
phaseModel: DEFAULT_PHASE_MODELS[phase] || { model: 'sonnet' },
|
||||||
|
isProjectOverride: false,
|
||||||
|
provider: undefined,
|
||||||
|
credentials: undefined,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const globalSettings = await settingsService.getGlobalSettings();
|
||||||
|
const credentials = await settingsService.getCredentials();
|
||||||
|
const globalPhaseModels = globalSettings.phaseModels || {};
|
||||||
|
|
||||||
|
// Start with global phase model
|
||||||
|
let phaseModel = globalPhaseModels[phase];
|
||||||
|
let isProjectOverride = false;
|
||||||
|
|
||||||
|
// Check for project override
|
||||||
|
if (projectPath) {
|
||||||
|
const projectSettings = await settingsService.getProjectSettings(projectPath);
|
||||||
|
const projectOverrides = projectSettings.phaseModelOverrides || {};
|
||||||
|
|
||||||
|
if (projectOverrides[phase]) {
|
||||||
|
phaseModel = projectOverrides[phase];
|
||||||
|
isProjectOverride = true;
|
||||||
|
logger.debug(`${logPrefix} Using project override for ${phase}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no phase model found, use per-phase default
|
||||||
|
if (!phaseModel) {
|
||||||
|
phaseModel = DEFAULT_PHASE_MODELS[phase] || { model: 'sonnet' };
|
||||||
|
logger.debug(`${logPrefix} No ${phase} configured, using default: ${phaseModel.model}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve provider if providerId is set
|
||||||
|
let provider: ClaudeCompatibleProvider | undefined;
|
||||||
|
if (phaseModel.providerId) {
|
||||||
|
const providers = globalSettings.claudeCompatibleProviders || [];
|
||||||
|
provider = providers.find((p) => p.id === phaseModel.providerId);
|
||||||
|
|
||||||
|
if (provider) {
|
||||||
|
if (provider.enabled === false) {
|
||||||
|
logger.warn(
|
||||||
|
`${logPrefix} Provider "${provider.name}" for ${phase} is disabled, falling back to direct API`
|
||||||
|
);
|
||||||
|
provider = undefined;
|
||||||
|
} else {
|
||||||
|
logger.debug(`${logPrefix} Using provider "${provider.name}" for ${phase}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.warn(
|
||||||
|
`${logPrefix} Provider ${phaseModel.providerId} not found for ${phase}, falling back to direct API`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
phaseModel,
|
||||||
|
isProjectOverride,
|
||||||
|
provider,
|
||||||
|
credentials,
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`${logPrefix} Failed to get phase model with overrides:`, error);
|
||||||
|
// Return a safe default
|
||||||
|
return {
|
||||||
|
phaseModel: { model: 'sonnet' },
|
||||||
|
isProjectOverride: false,
|
||||||
|
provider: undefined,
|
||||||
|
credentials: undefined,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Result from getProviderByModelId */
export interface ProviderByModelIdResult {
  /** The enabled provider that contains this model, or undefined if no enabled provider has it */
  provider: ClaudeCompatibleProvider | undefined;
  /** The matching model configuration from the provider's model list, if found */
  modelConfig: import('@automaker/types').ProviderModel | undefined;
  /** Credentials for API key resolution; undefined when the model was not found or loading failed */
  credentials: Credentials | undefined;
  /** The resolved Claude model ID for API calls (resolved from mapsToClaudeModel); undefined when the model has no mapping */
  resolvedModel: string | undefined;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find a ClaudeCompatibleProvider by one of its model IDs.
|
||||||
|
* Searches through all enabled providers to find one that contains the specified model.
|
||||||
|
* This is useful when you have a model string from the UI but need the provider config.
|
||||||
|
*
|
||||||
|
* Also resolves the `mapsToClaudeModel` field to get the actual Claude model ID to use
|
||||||
|
* when calling the API (e.g., "GLM-4.5-Air" -> "claude-haiku-4-5").
|
||||||
|
*
|
||||||
|
* @param modelId - The model ID to search for (e.g., "GLM-4.7", "MiniMax-M2.1")
|
||||||
|
* @param settingsService - Settings service instance
|
||||||
|
* @param logPrefix - Prefix for log messages
|
||||||
|
* @returns Promise resolving to object with provider, model config, credentials, and resolved model
|
||||||
|
*/
|
||||||
|
export async function getProviderByModelId(
|
||||||
|
modelId: string,
|
||||||
|
settingsService: SettingsService,
|
||||||
|
logPrefix = '[SettingsHelper]'
|
||||||
|
): Promise<ProviderByModelIdResult> {
|
||||||
|
try {
|
||||||
|
const globalSettings = await settingsService.getGlobalSettings();
|
||||||
|
const credentials = await settingsService.getCredentials();
|
||||||
|
const providers = globalSettings.claudeCompatibleProviders || [];
|
||||||
|
|
||||||
|
// Search through all enabled providers for this model
|
||||||
|
for (const provider of providers) {
|
||||||
|
// Skip disabled providers
|
||||||
|
if (provider.enabled === false) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this provider has the model
|
||||||
|
const modelConfig = provider.models?.find(
|
||||||
|
(m) => m.id === modelId || m.id.toLowerCase() === modelId.toLowerCase()
|
||||||
|
);
|
||||||
|
|
||||||
|
if (modelConfig) {
|
||||||
|
logger.info(`${logPrefix} Found model "${modelId}" in provider "${provider.name}"`);
|
||||||
|
|
||||||
|
// Resolve the mapped Claude model if specified
|
||||||
|
let resolvedModel: string | undefined;
|
||||||
|
if (modelConfig.mapsToClaudeModel) {
|
||||||
|
// Import resolveModelString to convert alias to full model ID
|
||||||
|
const { resolveModelString } = await import('@automaker/model-resolver');
|
||||||
|
resolvedModel = resolveModelString(modelConfig.mapsToClaudeModel);
|
||||||
|
logger.info(
|
||||||
|
`${logPrefix} Model "${modelId}" maps to Claude model "${modelConfig.mapsToClaudeModel}" -> "${resolvedModel}"`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { provider, modelConfig, credentials, resolvedModel };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Model not found in any provider
|
||||||
|
logger.debug(`${logPrefix} Model "${modelId}" not found in any provider`);
|
||||||
|
return {
|
||||||
|
provider: undefined,
|
||||||
|
modelConfig: undefined,
|
||||||
|
credentials: undefined,
|
||||||
|
resolvedModel: undefined,
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`${logPrefix} Failed to find provider by model ID:`, error);
|
||||||
|
return {
|
||||||
|
provider: undefined,
|
||||||
|
modelConfig: undefined,
|
||||||
|
credentials: undefined,
|
||||||
|
resolvedModel: undefined,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all enabled provider models for use in model dropdowns.
|
||||||
|
* Returns models from all enabled ClaudeCompatibleProviders.
|
||||||
|
*
|
||||||
|
* @param settingsService - Settings service instance
|
||||||
|
* @param logPrefix - Prefix for log messages
|
||||||
|
* @returns Promise resolving to array of provider models with their provider info
|
||||||
|
*/
|
||||||
|
export async function getAllProviderModels(
|
||||||
|
settingsService: SettingsService,
|
||||||
|
logPrefix = '[SettingsHelper]'
|
||||||
|
): Promise<
|
||||||
|
Array<{
|
||||||
|
providerId: string;
|
||||||
|
providerName: string;
|
||||||
|
model: import('@automaker/types').ProviderModel;
|
||||||
|
}>
|
||||||
|
> {
|
||||||
|
try {
|
||||||
|
const globalSettings = await settingsService.getGlobalSettings();
|
||||||
|
const providers = globalSettings.claudeCompatibleProviders || [];
|
||||||
|
|
||||||
|
const allModels: Array<{
|
||||||
|
providerId: string;
|
||||||
|
providerName: string;
|
||||||
|
model: import('@automaker/types').ProviderModel;
|
||||||
|
}> = [];
|
||||||
|
|
||||||
|
for (const provider of providers) {
|
||||||
|
// Skip disabled providers
|
||||||
|
if (provider.enabled === false) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const model of provider.models || []) {
|
||||||
|
allModels.push({
|
||||||
|
providerId: provider.id,
|
||||||
|
providerName: provider.name,
|
||||||
|
model,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
`${logPrefix} Found ${allModels.length} models from ${providers.length} providers`
|
||||||
|
);
|
||||||
|
return allModels;
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`${logPrefix} Failed to get all provider models:`, error);
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
25
apps/server/src/lib/terminal-themes-data.ts
Normal file
25
apps/server/src/lib/terminal-themes-data.ts
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
/**
|
||||||
|
* Terminal Theme Data - Re-export terminal themes from platform package
|
||||||
|
*
|
||||||
|
* This module re-exports terminal theme data for use in the server.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { terminalThemeColors, getTerminalThemeColors as getThemeColors } from '@automaker/platform';
|
||||||
|
import type { ThemeMode } from '@automaker/types';
|
||||||
|
import type { TerminalTheme } from '@automaker/platform';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get terminal theme colors for a given theme mode
|
||||||
|
*/
|
||||||
|
export function getTerminalThemeColors(theme: ThemeMode): TerminalTheme {
|
||||||
|
return getThemeColors(theme);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all terminal themes
|
||||||
|
*/
|
||||||
|
export function getAllTerminalThemes(): Record<ThemeMode, TerminalTheme> {
|
||||||
|
return terminalThemeColors;
|
||||||
|
}
|
||||||
|
|
||||||
|
export default terminalThemeColors;
|
||||||
@@ -5,22 +5,24 @@
|
|||||||
|
|
||||||
import * as secureFs from './secure-fs.js';
|
import * as secureFs from './secure-fs.js';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
|
import type { PRState, WorktreePRInfo } from '@automaker/types';
|
||||||
|
|
||||||
|
// Re-export types for backwards compatibility
|
||||||
|
export type { PRState, WorktreePRInfo };
|
||||||
|
|
||||||
/** Maximum length for sanitized branch names in filesystem paths */
|
/** Maximum length for sanitized branch names in filesystem paths */
|
||||||
const MAX_SANITIZED_BRANCH_PATH_LENGTH = 200;
|
const MAX_SANITIZED_BRANCH_PATH_LENGTH = 200;
|
||||||
|
|
||||||
export interface WorktreePRInfo {
|
|
||||||
number: number;
|
|
||||||
url: string;
|
|
||||||
title: string;
|
|
||||||
state: string;
|
|
||||||
createdAt: string;
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface WorktreeMetadata {
  /** Branch the worktree was created for */
  branch: string;
  /** When the worktree was created (string timestamp — format set by the writer; confirm at write site) */
  createdAt: string;
  /** Pull-request info, present once a PR has been opened from this worktree */
  pr?: WorktreePRInfo;
  /** Whether the init script has been executed for this worktree */
  initScriptRan?: boolean;
  /** Status of the init script execution */
  initScriptStatus?: 'running' | 'success' | 'failed';
  /** Error message if init script failed */
  initScriptError?: string;
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -76,7 +78,7 @@ export async function readWorktreeMetadata(
|
|||||||
const metadataPath = getWorktreeMetadataPath(projectPath, branch);
|
const metadataPath = getWorktreeMetadataPath(projectPath, branch);
|
||||||
const content = (await secureFs.readFile(metadataPath, 'utf-8')) as string;
|
const content = (await secureFs.readFile(metadataPath, 'utf-8')) as string;
|
||||||
return JSON.parse(content) as WorktreeMetadata;
|
return JSON.parse(content) as WorktreeMetadata;
|
||||||
} catch (error) {
|
} catch (_error) {
|
||||||
// File doesn't exist or can't be read
|
// File doesn't exist or can't be read
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|||||||
611
apps/server/src/lib/xml-extractor.ts
Normal file
611
apps/server/src/lib/xml-extractor.ts
Normal file
@@ -0,0 +1,611 @@
|
|||||||
|
/**
|
||||||
|
* XML Extraction Utilities
|
||||||
|
*
|
||||||
|
* Robust XML parsing utilities for extracting and updating sections
|
||||||
|
* from app_spec.txt XML content. Uses regex-based parsing which is
|
||||||
|
* sufficient for our controlled XML structure.
|
||||||
|
*
|
||||||
|
* Note: If more complex XML parsing is needed in the future, consider
|
||||||
|
* using a library like 'fast-xml-parser' or 'xml2js'.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import type { SpecOutput } from '@automaker/types';
|
||||||
|
|
||||||
|
const logger = createLogger('XmlExtractor');
|
||||||
|
|
||||||
|
/**
 * Represents an implemented feature extracted from XML
 */
export interface ImplementedFeature {
  /** Feature name; acts as the case-insensitive identity key for add/remove/update operations */
  name: string;
  /** Human-readable description of the feature */
  description: string;
  /** Optional list of source file locations where the feature is implemented */
  file_locations?: string[];
}
|
||||||
|
|
||||||
|
/**
 * Logger interface for optional custom logging
 */
export interface XmlExtractorLogger {
  /** Verbose diagnostics; called on every extraction/update step */
  debug: (message: string, ...args: unknown[]) => void;
  /** Optional warning channel; callers invoke it with optional chaining so it may be omitted */
  warn?: (message: string, ...args: unknown[]) => void;
}
|
||||||
|
|
||||||
|
/**
 * Options for XML extraction operations
 */
export interface ExtractXmlOptions {
  /** Custom logger; when omitted, the module-level logger is used */
  logger?: XmlExtractorLogger;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Escape special XML characters
|
||||||
|
* Handles undefined/null values by converting them to empty strings
|
||||||
|
*/
|
||||||
|
export function escapeXml(str: string | undefined | null): string {
|
||||||
|
if (str == null) {
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
return str
|
||||||
|
.replace(/&/g, '&')
|
||||||
|
.replace(/</g, '<')
|
||||||
|
.replace(/>/g, '>')
|
||||||
|
.replace(/"/g, '"')
|
||||||
|
.replace(/'/g, ''');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Unescape XML entities back to regular characters
|
||||||
|
*/
|
||||||
|
export function unescapeXml(str: string): string {
|
||||||
|
return str
|
||||||
|
.replace(/'/g, "'")
|
||||||
|
.replace(/"/g, '"')
|
||||||
|
.replace(/>/g, '>')
|
||||||
|
.replace(/</g, '<')
|
||||||
|
.replace(/&/g, '&');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract the content of a specific XML section
|
||||||
|
*
|
||||||
|
* @param xmlContent - The full XML content
|
||||||
|
* @param tagName - The tag name to extract (e.g., 'implemented_features')
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns The content between the tags, or null if not found
|
||||||
|
*/
|
||||||
|
export function extractXmlSection(
|
||||||
|
xmlContent: string,
|
||||||
|
tagName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string | null {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
const regex = new RegExp(`<${tagName}>([\\s\\S]*?)<\\/${tagName}>`, 'i');
|
||||||
|
const match = xmlContent.match(regex);
|
||||||
|
|
||||||
|
if (match) {
|
||||||
|
log.debug(`Extracted <${tagName}> section`);
|
||||||
|
return match[1];
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Section <${tagName}> not found`);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract all values from repeated XML elements
|
||||||
|
*
|
||||||
|
* @param xmlContent - The XML content to search
|
||||||
|
* @param tagName - The tag name to extract values from
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of extracted values (unescaped)
|
||||||
|
*/
|
||||||
|
export function extractXmlElements(
|
||||||
|
xmlContent: string,
|
||||||
|
tagName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const values: string[] = [];
|
||||||
|
|
||||||
|
const regex = new RegExp(`<${tagName}>([\\s\\S]*?)<\\/${tagName}>`, 'g');
|
||||||
|
const matches = xmlContent.matchAll(regex);
|
||||||
|
|
||||||
|
for (const match of matches) {
|
||||||
|
values.push(unescapeXml(match[1].trim()));
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Extracted ${values.length} <${tagName}> elements`);
|
||||||
|
return values;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract implemented features from app_spec.txt XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content of app_spec.txt
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of implemented features with name, description, and optional file_locations
|
||||||
|
*/
|
||||||
|
export function extractImplementedFeatures(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): ImplementedFeature[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const features: ImplementedFeature[] = [];
|
||||||
|
|
||||||
|
// Match <implemented_features>...</implemented_features> section
|
||||||
|
const implementedSection = extractXmlSection(specContent, 'implemented_features', options);
|
||||||
|
|
||||||
|
if (!implementedSection) {
|
||||||
|
log.debug('No implemented_features section found');
|
||||||
|
return features;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract individual feature blocks
|
||||||
|
const featureRegex = /<feature>([\s\S]*?)<\/feature>/g;
|
||||||
|
const featureMatches = implementedSection.matchAll(featureRegex);
|
||||||
|
|
||||||
|
for (const featureMatch of featureMatches) {
|
||||||
|
const featureContent = featureMatch[1];
|
||||||
|
|
||||||
|
// Extract name
|
||||||
|
const nameMatch = featureContent.match(/<name>([\s\S]*?)<\/name>/);
|
||||||
|
const name = nameMatch ? unescapeXml(nameMatch[1].trim()) : '';
|
||||||
|
|
||||||
|
// Extract description
|
||||||
|
const descMatch = featureContent.match(/<description>([\s\S]*?)<\/description>/);
|
||||||
|
const description = descMatch ? unescapeXml(descMatch[1].trim()) : '';
|
||||||
|
|
||||||
|
// Extract file_locations if present
|
||||||
|
const locationsSection = extractXmlSection(featureContent, 'file_locations', options);
|
||||||
|
const file_locations = locationsSection
|
||||||
|
? extractXmlElements(locationsSection, 'location', options)
|
||||||
|
: undefined;
|
||||||
|
|
||||||
|
if (name) {
|
||||||
|
features.push({
|
||||||
|
name,
|
||||||
|
description,
|
||||||
|
...(file_locations && file_locations.length > 0 ? { file_locations } : {}),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Extracted ${features.length} implemented features`);
|
||||||
|
return features;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract only the feature names from implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content of app_spec.txt
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of feature names
|
||||||
|
*/
|
||||||
|
export function extractImplementedFeatureNames(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string[] {
|
||||||
|
const features = extractImplementedFeatures(specContent, options);
|
||||||
|
return features.map((f) => f.name);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate XML for a single implemented feature
|
||||||
|
*
|
||||||
|
* @param feature - The feature to convert to XML
|
||||||
|
* @param indent - The base indentation level (default: 2 spaces)
|
||||||
|
* @returns XML string for the feature
|
||||||
|
*/
|
||||||
|
export function featureToXml(feature: ImplementedFeature, indent: string = ' '): string {
|
||||||
|
const i2 = indent.repeat(2);
|
||||||
|
const i3 = indent.repeat(3);
|
||||||
|
const i4 = indent.repeat(4);
|
||||||
|
|
||||||
|
let xml = `${i2}<feature>
|
||||||
|
${i3}<name>${escapeXml(feature.name)}</name>
|
||||||
|
${i3}<description>${escapeXml(feature.description)}</description>`;
|
||||||
|
|
||||||
|
if (feature.file_locations && feature.file_locations.length > 0) {
|
||||||
|
xml += `
|
||||||
|
${i3}<file_locations>
|
||||||
|
${feature.file_locations.map((loc) => `${i4}<location>${escapeXml(loc)}</location>`).join('\n')}
|
||||||
|
${i3}</file_locations>`;
|
||||||
|
}
|
||||||
|
|
||||||
|
xml += `
|
||||||
|
${i2}</feature>`;
|
||||||
|
|
||||||
|
return xml;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate XML for an array of implemented features
|
||||||
|
*
|
||||||
|
* @param features - Array of features to convert to XML
|
||||||
|
* @param indent - The base indentation level (default: 2 spaces)
|
||||||
|
* @returns XML string for the implemented_features section content
|
||||||
|
*/
|
||||||
|
export function featuresToXml(features: ImplementedFeature[], indent: string = ' '): string {
|
||||||
|
return features.map((f) => featureToXml(f, indent)).join('\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update the implemented_features section in XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param newFeatures - The new features to set
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the new implemented_features section
|
||||||
|
*/
|
||||||
|
export function updateImplementedFeaturesSection(
|
||||||
|
specContent: string,
|
||||||
|
newFeatures: ImplementedFeature[],
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const indent = ' ';
|
||||||
|
|
||||||
|
// Generate new section content
|
||||||
|
const newSectionContent = featuresToXml(newFeatures, indent);
|
||||||
|
|
||||||
|
// Build the new section
|
||||||
|
const newSection = `<implemented_features>
|
||||||
|
${newSectionContent}
|
||||||
|
${indent}</implemented_features>`;
|
||||||
|
|
||||||
|
// Check if section exists
|
||||||
|
const sectionRegex = /<implemented_features>[\s\S]*?<\/implemented_features>/;
|
||||||
|
|
||||||
|
if (sectionRegex.test(specContent)) {
|
||||||
|
log.debug('Replacing existing implemented_features section');
|
||||||
|
return specContent.replace(sectionRegex, newSection);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If section doesn't exist, try to insert after core_capabilities
|
||||||
|
const coreCapabilitiesEnd = '</core_capabilities>';
|
||||||
|
const insertIndex = specContent.indexOf(coreCapabilitiesEnd);
|
||||||
|
|
||||||
|
if (insertIndex !== -1) {
|
||||||
|
const insertPosition = insertIndex + coreCapabilitiesEnd.length;
|
||||||
|
log.debug('Inserting implemented_features after core_capabilities');
|
||||||
|
return (
|
||||||
|
specContent.slice(0, insertPosition) +
|
||||||
|
'\n\n' +
|
||||||
|
indent +
|
||||||
|
newSection +
|
||||||
|
specContent.slice(insertPosition)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// As a fallback, insert before </project_specification>
|
||||||
|
const projectSpecEnd = '</project_specification>';
|
||||||
|
const fallbackIndex = specContent.indexOf(projectSpecEnd);
|
||||||
|
|
||||||
|
if (fallbackIndex !== -1) {
|
||||||
|
log.debug('Inserting implemented_features before </project_specification>');
|
||||||
|
return (
|
||||||
|
specContent.slice(0, fallbackIndex) +
|
||||||
|
indent +
|
||||||
|
newSection +
|
||||||
|
'\n' +
|
||||||
|
specContent.slice(fallbackIndex)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
log.warn?.('Could not find appropriate insertion point for implemented_features');
|
||||||
|
log.debug('Could not find appropriate insertion point for implemented_features');
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a new feature to the implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param newFeature - The feature to add
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the new feature added
|
||||||
|
*/
|
||||||
|
export function addImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
newFeature: ImplementedFeature,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Extract existing features
|
||||||
|
const existingFeatures = extractImplementedFeatures(specContent, options);
|
||||||
|
|
||||||
|
// Check for duplicates by name
|
||||||
|
const isDuplicate = existingFeatures.some(
|
||||||
|
(f) => f.name.toLowerCase() === newFeature.name.toLowerCase()
|
||||||
|
);
|
||||||
|
|
||||||
|
if (isDuplicate) {
|
||||||
|
log.debug(`Feature "${newFeature.name}" already exists, skipping`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the new feature
|
||||||
|
const updatedFeatures = [...existingFeatures, newFeature];
|
||||||
|
|
||||||
|
log.debug(`Adding feature "${newFeature.name}"`);
|
||||||
|
return updateImplementedFeaturesSection(specContent, updatedFeatures, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove a feature from the implemented_features section by name
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param featureName - The name of the feature to remove
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the feature removed
|
||||||
|
*/
|
||||||
|
export function removeImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
featureName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Extract existing features
|
||||||
|
const existingFeatures = extractImplementedFeatures(specContent, options);
|
||||||
|
|
||||||
|
// Filter out the feature to remove
|
||||||
|
const updatedFeatures = existingFeatures.filter(
|
||||||
|
(f) => f.name.toLowerCase() !== featureName.toLowerCase()
|
||||||
|
);
|
||||||
|
|
||||||
|
if (updatedFeatures.length === existingFeatures.length) {
|
||||||
|
log.debug(`Feature "${featureName}" not found, no changes made`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Removing feature "${featureName}"`);
|
||||||
|
return updateImplementedFeaturesSection(specContent, updatedFeatures, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update an existing feature in the implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param featureName - The name of the feature to update
|
||||||
|
* @param updates - Partial updates to apply to the feature
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content with the feature modified
|
||||||
|
*/
|
||||||
|
export function updateImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
featureName: string,
|
||||||
|
updates: Partial<ImplementedFeature>,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Extract existing features
|
||||||
|
const existingFeatures = extractImplementedFeatures(specContent, options);
|
||||||
|
|
||||||
|
// Find and update the feature
|
||||||
|
let found = false;
|
||||||
|
const updatedFeatures = existingFeatures.map((f) => {
|
||||||
|
if (f.name.toLowerCase() === featureName.toLowerCase()) {
|
||||||
|
found = true;
|
||||||
|
return {
|
||||||
|
...f,
|
||||||
|
...updates,
|
||||||
|
// Preserve the original name if not explicitly updated
|
||||||
|
name: updates.name ?? f.name,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
return f;
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!found) {
|
||||||
|
log.debug(`Feature "${featureName}" not found, no changes made`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Updating feature "${featureName}"`);
|
||||||
|
return updateImplementedFeaturesSection(specContent, updatedFeatures, options);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a feature exists in the implemented_features section
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param featureName - The name of the feature to check
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns True if the feature exists
|
||||||
|
*/
|
||||||
|
export function hasImplementedFeature(
|
||||||
|
specContent: string,
|
||||||
|
featureName: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): boolean {
|
||||||
|
const features = extractImplementedFeatures(specContent, options);
|
||||||
|
return features.some((f) => f.name.toLowerCase() === featureName.toLowerCase());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert extracted features to SpecOutput.implemented_features format
|
||||||
|
*
|
||||||
|
* @param features - Array of extracted features
|
||||||
|
* @returns Features in SpecOutput format
|
||||||
|
*/
|
||||||
|
export function toSpecOutputFeatures(
|
||||||
|
features: ImplementedFeature[]
|
||||||
|
): SpecOutput['implemented_features'] {
|
||||||
|
return features.map((f) => ({
|
||||||
|
name: f.name,
|
||||||
|
description: f.description,
|
||||||
|
...(f.file_locations && f.file_locations.length > 0
|
||||||
|
? { file_locations: f.file_locations }
|
||||||
|
: {}),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert SpecOutput.implemented_features to ImplementedFeature format
|
||||||
|
*
|
||||||
|
* @param specFeatures - Features from SpecOutput
|
||||||
|
* @returns Features in ImplementedFeature format
|
||||||
|
*/
|
||||||
|
export function fromSpecOutputFeatures(
|
||||||
|
specFeatures: SpecOutput['implemented_features']
|
||||||
|
): ImplementedFeature[] {
|
||||||
|
return specFeatures.map((f) => ({
|
||||||
|
name: f.name,
|
||||||
|
description: f.description,
|
||||||
|
...(f.file_locations && f.file_locations.length > 0
|
||||||
|
? { file_locations: f.file_locations }
|
||||||
|
: {}),
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Represents a roadmap phase extracted from XML
|
||||||
|
*/
|
||||||
|
export interface RoadmapPhase {
|
||||||
|
name: string;
|
||||||
|
status: string;
|
||||||
|
description?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract the technology stack from app_spec.txt XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of technology names
|
||||||
|
*/
|
||||||
|
export function extractTechnologyStack(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
const techSection = extractXmlSection(specContent, 'technology_stack', options);
|
||||||
|
if (!techSection) {
|
||||||
|
log.debug('No technology_stack section found');
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
const technologies = extractXmlElements(techSection, 'technology', options);
|
||||||
|
log.debug(`Extracted ${technologies.length} technologies`);
|
||||||
|
return technologies;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update the technology_stack section in XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param technologies - The new technology list
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content
|
||||||
|
*/
|
||||||
|
export function updateTechnologyStack(
|
||||||
|
specContent: string,
|
||||||
|
technologies: string[],
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const indent = ' ';
|
||||||
|
const i2 = indent.repeat(2);
|
||||||
|
|
||||||
|
// Generate new section content
|
||||||
|
const techXml = technologies
|
||||||
|
.map((t) => `${i2}<technology>${escapeXml(t)}</technology>`)
|
||||||
|
.join('\n');
|
||||||
|
const newSection = `<technology_stack>\n${techXml}\n${indent}</technology_stack>`;
|
||||||
|
|
||||||
|
// Check if section exists
|
||||||
|
const sectionRegex = /<technology_stack>[\s\S]*?<\/technology_stack>/;
|
||||||
|
|
||||||
|
if (sectionRegex.test(specContent)) {
|
||||||
|
log.debug('Replacing existing technology_stack section');
|
||||||
|
return specContent.replace(sectionRegex, newSection);
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug('No technology_stack section found to update');
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract roadmap phases from app_spec.txt XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Array of roadmap phases
|
||||||
|
*/
|
||||||
|
export function extractRoadmapPhases(
|
||||||
|
specContent: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): RoadmapPhase[] {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
const phases: RoadmapPhase[] = [];
|
||||||
|
|
||||||
|
const roadmapSection = extractXmlSection(specContent, 'implementation_roadmap', options);
|
||||||
|
if (!roadmapSection) {
|
||||||
|
log.debug('No implementation_roadmap section found');
|
||||||
|
return phases;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract individual phase blocks
|
||||||
|
const phaseRegex = /<phase>([\s\S]*?)<\/phase>/g;
|
||||||
|
const phaseMatches = roadmapSection.matchAll(phaseRegex);
|
||||||
|
|
||||||
|
for (const phaseMatch of phaseMatches) {
|
||||||
|
const phaseContent = phaseMatch[1];
|
||||||
|
|
||||||
|
const nameMatch = phaseContent.match(/<name>([\s\S]*?)<\/name>/);
|
||||||
|
const name = nameMatch ? unescapeXml(nameMatch[1].trim()) : '';
|
||||||
|
|
||||||
|
const statusMatch = phaseContent.match(/<status>([\s\S]*?)<\/status>/);
|
||||||
|
const status = statusMatch ? unescapeXml(statusMatch[1].trim()) : 'pending';
|
||||||
|
|
||||||
|
const descMatch = phaseContent.match(/<description>([\s\S]*?)<\/description>/);
|
||||||
|
const description = descMatch ? unescapeXml(descMatch[1].trim()) : undefined;
|
||||||
|
|
||||||
|
if (name) {
|
||||||
|
phases.push({ name, status, description });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Extracted ${phases.length} roadmap phases`);
|
||||||
|
return phases;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update a roadmap phase status in XML content
|
||||||
|
*
|
||||||
|
* @param specContent - The full XML content
|
||||||
|
* @param phaseName - The name of the phase to update
|
||||||
|
* @param newStatus - The new status value
|
||||||
|
* @param options - Optional extraction options
|
||||||
|
* @returns Updated XML content
|
||||||
|
*/
|
||||||
|
export function updateRoadmapPhaseStatus(
|
||||||
|
specContent: string,
|
||||||
|
phaseName: string,
|
||||||
|
newStatus: string,
|
||||||
|
options: ExtractXmlOptions = {}
|
||||||
|
): string {
|
||||||
|
const log = options.logger || logger;
|
||||||
|
|
||||||
|
// Find the phase and update its status
|
||||||
|
// Match the phase block containing the specific name
|
||||||
|
const phaseRegex = new RegExp(
|
||||||
|
`(<phase>\\s*<name>\\s*${escapeXml(phaseName)}\\s*<\\/name>\\s*<status>)[\\s\\S]*?(<\\/status>)`,
|
||||||
|
'i'
|
||||||
|
);
|
||||||
|
|
||||||
|
if (phaseRegex.test(specContent)) {
|
||||||
|
log.debug(`Updating phase "${phaseName}" status to "${newStatus}"`);
|
||||||
|
return specContent.replace(phaseRegex, `$1${escapeXml(newStatus)}$2`);
|
||||||
|
}
|
||||||
|
|
||||||
|
log.debug(`Phase "${phaseName}" not found`);
|
||||||
|
return specContent;
|
||||||
|
}
|
||||||
@@ -8,12 +8,28 @@ import type { Request, Response, NextFunction } from 'express';
|
|||||||
import { validatePath, PathNotAllowedError } from '@automaker/platform';
|
import { validatePath, PathNotAllowedError } from '@automaker/platform';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a middleware that validates specified path parameters in req.body
|
* Helper to get parameter value from request (checks body first, then query)
|
||||||
|
*/
|
||||||
|
function getParamValue(req: Request, paramName: string): unknown {
|
||||||
|
// Check body first (for POST/PUT/PATCH requests)
|
||||||
|
if (req.body && req.body[paramName] !== undefined) {
|
||||||
|
return req.body[paramName];
|
||||||
|
}
|
||||||
|
// Fall back to query params (for GET requests)
|
||||||
|
if (req.query && req.query[paramName] !== undefined) {
|
||||||
|
return req.query[paramName];
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a middleware that validates specified path parameters in req.body or req.query
|
||||||
* @param paramNames - Names of parameters to validate (e.g., 'projectPath', 'worktreePath')
|
* @param paramNames - Names of parameters to validate (e.g., 'projectPath', 'worktreePath')
|
||||||
* @example
|
* @example
|
||||||
* router.post('/create', validatePathParams('projectPath'), handler);
|
* router.post('/create', validatePathParams('projectPath'), handler);
|
||||||
* router.post('/delete', validatePathParams('projectPath', 'worktreePath'), handler);
|
* router.post('/delete', validatePathParams('projectPath', 'worktreePath'), handler);
|
||||||
* router.post('/send', validatePathParams('workingDirectory?', 'imagePaths[]'), handler);
|
* router.post('/send', validatePathParams('workingDirectory?', 'imagePaths[]'), handler);
|
||||||
|
* router.get('/logs', validatePathParams('worktreePath'), handler); // Works with query params too
|
||||||
*
|
*
|
||||||
* Special syntax:
|
* Special syntax:
|
||||||
* - 'paramName?' - Optional parameter (only validated if present)
|
* - 'paramName?' - Optional parameter (only validated if present)
|
||||||
@@ -26,8 +42,8 @@ export function validatePathParams(...paramNames: string[]) {
|
|||||||
// Handle optional parameters (paramName?)
|
// Handle optional parameters (paramName?)
|
||||||
if (paramName.endsWith('?')) {
|
if (paramName.endsWith('?')) {
|
||||||
const actualName = paramName.slice(0, -1);
|
const actualName = paramName.slice(0, -1);
|
||||||
const value = req.body[actualName];
|
const value = getParamValue(req, actualName);
|
||||||
if (value) {
|
if (value && typeof value === 'string') {
|
||||||
validatePath(value);
|
validatePath(value);
|
||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
@@ -36,18 +52,20 @@ export function validatePathParams(...paramNames: string[]) {
|
|||||||
// Handle array parameters (paramName[])
|
// Handle array parameters (paramName[])
|
||||||
if (paramName.endsWith('[]')) {
|
if (paramName.endsWith('[]')) {
|
||||||
const actualName = paramName.slice(0, -2);
|
const actualName = paramName.slice(0, -2);
|
||||||
const values = req.body[actualName];
|
const values = getParamValue(req, actualName);
|
||||||
if (Array.isArray(values) && values.length > 0) {
|
if (Array.isArray(values) && values.length > 0) {
|
||||||
for (const value of values) {
|
for (const value of values) {
|
||||||
|
if (typeof value === 'string') {
|
||||||
validatePath(value);
|
validatePath(value);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle regular parameters
|
// Handle regular parameters
|
||||||
const value = req.body[paramName];
|
const value = getParamValue(req, paramName);
|
||||||
if (value) {
|
if (value && typeof value === 'string') {
|
||||||
validatePath(value);
|
validatePath(value);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,12 +5,17 @@
|
|||||||
* with the provider architecture.
|
* with the provider architecture.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { query, type Options } from '@anthropic-ai/claude-agent-sdk';
|
import { query, type Options, type SDKUserMessage } from '@anthropic-ai/claude-agent-sdk';
|
||||||
import { BaseProvider } from './base-provider.js';
|
import { BaseProvider } from './base-provider.js';
|
||||||
import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
|
import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
|
||||||
|
import { getClaudeAuthIndicators } from '@automaker/platform';
|
||||||
const logger = createLogger('ClaudeProvider');
|
import {
|
||||||
import { getThinkingTokenBudget, validateBareModelId } from '@automaker/types';
|
getThinkingTokenBudget,
|
||||||
|
validateBareModelId,
|
||||||
|
type ClaudeApiProfile,
|
||||||
|
type ClaudeCompatibleProvider,
|
||||||
|
type Credentials,
|
||||||
|
} from '@automaker/types';
|
||||||
import type {
|
import type {
|
||||||
ExecuteOptions,
|
ExecuteOptions,
|
||||||
ProviderMessage,
|
ProviderMessage,
|
||||||
@@ -18,29 +23,142 @@ import type {
|
|||||||
ModelDefinition,
|
ModelDefinition,
|
||||||
} from './types.js';
|
} from './types.js';
|
||||||
|
|
||||||
// Explicit allowlist of environment variables to pass to the SDK.
|
const logger = createLogger('ClaudeProvider');
|
||||||
// Only these vars are passed - nothing else from process.env leaks through.
|
|
||||||
const ALLOWED_ENV_VARS = [
|
|
||||||
'ANTHROPIC_API_KEY',
|
|
||||||
'PATH',
|
|
||||||
'HOME',
|
|
||||||
'SHELL',
|
|
||||||
'TERM',
|
|
||||||
'USER',
|
|
||||||
'LANG',
|
|
||||||
'LC_ALL',
|
|
||||||
];
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Build environment for the SDK with only explicitly allowed variables
|
* ProviderConfig - Union type for provider configuration
|
||||||
|
*
|
||||||
|
* Accepts either the legacy ClaudeApiProfile or new ClaudeCompatibleProvider.
|
||||||
|
* Both share the same connection settings structure.
|
||||||
*/
|
*/
|
||||||
function buildEnv(): Record<string, string | undefined> {
|
type ProviderConfig = ClaudeApiProfile | ClaudeCompatibleProvider;
|
||||||
|
|
||||||
|
// System vars are always passed from process.env regardless of profile
|
||||||
|
const SYSTEM_ENV_VARS = ['PATH', 'HOME', 'SHELL', 'TERM', 'USER', 'LANG', 'LC_ALL'];
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if the config is a ClaudeCompatibleProvider (new system)
|
||||||
|
* by checking for the 'models' array property
|
||||||
|
*/
|
||||||
|
function isClaudeCompatibleProvider(config: ProviderConfig): config is ClaudeCompatibleProvider {
|
||||||
|
return 'models' in config && Array.isArray(config.models);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build environment for the SDK with only explicitly allowed variables.
|
||||||
|
* When a provider/profile is provided, uses its configuration (clean switch - don't inherit from process.env).
|
||||||
|
* When no provider is provided, uses direct Anthropic API settings from process.env.
|
||||||
|
*
|
||||||
|
* Supports both:
|
||||||
|
* - ClaudeCompatibleProvider (new system with models[] array)
|
||||||
|
* - ClaudeApiProfile (legacy system with modelMappings)
|
||||||
|
*
|
||||||
|
* @param providerConfig - Optional provider configuration for alternative endpoint
|
||||||
|
* @param credentials - Optional credentials object for resolving 'credentials' apiKeySource
|
||||||
|
*/
|
||||||
|
function buildEnv(
|
||||||
|
providerConfig?: ProviderConfig,
|
||||||
|
credentials?: Credentials
|
||||||
|
): Record<string, string | undefined> {
|
||||||
const env: Record<string, string | undefined> = {};
|
const env: Record<string, string | undefined> = {};
|
||||||
for (const key of ALLOWED_ENV_VARS) {
|
|
||||||
|
if (providerConfig) {
|
||||||
|
// Use provider configuration (clean switch - don't inherit non-system vars from process.env)
|
||||||
|
logger.debug('[buildEnv] Using provider configuration:', {
|
||||||
|
name: providerConfig.name,
|
||||||
|
baseUrl: providerConfig.baseUrl,
|
||||||
|
apiKeySource: providerConfig.apiKeySource ?? 'inline',
|
||||||
|
isNewProvider: isClaudeCompatibleProvider(providerConfig),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Resolve API key based on source strategy
|
||||||
|
let apiKey: string | undefined;
|
||||||
|
const source = providerConfig.apiKeySource ?? 'inline'; // Default to inline for backwards compat
|
||||||
|
|
||||||
|
switch (source) {
|
||||||
|
case 'inline':
|
||||||
|
apiKey = providerConfig.apiKey;
|
||||||
|
break;
|
||||||
|
case 'env':
|
||||||
|
apiKey = process.env.ANTHROPIC_API_KEY;
|
||||||
|
break;
|
||||||
|
case 'credentials':
|
||||||
|
apiKey = credentials?.apiKeys?.anthropic;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warn if no API key found
|
||||||
|
if (!apiKey) {
|
||||||
|
logger.warn(`No API key found for provider "${providerConfig.name}" with source "${source}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Authentication
|
||||||
|
if (providerConfig.useAuthToken) {
|
||||||
|
env['ANTHROPIC_AUTH_TOKEN'] = apiKey;
|
||||||
|
} else {
|
||||||
|
env['ANTHROPIC_API_KEY'] = apiKey;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Endpoint configuration
|
||||||
|
env['ANTHROPIC_BASE_URL'] = providerConfig.baseUrl;
|
||||||
|
logger.debug(`[buildEnv] Set ANTHROPIC_BASE_URL to: ${providerConfig.baseUrl}`);
|
||||||
|
|
||||||
|
if (providerConfig.timeoutMs) {
|
||||||
|
env['API_TIMEOUT_MS'] = String(providerConfig.timeoutMs);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Model mappings - only for legacy ClaudeApiProfile
|
||||||
|
// For ClaudeCompatibleProvider, the model is passed directly (no mapping needed)
|
||||||
|
if (!isClaudeCompatibleProvider(providerConfig) && providerConfig.modelMappings) {
|
||||||
|
if (providerConfig.modelMappings.haiku) {
|
||||||
|
env['ANTHROPIC_DEFAULT_HAIKU_MODEL'] = providerConfig.modelMappings.haiku;
|
||||||
|
}
|
||||||
|
if (providerConfig.modelMappings.sonnet) {
|
||||||
|
env['ANTHROPIC_DEFAULT_SONNET_MODEL'] = providerConfig.modelMappings.sonnet;
|
||||||
|
}
|
||||||
|
if (providerConfig.modelMappings.opus) {
|
||||||
|
env['ANTHROPIC_DEFAULT_OPUS_MODEL'] = providerConfig.modelMappings.opus;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traffic control
|
||||||
|
if (providerConfig.disableNonessentialTraffic) {
|
||||||
|
env['CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC'] = '1';
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Use direct Anthropic API - pass through credentials or environment variables
|
||||||
|
// This supports:
|
||||||
|
// 1. API Key mode: ANTHROPIC_API_KEY from credentials (UI settings) or env
|
||||||
|
// 2. Claude Max plan: Uses CLI OAuth auth (SDK handles this automatically)
|
||||||
|
// 3. Custom endpoints via ANTHROPIC_BASE_URL env var (backward compatibility)
|
||||||
|
//
|
||||||
|
// Priority: credentials file (UI settings) -> environment variable
|
||||||
|
// Note: Only auth and endpoint vars are passed. Model mappings and traffic
|
||||||
|
// control are NOT passed (those require a profile for explicit configuration).
|
||||||
|
if (credentials?.apiKeys?.anthropic) {
|
||||||
|
env['ANTHROPIC_API_KEY'] = credentials.apiKeys.anthropic;
|
||||||
|
} else if (process.env.ANTHROPIC_API_KEY) {
|
||||||
|
env['ANTHROPIC_API_KEY'] = process.env.ANTHROPIC_API_KEY;
|
||||||
|
}
|
||||||
|
// If using Claude Max plan via CLI auth, the SDK handles auth automatically
|
||||||
|
// when no API key is provided. We don't set ANTHROPIC_AUTH_TOKEN here
|
||||||
|
// unless it was explicitly set in process.env (rare edge case).
|
||||||
|
if (process.env.ANTHROPIC_AUTH_TOKEN) {
|
||||||
|
env['ANTHROPIC_AUTH_TOKEN'] = process.env.ANTHROPIC_AUTH_TOKEN;
|
||||||
|
}
|
||||||
|
// Pass through ANTHROPIC_BASE_URL if set in environment (backward compatibility)
|
||||||
|
if (process.env.ANTHROPIC_BASE_URL) {
|
||||||
|
env['ANTHROPIC_BASE_URL'] = process.env.ANTHROPIC_BASE_URL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Always add system vars from process.env
|
||||||
|
for (const key of SYSTEM_ENV_VARS) {
|
||||||
if (process.env[key]) {
|
if (process.env[key]) {
|
||||||
env[key] = process.env[key];
|
env[key] = process.env[key];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return env;
|
return env;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -62,16 +180,26 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
model,
|
model,
|
||||||
cwd,
|
cwd,
|
||||||
systemPrompt,
|
systemPrompt,
|
||||||
maxTurns = 20,
|
maxTurns = 100,
|
||||||
allowedTools,
|
allowedTools,
|
||||||
abortController,
|
abortController,
|
||||||
conversationHistory,
|
conversationHistory,
|
||||||
sdkSessionId,
|
sdkSessionId,
|
||||||
thinkingLevel,
|
thinkingLevel,
|
||||||
|
claudeApiProfile,
|
||||||
|
claudeCompatibleProvider,
|
||||||
|
credentials,
|
||||||
} = options;
|
} = options;
|
||||||
|
|
||||||
// Convert thinking level to token budget
|
// Determine which provider config to use
|
||||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
// claudeCompatibleProvider takes precedence over claudeApiProfile
|
||||||
|
const providerConfig = claudeCompatibleProvider || claudeApiProfile;
|
||||||
|
|
||||||
|
// Build thinking configuration
|
||||||
|
// Adaptive thinking (Opus 4.6): don't set maxThinkingTokens, model uses adaptive by default
|
||||||
|
// Manual thinking (Haiku/Sonnet): use budget_tokens
|
||||||
|
const maxThinkingTokens =
|
||||||
|
thinkingLevel === 'adaptive' ? undefined : getThinkingTokenBudget(thinkingLevel);
|
||||||
|
|
||||||
// Build Claude SDK options
|
// Build Claude SDK options
|
||||||
const sdkOptions: Options = {
|
const sdkOptions: Options = {
|
||||||
@@ -80,7 +208,9 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
maxTurns,
|
maxTurns,
|
||||||
cwd,
|
cwd,
|
||||||
// Pass only explicitly allowed environment variables to SDK
|
// Pass only explicitly allowed environment variables to SDK
|
||||||
env: buildEnv(),
|
// When a provider is active, uses provider settings (clean switch)
|
||||||
|
// When no provider, uses direct Anthropic API (from process.env or CLI OAuth)
|
||||||
|
env: buildEnv(providerConfig, credentials),
|
||||||
// Pass through allowedTools if provided by caller (decided by sdk-options.ts)
|
// Pass through allowedTools if provided by caller (decided by sdk-options.ts)
|
||||||
...(allowedTools && { allowedTools }),
|
...(allowedTools && { allowedTools }),
|
||||||
// AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
// AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||||
@@ -99,17 +229,19 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
...(maxThinkingTokens && { maxThinkingTokens }),
|
...(maxThinkingTokens && { maxThinkingTokens }),
|
||||||
// Subagents configuration for specialized task delegation
|
// Subagents configuration for specialized task delegation
|
||||||
...(options.agents && { agents: options.agents }),
|
...(options.agents && { agents: options.agents }),
|
||||||
|
// Pass through outputFormat for structured JSON outputs
|
||||||
|
...(options.outputFormat && { outputFormat: options.outputFormat }),
|
||||||
};
|
};
|
||||||
|
|
||||||
// Build prompt payload
|
// Build prompt payload
|
||||||
let promptPayload: string | AsyncIterable<any>;
|
let promptPayload: string | AsyncIterable<SDKUserMessage>;
|
||||||
|
|
||||||
if (Array.isArray(prompt)) {
|
if (Array.isArray(prompt)) {
|
||||||
// Multi-part prompt (with images)
|
// Multi-part prompt (with images)
|
||||||
promptPayload = (async function* () {
|
promptPayload = (async function* () {
|
||||||
const multiPartPrompt = {
|
const multiPartPrompt: SDKUserMessage = {
|
||||||
type: 'user' as const,
|
type: 'user' as const,
|
||||||
session_id: '',
|
session_id: sdkSessionId || '',
|
||||||
message: {
|
message: {
|
||||||
role: 'user' as const,
|
role: 'user' as const,
|
||||||
content: prompt,
|
content: prompt,
|
||||||
@@ -123,6 +255,18 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
promptPayload = prompt;
|
promptPayload = prompt;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Log the environment being passed to the SDK for debugging
|
||||||
|
const envForSdk = sdkOptions.env as Record<string, string | undefined>;
|
||||||
|
logger.debug('[ClaudeProvider] SDK Configuration:', {
|
||||||
|
model: sdkOptions.model,
|
||||||
|
baseUrl: envForSdk?.['ANTHROPIC_BASE_URL'] || '(default Anthropic API)',
|
||||||
|
hasApiKey: !!envForSdk?.['ANTHROPIC_API_KEY'],
|
||||||
|
hasAuthToken: !!envForSdk?.['ANTHROPIC_AUTH_TOKEN'],
|
||||||
|
providerName: providerConfig?.name || '(direct Anthropic)',
|
||||||
|
maxTurns: sdkOptions.maxTurns,
|
||||||
|
maxThinkingTokens: sdkOptions.maxThinkingTokens,
|
||||||
|
});
|
||||||
|
|
||||||
// Execute via Claude Agent SDK
|
// Execute via Claude Agent SDK
|
||||||
try {
|
try {
|
||||||
const stream = query({ prompt: promptPayload, options: sdkOptions });
|
const stream = query({ prompt: promptPayload, options: sdkOptions });
|
||||||
@@ -149,12 +293,16 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
? `${userMessage}\n\nTip: If you're running multiple features in auto-mode, consider reducing concurrency (maxConcurrency setting) to avoid hitting rate limits.`
|
? `${userMessage}\n\nTip: If you're running multiple features in auto-mode, consider reducing concurrency (maxConcurrency setting) to avoid hitting rate limits.`
|
||||||
: userMessage;
|
: userMessage;
|
||||||
|
|
||||||
const enhancedError = new Error(message);
|
const enhancedError = new Error(message) as Error & {
|
||||||
(enhancedError as any).originalError = error;
|
originalError: unknown;
|
||||||
(enhancedError as any).type = errorInfo.type;
|
type: string;
|
||||||
|
retryAfter?: number;
|
||||||
|
};
|
||||||
|
enhancedError.originalError = error;
|
||||||
|
enhancedError.type = errorInfo.type;
|
||||||
|
|
||||||
if (errorInfo.isRateLimit) {
|
if (errorInfo.isRateLimit) {
|
||||||
(enhancedError as any).retryAfter = errorInfo.retryAfter;
|
enhancedError.retryAfter = errorInfo.retryAfter;
|
||||||
}
|
}
|
||||||
|
|
||||||
throw enhancedError;
|
throw enhancedError;
|
||||||
@@ -166,13 +314,37 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
*/
|
*/
|
||||||
async detectInstallation(): Promise<InstallationStatus> {
|
async detectInstallation(): Promise<InstallationStatus> {
|
||||||
// Claude SDK is always available since it's a dependency
|
// Claude SDK is always available since it's a dependency
|
||||||
const hasApiKey = !!process.env.ANTHROPIC_API_KEY;
|
// Check all four supported auth methods, mirroring the logic in buildEnv():
|
||||||
|
// 1. ANTHROPIC_API_KEY environment variable
|
||||||
|
// 2. ANTHROPIC_AUTH_TOKEN environment variable
|
||||||
|
// 3. credentials?.apiKeys?.anthropic (credentials file, checked via platform indicators)
|
||||||
|
// 4. Claude Max CLI OAuth (SDK handles this automatically; detected via getClaudeAuthIndicators)
|
||||||
|
const hasEnvApiKey = !!process.env.ANTHROPIC_API_KEY;
|
||||||
|
const hasEnvAuthToken = !!process.env.ANTHROPIC_AUTH_TOKEN;
|
||||||
|
|
||||||
|
// Check credentials file and CLI OAuth indicators (same sources used by buildEnv)
|
||||||
|
let hasCredentialsApiKey = false;
|
||||||
|
let hasCliOAuth = false;
|
||||||
|
try {
|
||||||
|
const indicators = await getClaudeAuthIndicators();
|
||||||
|
hasCredentialsApiKey = !!indicators.credentials?.hasApiKey;
|
||||||
|
hasCliOAuth = !!(
|
||||||
|
indicators.credentials?.hasOAuthToken ||
|
||||||
|
indicators.hasStatsCacheWithActivity ||
|
||||||
|
(indicators.hasSettingsFile && indicators.hasProjectsSessions)
|
||||||
|
);
|
||||||
|
} catch {
|
||||||
|
// If we can't check indicators, fall back to env vars only
|
||||||
|
}
|
||||||
|
|
||||||
|
const hasApiKey = hasEnvApiKey || hasCredentialsApiKey;
|
||||||
|
const authenticated = hasEnvApiKey || hasEnvAuthToken || hasCredentialsApiKey || hasCliOAuth;
|
||||||
|
|
||||||
const status: InstallationStatus = {
|
const status: InstallationStatus = {
|
||||||
installed: true,
|
installed: true,
|
||||||
method: 'sdk',
|
method: 'sdk',
|
||||||
hasApiKey,
|
hasApiKey,
|
||||||
authenticated: hasApiKey,
|
authenticated,
|
||||||
};
|
};
|
||||||
|
|
||||||
return status;
|
return status;
|
||||||
@@ -184,18 +356,30 @@ export class ClaudeProvider extends BaseProvider {
|
|||||||
getAvailableModels(): ModelDefinition[] {
|
getAvailableModels(): ModelDefinition[] {
|
||||||
const models = [
|
const models = [
|
||||||
{
|
{
|
||||||
id: 'claude-opus-4-5-20251101',
|
id: 'claude-opus-4-6',
|
||||||
name: 'Claude Opus 4.5',
|
name: 'Claude Opus 4.6',
|
||||||
modelString: 'claude-opus-4-5-20251101',
|
modelString: 'claude-opus-4-6',
|
||||||
provider: 'anthropic',
|
provider: 'anthropic',
|
||||||
description: 'Most capable Claude model',
|
description: 'Most capable Claude model with adaptive thinking',
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
maxOutputTokens: 16000,
|
maxOutputTokens: 128000,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
supportsTools: true,
|
supportsTools: true,
|
||||||
tier: 'premium' as const,
|
tier: 'premium' as const,
|
||||||
default: true,
|
default: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: 'claude-sonnet-4-6',
|
||||||
|
name: 'Claude Sonnet 4.6',
|
||||||
|
modelString: 'claude-sonnet-4-6',
|
||||||
|
provider: 'anthropic',
|
||||||
|
description: 'Balanced performance and cost with enhanced reasoning',
|
||||||
|
contextWindow: 200000,
|
||||||
|
maxOutputTokens: 64000,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: 'claude-sonnet-4-20250514',
|
id: 'claude-sonnet-4-20250514',
|
||||||
name: 'Claude Sonnet 4',
|
name: 'Claude Sonnet 4',
|
||||||
|
|||||||
@@ -26,22 +26,23 @@
|
|||||||
* ```
|
* ```
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { execSync } from 'child_process';
|
|
||||||
import * as fs from 'fs';
|
|
||||||
import * as path from 'path';
|
|
||||||
import * as os from 'os';
|
|
||||||
import { BaseProvider } from './base-provider.js';
|
|
||||||
import type { ProviderConfig, ExecuteOptions, ProviderMessage } from './types.js';
|
|
||||||
import {
|
import {
|
||||||
spawnJSONLProcess,
|
|
||||||
type SubprocessOptions,
|
|
||||||
isWslAvailable,
|
|
||||||
findCliInWsl,
|
|
||||||
createWslCommand,
|
createWslCommand,
|
||||||
|
findCliInWsl,
|
||||||
|
isWslAvailable,
|
||||||
|
spawnJSONLProcess,
|
||||||
windowsToWslPath,
|
windowsToWslPath,
|
||||||
|
type SubprocessOptions,
|
||||||
type WslCliResult,
|
type WslCliResult,
|
||||||
} from '@automaker/platform';
|
} from '@automaker/platform';
|
||||||
|
import { calculateReasoningTimeout } from '@automaker/types';
|
||||||
import { createLogger, isAbortError } from '@automaker/utils';
|
import { createLogger, isAbortError } from '@automaker/utils';
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import * as fs from 'fs';
|
||||||
|
import * as os from 'os';
|
||||||
|
import * as path from 'path';
|
||||||
|
import { BaseProvider } from './base-provider.js';
|
||||||
|
import type { ExecuteOptions, ProviderConfig, ProviderMessage } from './types.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Spawn strategy for CLI tools on Windows
|
* Spawn strategy for CLI tools on Windows
|
||||||
@@ -107,6 +108,15 @@ export interface CliDetectionResult {
|
|||||||
// Create logger for CLI operations
|
// Create logger for CLI operations
|
||||||
const cliLogger = createLogger('CliProvider');
|
const cliLogger = createLogger('CliProvider');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base timeout for CLI operations in milliseconds.
|
||||||
|
* CLI tools have longer startup and processing times compared to direct API calls,
|
||||||
|
* so we use a higher base timeout (120s) than the default provider timeout (30s).
|
||||||
|
* This is multiplied by reasoning effort multipliers when applicable.
|
||||||
|
* @see calculateReasoningTimeout from @automaker/types
|
||||||
|
*/
|
||||||
|
const CLI_BASE_TIMEOUT_MS = 120000;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Abstract base class for CLI-based providers
|
* Abstract base class for CLI-based providers
|
||||||
*
|
*
|
||||||
@@ -450,6 +460,10 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Calculate dynamic timeout based on reasoning effort.
|
||||||
|
// This addresses GitHub issue #530 where reasoning models with 'xhigh' effort would timeout.
|
||||||
|
const timeout = calculateReasoningTimeout(options.reasoningEffort, CLI_BASE_TIMEOUT_MS);
|
||||||
|
|
||||||
// WSL strategy
|
// WSL strategy
|
||||||
if (this.useWsl && this.wslCliPath) {
|
if (this.useWsl && this.wslCliPath) {
|
||||||
const wslCwd = windowsToWslPath(cwd);
|
const wslCwd = windowsToWslPath(cwd);
|
||||||
@@ -473,7 +487,7 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
cwd, // Windows cwd for spawn
|
cwd, // Windows cwd for spawn
|
||||||
env: filteredEnv,
|
env: filteredEnv,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: 120000, // CLI operations may take longer
|
timeout,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -488,7 +502,7 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
cwd,
|
cwd,
|
||||||
env: filteredEnv,
|
env: filteredEnv,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: 120000,
|
timeout,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -501,7 +515,7 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
cwd,
|
cwd,
|
||||||
env: filteredEnv,
|
env: filteredEnv,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: 120000,
|
timeout,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -522,8 +536,13 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
throw new Error(`${this.getCliName()} CLI not found. ${this.getInstallInstructions()}`);
|
throw new Error(`${this.getCliName()} CLI not found. ${this.getInstallInstructions()}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
const cliArgs = this.buildCliArgs(options);
|
// Many CLI-based providers do not support a separate "system" message.
|
||||||
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
|
// If a systemPrompt is provided, embed it into the prompt so downstream models
|
||||||
|
// still receive critical formatting/schema instructions (e.g., JSON-only outputs).
|
||||||
|
const effectiveOptions = this.embedSystemPromptIntoPrompt(options);
|
||||||
|
|
||||||
|
const cliArgs = this.buildCliArgs(effectiveOptions);
|
||||||
|
const subprocessOptions = this.buildSubprocessOptions(effectiveOptions, cliArgs);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
|
for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
|
||||||
@@ -555,4 +574,52 @@ export abstract class CliProvider extends BaseProvider {
|
|||||||
throw error;
|
throw error;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Embed system prompt text into the user prompt for CLI providers.
|
||||||
|
*
|
||||||
|
* Most CLI providers we integrate with only accept a single prompt via stdin/args.
|
||||||
|
* When upstream code supplies `options.systemPrompt`, we prepend it to the prompt
|
||||||
|
* content and clear `systemPrompt` to avoid any accidental double-injection by
|
||||||
|
* subclasses.
|
||||||
|
*/
|
||||||
|
protected embedSystemPromptIntoPrompt(options: ExecuteOptions): ExecuteOptions {
|
||||||
|
if (!options.systemPrompt) {
|
||||||
|
return options;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only string system prompts can be reliably embedded for CLI providers.
|
||||||
|
// Presets are provider-specific (e.g., Claude SDK) and cannot be represented
|
||||||
|
// universally. If a preset is provided, we only embed its optional `append`.
|
||||||
|
const systemText =
|
||||||
|
typeof options.systemPrompt === 'string'
|
||||||
|
? options.systemPrompt
|
||||||
|
: options.systemPrompt.append
|
||||||
|
? options.systemPrompt.append
|
||||||
|
: '';
|
||||||
|
|
||||||
|
if (!systemText) {
|
||||||
|
return { ...options, systemPrompt: undefined };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Preserve original prompt structure.
|
||||||
|
if (typeof options.prompt === 'string') {
|
||||||
|
return {
|
||||||
|
...options,
|
||||||
|
prompt: `${systemText}\n\n---\n\n${options.prompt}`,
|
||||||
|
systemPrompt: undefined,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Array.isArray(options.prompt)) {
|
||||||
|
return {
|
||||||
|
...options,
|
||||||
|
prompt: [{ type: 'text', text: systemText }, ...options.prompt],
|
||||||
|
systemPrompt: undefined,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should be unreachable due to ExecuteOptions typing, but keep safe.
|
||||||
|
return { ...options, systemPrompt: undefined };
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,12 +19,11 @@ const MAX_OUTPUT_16K = 16000;
|
|||||||
export const CODEX_MODELS: ModelDefinition[] = [
|
export const CODEX_MODELS: ModelDefinition[] = [
|
||||||
// ========== Recommended Codex Models ==========
|
// ========== Recommended Codex Models ==========
|
||||||
{
|
{
|
||||||
id: CODEX_MODEL_MAP.gpt52Codex,
|
id: CODEX_MODEL_MAP.gpt53Codex,
|
||||||
name: 'GPT-5.2-Codex',
|
name: 'GPT-5.3-Codex',
|
||||||
modelString: CODEX_MODEL_MAP.gpt52Codex,
|
modelString: CODEX_MODEL_MAP.gpt53Codex,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description:
|
description: 'Latest frontier agentic coding model.',
|
||||||
'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).',
|
|
||||||
contextWindow: CONTEXT_WINDOW_256K,
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
maxOutputTokens: MAX_OUTPUT_32K,
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -33,12 +32,38 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
default: true,
|
default: true,
|
||||||
hasReasoning: true,
|
hasReasoning: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt53CodexSpark,
|
||||||
|
name: 'GPT-5.3-Codex-Spark',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt53CodexSpark,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Near-instant real-time coding model, 1000+ tokens/sec.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'premium' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt52Codex,
|
||||||
|
name: 'GPT-5.2-Codex',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt52Codex,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Frontier agentic coding model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'premium' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: CODEX_MODEL_MAP.gpt51CodexMax,
|
id: CODEX_MODEL_MAP.gpt51CodexMax,
|
||||||
name: 'GPT-5.1-Codex-Max',
|
name: 'GPT-5.1-Codex-Max',
|
||||||
modelString: CODEX_MODEL_MAP.gpt51CodexMax,
|
modelString: CODEX_MODEL_MAP.gpt51CodexMax,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
|
description: 'Codex-optimized flagship for deep and fast reasoning.',
|
||||||
contextWindow: CONTEXT_WINDOW_256K,
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
maxOutputTokens: MAX_OUTPUT_32K,
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -51,7 +76,46 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
name: 'GPT-5.1-Codex-Mini',
|
name: 'GPT-5.1-Codex-Mini',
|
||||||
modelString: CODEX_MODEL_MAP.gpt51CodexMini,
|
modelString: CODEX_MODEL_MAP.gpt51CodexMini,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description: 'Smaller, more cost-effective version for faster workflows.',
|
description: 'Optimized for codex. Cheaper, faster, but less capable.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'basic' as const,
|
||||||
|
hasReasoning: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt51Codex,
|
||||||
|
name: 'GPT-5.1-Codex',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt51Codex,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Original GPT-5.1 Codex agentic coding model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt5Codex,
|
||||||
|
name: 'GPT-5-Codex',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt5Codex,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Original GPT-5 Codex model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt5CodexMini,
|
||||||
|
name: 'GPT-5-Codex-Mini',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt5CodexMini,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Smaller, cheaper GPT-5 Codex variant.',
|
||||||
contextWindow: CONTEXT_WINDOW_128K,
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
maxOutputTokens: MAX_OUTPUT_16K,
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -66,7 +130,7 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
name: 'GPT-5.2',
|
name: 'GPT-5.2',
|
||||||
modelString: CODEX_MODEL_MAP.gpt52,
|
modelString: CODEX_MODEL_MAP.gpt52,
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
description: 'Best general agentic model for tasks across industries and domains.',
|
description: 'Latest frontier model with improvements across knowledge, reasoning and coding.',
|
||||||
contextWindow: CONTEXT_WINDOW_256K,
|
contextWindow: CONTEXT_WINDOW_256K,
|
||||||
maxOutputTokens: MAX_OUTPUT_32K,
|
maxOutputTokens: MAX_OUTPUT_32K,
|
||||||
supportsVision: true,
|
supportsVision: true,
|
||||||
@@ -87,6 +151,19 @@ export const CODEX_MODELS: ModelDefinition[] = [
|
|||||||
tier: 'standard' as const,
|
tier: 'standard' as const,
|
||||||
hasReasoning: true,
|
hasReasoning: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: CODEX_MODEL_MAP.gpt5,
|
||||||
|
name: 'GPT-5',
|
||||||
|
modelString: CODEX_MODEL_MAP.gpt5,
|
||||||
|
provider: 'openai',
|
||||||
|
description: 'Base GPT-5 model.',
|
||||||
|
contextWindow: CONTEXT_WINDOW_128K,
|
||||||
|
maxOutputTokens: MAX_OUTPUT_16K,
|
||||||
|
supportsVision: true,
|
||||||
|
supportsTools: true,
|
||||||
|
tier: 'standard' as const,
|
||||||
|
hasReasoning: true,
|
||||||
|
},
|
||||||
];
|
];
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ import {
|
|||||||
extractTextFromContent,
|
extractTextFromContent,
|
||||||
classifyError,
|
classifyError,
|
||||||
getUserFriendlyErrorMessage,
|
getUserFriendlyErrorMessage,
|
||||||
|
createLogger,
|
||||||
} from '@automaker/utils';
|
} from '@automaker/utils';
|
||||||
import type {
|
import type {
|
||||||
ExecuteOptions,
|
ExecuteOptions,
|
||||||
@@ -29,9 +30,10 @@ import type {
|
|||||||
ModelDefinition,
|
ModelDefinition,
|
||||||
} from './types.js';
|
} from './types.js';
|
||||||
import {
|
import {
|
||||||
CODEX_MODEL_MAP,
|
|
||||||
supportsReasoningEffort,
|
supportsReasoningEffort,
|
||||||
validateBareModelId,
|
validateBareModelId,
|
||||||
|
calculateReasoningTimeout,
|
||||||
|
DEFAULT_TIMEOUT_MS,
|
||||||
type CodexApprovalPolicy,
|
type CodexApprovalPolicy,
|
||||||
type CodexSandboxMode,
|
type CodexSandboxMode,
|
||||||
type CodexAuthStatus,
|
type CodexAuthStatus,
|
||||||
@@ -44,6 +46,7 @@ import {
|
|||||||
getCodexTodoToolName,
|
getCodexTodoToolName,
|
||||||
} from './codex-tool-mapping.js';
|
} from './codex-tool-mapping.js';
|
||||||
import { SettingsService } from '../services/settings-service.js';
|
import { SettingsService } from '../services/settings-service.js';
|
||||||
|
import { createTempEnvOverride } from '../lib/auth-utils.js';
|
||||||
import { checkSandboxCompatibility } from '../lib/sdk-options.js';
|
import { checkSandboxCompatibility } from '../lib/sdk-options.js';
|
||||||
import { CODEX_MODELS } from './codex-models.js';
|
import { CODEX_MODELS } from './codex-models.js';
|
||||||
|
|
||||||
@@ -52,15 +55,10 @@ const CODEX_EXEC_SUBCOMMAND = 'exec';
|
|||||||
const CODEX_JSON_FLAG = '--json';
|
const CODEX_JSON_FLAG = '--json';
|
||||||
const CODEX_MODEL_FLAG = '--model';
|
const CODEX_MODEL_FLAG = '--model';
|
||||||
const CODEX_VERSION_FLAG = '--version';
|
const CODEX_VERSION_FLAG = '--version';
|
||||||
const CODEX_SANDBOX_FLAG = '--sandbox';
|
|
||||||
const CODEX_APPROVAL_FLAG = '--ask-for-approval';
|
|
||||||
const CODEX_SEARCH_FLAG = '--search';
|
|
||||||
const CODEX_OUTPUT_SCHEMA_FLAG = '--output-schema';
|
|
||||||
const CODEX_CONFIG_FLAG = '--config';
|
const CODEX_CONFIG_FLAG = '--config';
|
||||||
const CODEX_IMAGE_FLAG = '--image';
|
|
||||||
const CODEX_ADD_DIR_FLAG = '--add-dir';
|
const CODEX_ADD_DIR_FLAG = '--add-dir';
|
||||||
|
const CODEX_OUTPUT_SCHEMA_FLAG = '--output-schema';
|
||||||
const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check';
|
const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check';
|
||||||
const CODEX_RESUME_FLAG = 'resume';
|
|
||||||
const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';
|
const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';
|
||||||
const CODEX_YOLO_FLAG = '--dangerously-bypass-approvals-and-sandbox';
|
const CODEX_YOLO_FLAG = '--dangerously-bypass-approvals-and-sandbox';
|
||||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||||
@@ -89,10 +87,19 @@ const CODEX_ITEM_TYPES = {
|
|||||||
const SYSTEM_PROMPT_LABEL = 'System instructions';
|
const SYSTEM_PROMPT_LABEL = 'System instructions';
|
||||||
const HISTORY_HEADER = 'Current request:\n';
|
const HISTORY_HEADER = 'Current request:\n';
|
||||||
const TEXT_ENCODING = 'utf-8';
|
const TEXT_ENCODING = 'utf-8';
|
||||||
const DEFAULT_TIMEOUT_MS = 30000;
|
/**
|
||||||
const CONTEXT_WINDOW_256K = 256000;
|
* Default timeout for Codex CLI operations in milliseconds.
|
||||||
const MAX_OUTPUT_32K = 32000;
|
* This is the "no output" timeout - if the CLI doesn't produce any JSONL output
|
||||||
const MAX_OUTPUT_16K = 16000;
|
* for this duration, the process is killed. For reasoning models with high
|
||||||
|
* reasoning effort, this timeout is dynamically extended via calculateReasoningTimeout().
|
||||||
|
*
|
||||||
|
* For feature generation (which can generate 50+ features), we use a much longer
|
||||||
|
* base timeout (5 minutes) since Codex models are slower at generating large JSON responses.
|
||||||
|
*
|
||||||
|
* @see calculateReasoningTimeout from @automaker/types
|
||||||
|
*/
|
||||||
|
const CODEX_CLI_TIMEOUT_MS = DEFAULT_TIMEOUT_MS;
|
||||||
|
const CODEX_FEATURE_GENERATION_BASE_TIMEOUT_MS = 300000; // 5 minutes for feature generation
|
||||||
const SYSTEM_PROMPT_SEPARATOR = '\n\n';
|
const SYSTEM_PROMPT_SEPARATOR = '\n\n';
|
||||||
const CODEX_INSTRUCTIONS_DIR = '.codex';
|
const CODEX_INSTRUCTIONS_DIR = '.codex';
|
||||||
const CODEX_INSTRUCTIONS_SECTION = 'Codex Project Instructions';
|
const CODEX_INSTRUCTIONS_SECTION = 'Codex Project Instructions';
|
||||||
@@ -141,6 +148,7 @@ type CodexExecutionMode = typeof CODEX_EXECUTION_MODE_CLI | typeof CODEX_EXECUTI
|
|||||||
type CodexExecutionPlan = {
|
type CodexExecutionPlan = {
|
||||||
mode: CodexExecutionMode;
|
mode: CodexExecutionMode;
|
||||||
cliPath: string | null;
|
cliPath: string | null;
|
||||||
|
openAiApiKey?: string | null;
|
||||||
};
|
};
|
||||||
|
|
||||||
const ALLOWED_ENV_VARS = [
|
const ALLOWED_ENV_VARS = [
|
||||||
@@ -165,6 +173,22 @@ function buildEnv(): Record<string, string> {
|
|||||||
return env;
|
return env;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async function resolveOpenAiApiKey(): Promise<string | null> {
|
||||||
|
const envKey = process.env[OPENAI_API_KEY_ENV];
|
||||||
|
if (envKey) {
|
||||||
|
return envKey;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const settingsService = new SettingsService(getCodexSettingsDir());
|
||||||
|
const credentials = await settingsService.getCredentials();
|
||||||
|
const storedKey = credentials.apiKeys.openai?.trim();
|
||||||
|
return storedKey ? storedKey : null;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function hasMcpServersConfigured(options: ExecuteOptions): boolean {
|
function hasMcpServersConfigured(options: ExecuteOptions): boolean {
|
||||||
return Boolean(options.mcpServers && Object.keys(options.mcpServers).length > 0);
|
return Boolean(options.mcpServers && Object.keys(options.mcpServers).length > 0);
|
||||||
}
|
}
|
||||||
@@ -177,21 +201,60 @@ function isSdkEligible(options: ExecuteOptions): boolean {
|
|||||||
return isNoToolsRequested(options) && !hasMcpServersConfigured(options);
|
return isNoToolsRequested(options) && !hasMcpServersConfigured(options);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function isSdkEligibleWithApiKey(options: ExecuteOptions): boolean {
|
||||||
|
// When using an API key (not CLI OAuth), prefer SDK over CLI to avoid OAuth issues.
|
||||||
|
// SDK mode is used when MCP servers are not configured (MCP requires CLI).
|
||||||
|
// Tool requests are handled by the SDK, so we allow SDK mode even with tools.
|
||||||
|
return !hasMcpServersConfigured(options);
|
||||||
|
}
|
||||||
|
|
||||||
async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
|
async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<CodexExecutionPlan> {
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
const authIndicators = await getCodexAuthIndicators();
|
||||||
const hasApiKey = Boolean(process.env[OPENAI_API_KEY_ENV]);
|
const openAiApiKey = await resolveOpenAiApiKey();
|
||||||
const cliAuthenticated = authIndicators.hasOAuthToken || authIndicators.hasApiKey || hasApiKey;
|
const hasApiKey = Boolean(openAiApiKey);
|
||||||
const sdkEligible = isSdkEligible(options);
|
|
||||||
const cliAvailable = Boolean(cliPath);
|
const cliAvailable = Boolean(cliPath);
|
||||||
|
// CLI OAuth login takes priority: if the user has logged in via `codex login`,
|
||||||
|
// use the CLI regardless of whether an API key is also stored.
|
||||||
|
// hasOAuthToken = OAuth session from `codex login`
|
||||||
|
// authIndicators.hasApiKey = API key stored in Codex's own auth file (via `codex login --api-key`)
|
||||||
|
// Both are "CLI-native" auth — distinct from an API key stored in Automaker's credentials.
|
||||||
|
const hasCliNativeAuth = authIndicators.hasOAuthToken || authIndicators.hasApiKey;
|
||||||
|
const sdkEligible = isSdkEligible(options);
|
||||||
|
|
||||||
if (sdkEligible) {
|
// If CLI is available and the user authenticated via the CLI (`codex login`),
|
||||||
if (hasApiKey) {
|
// prefer CLI mode over SDK. This ensures `codex login` sessions take priority
|
||||||
|
// over API keys stored in Automaker's credentials.
|
||||||
|
if (cliAvailable && hasCliNativeAuth) {
|
||||||
|
return {
|
||||||
|
mode: CODEX_EXECUTION_MODE_CLI,
|
||||||
|
cliPath,
|
||||||
|
openAiApiKey,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// No CLI-native auth — prefer SDK when an API key is available.
|
||||||
|
// Using SDK with an API key avoids OAuth issues that can arise with the CLI.
|
||||||
|
// MCP servers still require CLI mode since the SDK doesn't support MCP.
|
||||||
|
if (hasApiKey && isSdkEligibleWithApiKey(options)) {
|
||||||
return {
|
return {
|
||||||
mode: CODEX_EXECUTION_MODE_SDK,
|
mode: CODEX_EXECUTION_MODE_SDK,
|
||||||
cliPath,
|
cliPath,
|
||||||
|
openAiApiKey,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MCP servers are requested with an API key but no CLI-native auth — use CLI mode
|
||||||
|
// with the API key passed as an environment variable.
|
||||||
|
if (hasApiKey && cliAvailable) {
|
||||||
|
return {
|
||||||
|
mode: CODEX_EXECUTION_MODE_CLI,
|
||||||
|
cliPath,
|
||||||
|
openAiApiKey,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sdkEligible) {
|
||||||
if (!cliAvailable) {
|
if (!cliAvailable) {
|
||||||
throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
|
throw new Error(ERROR_CODEX_SDK_AUTH_REQUIRED);
|
||||||
}
|
}
|
||||||
@@ -201,14 +264,9 @@ async function resolveCodexExecutionPlan(options: ExecuteOptions): Promise<Codex
|
|||||||
throw new Error(ERROR_CODEX_CLI_REQUIRED);
|
throw new Error(ERROR_CODEX_CLI_REQUIRED);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!cliAuthenticated) {
|
// At this point, neither hasCliNativeAuth nor hasApiKey is true,
|
||||||
|
// so authentication is required regardless.
|
||||||
throw new Error(ERROR_CODEX_AUTH_REQUIRED);
|
throw new Error(ERROR_CODEX_AUTH_REQUIRED);
|
||||||
}
|
|
||||||
|
|
||||||
return {
|
|
||||||
mode: CODEX_EXECUTION_MODE_CLI,
|
|
||||||
cliPath,
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function getEventType(event: Record<string, unknown>): string | null {
|
function getEventType(event: Record<string, unknown>): string | null {
|
||||||
@@ -658,6 +716,8 @@ async function loadCodexInstructions(cwd: string, enabled: boolean): Promise<str
|
|||||||
.join('\n\n');
|
.join('\n\n');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const logger = createLogger('CodexProvider');
|
||||||
|
|
||||||
export class CodexProvider extends BaseProvider {
|
export class CodexProvider extends BaseProvider {
|
||||||
getName(): string {
|
getName(): string {
|
||||||
return 'codex';
|
return 'codex';
|
||||||
@@ -698,7 +758,14 @@ export class CodexProvider extends BaseProvider {
|
|||||||
|
|
||||||
const executionPlan = await resolveCodexExecutionPlan(options);
|
const executionPlan = await resolveCodexExecutionPlan(options);
|
||||||
if (executionPlan.mode === CODEX_EXECUTION_MODE_SDK) {
|
if (executionPlan.mode === CODEX_EXECUTION_MODE_SDK) {
|
||||||
|
const cleanupEnv = executionPlan.openAiApiKey
|
||||||
|
? createTempEnvOverride({ [OPENAI_API_KEY_ENV]: executionPlan.openAiApiKey })
|
||||||
|
: null;
|
||||||
|
try {
|
||||||
yield* executeCodexSdkQuery(options, combinedSystemPrompt);
|
yield* executeCodexSdkQuery(options, combinedSystemPrompt);
|
||||||
|
} finally {
|
||||||
|
cleanupEnv?.();
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -712,15 +779,12 @@ export class CodexProvider extends BaseProvider {
|
|||||||
options.cwd,
|
options.cwd,
|
||||||
codexSettings.sandboxMode !== 'danger-full-access'
|
codexSettings.sandboxMode !== 'danger-full-access'
|
||||||
);
|
);
|
||||||
const resolvedSandboxMode = sandboxCheck.enabled
|
|
||||||
? codexSettings.sandboxMode
|
|
||||||
: 'danger-full-access';
|
|
||||||
if (!sandboxCheck.enabled && sandboxCheck.message) {
|
if (!sandboxCheck.enabled && sandboxCheck.message) {
|
||||||
console.warn(`[CodexProvider] ${sandboxCheck.message}`);
|
console.warn(`[CodexProvider] ${sandboxCheck.message}`);
|
||||||
}
|
}
|
||||||
const searchEnabled =
|
const searchEnabled =
|
||||||
codexSettings.enableWebSearch || resolveSearchEnabled(resolvedAllowedTools, restrictTools);
|
codexSettings.enableWebSearch || resolveSearchEnabled(resolvedAllowedTools, restrictTools);
|
||||||
const outputSchemaPath = await writeOutputSchemaFile(options.cwd, options.outputFormat);
|
const schemaPath = await writeOutputSchemaFile(options.cwd, options.outputFormat);
|
||||||
const imageBlocks = codexSettings.enableImages ? extractImageBlocks(options.prompt) : [];
|
const imageBlocks = codexSettings.enableImages ? extractImageBlocks(options.prompt) : [];
|
||||||
const imagePaths = await writeImageFiles(options.cwd, imageBlocks);
|
const imagePaths = await writeImageFiles(options.cwd, imageBlocks);
|
||||||
const approvalPolicy =
|
const approvalPolicy =
|
||||||
@@ -755,7 +819,7 @@ export class CodexProvider extends BaseProvider {
|
|||||||
overrides.push({ key: 'features.web_search_request', value: true });
|
overrides.push({ key: 'features.web_search_request', value: true });
|
||||||
}
|
}
|
||||||
|
|
||||||
const configOverrides = buildConfigOverrides(overrides);
|
const configOverrideArgs = buildConfigOverrides(overrides);
|
||||||
const preExecArgs: string[] = [];
|
const preExecArgs: string[] = [];
|
||||||
|
|
||||||
// Add additional directories with write access
|
// Add additional directories with write access
|
||||||
@@ -765,6 +829,12 @@ export class CodexProvider extends BaseProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If images were written to disk, add the image directory so the CLI can access them
|
||||||
|
if (imagePaths.length > 0) {
|
||||||
|
const imageDir = path.join(options.cwd, CODEX_INSTRUCTIONS_DIR, IMAGE_TEMP_DIR);
|
||||||
|
preExecArgs.push(CODEX_ADD_DIR_FLAG, imageDir);
|
||||||
|
}
|
||||||
|
|
||||||
// Model is already bare (no prefix) - validated by executeQuery
|
// Model is already bare (no prefix) - validated by executeQuery
|
||||||
const args = [
|
const args = [
|
||||||
CODEX_EXEC_SUBCOMMAND,
|
CODEX_EXEC_SUBCOMMAND,
|
||||||
@@ -774,16 +844,36 @@ export class CodexProvider extends BaseProvider {
|
|||||||
CODEX_MODEL_FLAG,
|
CODEX_MODEL_FLAG,
|
||||||
options.model,
|
options.model,
|
||||||
CODEX_JSON_FLAG,
|
CODEX_JSON_FLAG,
|
||||||
|
...configOverrideArgs,
|
||||||
|
...(schemaPath ? [CODEX_OUTPUT_SCHEMA_FLAG, schemaPath] : []),
|
||||||
'-', // Read prompt from stdin to avoid shell escaping issues
|
'-', // Read prompt from stdin to avoid shell escaping issues
|
||||||
];
|
];
|
||||||
|
|
||||||
|
const envOverrides = buildEnv();
|
||||||
|
if (executionPlan.openAiApiKey && !envOverrides[OPENAI_API_KEY_ENV]) {
|
||||||
|
envOverrides[OPENAI_API_KEY_ENV] = executionPlan.openAiApiKey;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate dynamic timeout based on reasoning effort.
|
||||||
|
// Higher reasoning effort (e.g., 'xhigh' for "xtra thinking" mode) requires more time
|
||||||
|
// for the model to generate reasoning tokens before producing output.
|
||||||
|
// This fixes GitHub issue #530 where features would get stuck with reasoning models.
|
||||||
|
//
|
||||||
|
// For feature generation with 'xhigh', use the extended 5-minute base timeout
|
||||||
|
// since generating 50+ features takes significantly longer than normal operations.
|
||||||
|
const baseTimeout =
|
||||||
|
options.reasoningEffort === 'xhigh'
|
||||||
|
? CODEX_FEATURE_GENERATION_BASE_TIMEOUT_MS
|
||||||
|
: CODEX_CLI_TIMEOUT_MS;
|
||||||
|
const timeout = calculateReasoningTimeout(options.reasoningEffort, baseTimeout);
|
||||||
|
|
||||||
const stream = spawnJSONLProcess({
|
const stream = spawnJSONLProcess({
|
||||||
command: commandPath,
|
command: commandPath,
|
||||||
args,
|
args,
|
||||||
cwd: options.cwd,
|
cwd: options.cwd,
|
||||||
env: buildEnv(),
|
env: envOverrides,
|
||||||
abortController: options.abortController,
|
abortController: options.abortController,
|
||||||
timeout: DEFAULT_TIMEOUT_MS,
|
timeout,
|
||||||
stdinData: promptText, // Pass prompt via stdin
|
stdinData: promptText, // Pass prompt via stdin
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -802,16 +892,36 @@ export class CodexProvider extends BaseProvider {
|
|||||||
|
|
||||||
// Enhance error message with helpful context
|
// Enhance error message with helpful context
|
||||||
let enhancedError = errorText;
|
let enhancedError = errorText;
|
||||||
if (errorText.toLowerCase().includes('rate limit')) {
|
const errorLower = errorText.toLowerCase();
|
||||||
|
if (errorLower.includes('rate limit')) {
|
||||||
enhancedError = `${errorText}\n\nTip: You're being rate limited. Try reducing concurrent tasks or waiting a few minutes before retrying.`;
|
enhancedError = `${errorText}\n\nTip: You're being rate limited. Try reducing concurrent tasks or waiting a few minutes before retrying.`;
|
||||||
|
} else if (errorLower.includes('authentication') || errorLower.includes('unauthorized')) {
|
||||||
|
enhancedError = `${errorText}\n\nTip: Check that your OPENAI_API_KEY is set correctly or run 'codex login' to authenticate.`;
|
||||||
} else if (
|
} else if (
|
||||||
errorText.toLowerCase().includes('authentication') ||
|
errorLower.includes('model does not exist') ||
|
||||||
errorText.toLowerCase().includes('unauthorized')
|
errorLower.includes('requested model does not exist') ||
|
||||||
|
errorLower.includes('do not have access') ||
|
||||||
|
errorLower.includes('model_not_found') ||
|
||||||
|
errorLower.includes('invalid_model')
|
||||||
) {
|
) {
|
||||||
enhancedError = `${errorText}\n\nTip: Check that your OPENAI_API_KEY is set correctly or run 'codex auth login' to authenticate.`;
|
enhancedError =
|
||||||
|
`${errorText}\n\nTip: The model '${options.model}' may not be available on your OpenAI plan. ` +
|
||||||
|
`See https://platform.openai.com/docs/models for available models. ` +
|
||||||
|
`Some models require a ChatGPT Pro/Plus subscription—authenticate with 'codex login' instead of an API key.`;
|
||||||
} else if (
|
} else if (
|
||||||
errorText.toLowerCase().includes('not found') ||
|
errorLower.includes('stream disconnected') ||
|
||||||
errorText.toLowerCase().includes('command not found')
|
errorLower.includes('stream ended') ||
|
||||||
|
errorLower.includes('connection reset')
|
||||||
|
) {
|
||||||
|
enhancedError =
|
||||||
|
`${errorText}\n\nTip: The connection to OpenAI was interrupted. This can happen due to:\n` +
|
||||||
|
`- Network instability\n` +
|
||||||
|
`- The model not being available on your plan\n` +
|
||||||
|
`- Server-side timeouts for long-running requests\n` +
|
||||||
|
`Try again, or switch to a different model.`;
|
||||||
|
} else if (
|
||||||
|
errorLower.includes('command not found') ||
|
||||||
|
errorLower.includes('is not recognized as an internal or external command')
|
||||||
) {
|
) {
|
||||||
enhancedError = `${errorText}\n\nTip: Make sure the Codex CLI is installed. Run 'npm install -g @openai/codex-cli' to install.`;
|
enhancedError = `${errorText}\n\nTip: Make sure the Codex CLI is installed. Run 'npm install -g @openai/codex-cli' to install.`;
|
||||||
}
|
}
|
||||||
@@ -967,21 +1077,10 @@ export class CodexProvider extends BaseProvider {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async detectInstallation(): Promise<InstallationStatus> {
|
async detectInstallation(): Promise<InstallationStatus> {
|
||||||
console.log('[CodexProvider.detectInstallation] Starting...');
|
|
||||||
|
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
const hasApiKey = Boolean(await resolveOpenAiApiKey());
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
|
||||||
const installed = !!cliPath;
|
const installed = !!cliPath;
|
||||||
|
|
||||||
console.log('[CodexProvider.detectInstallation] cliPath:', cliPath);
|
|
||||||
console.log('[CodexProvider.detectInstallation] hasApiKey:', hasApiKey);
|
|
||||||
console.log(
|
|
||||||
'[CodexProvider.detectInstallation] authIndicators:',
|
|
||||||
JSON.stringify(authIndicators)
|
|
||||||
);
|
|
||||||
console.log('[CodexProvider.detectInstallation] installed:', installed);
|
|
||||||
|
|
||||||
let version = '';
|
let version = '';
|
||||||
if (installed) {
|
if (installed) {
|
||||||
try {
|
try {
|
||||||
@@ -991,20 +1090,16 @@ export class CodexProvider extends BaseProvider {
|
|||||||
cwd: process.cwd(),
|
cwd: process.cwd(),
|
||||||
});
|
});
|
||||||
version = result.stdout.trim();
|
version = result.stdout.trim();
|
||||||
console.log('[CodexProvider.detectInstallation] version:', version);
|
} catch {
|
||||||
} catch (error) {
|
|
||||||
console.log('[CodexProvider.detectInstallation] Error getting version:', error);
|
|
||||||
version = '';
|
version = '';
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine auth status - always verify with CLI, never assume authenticated
|
// Determine auth status - always verify with CLI, never assume authenticated
|
||||||
console.log('[CodexProvider.detectInstallation] Calling checkCodexAuthentication...');
|
|
||||||
const authCheck = await checkCodexAuthentication(cliPath);
|
const authCheck = await checkCodexAuthentication(cliPath);
|
||||||
console.log('[CodexProvider.detectInstallation] authCheck result:', JSON.stringify(authCheck));
|
|
||||||
const authenticated = authCheck.authenticated;
|
const authenticated = authCheck.authenticated;
|
||||||
|
|
||||||
const result = {
|
return {
|
||||||
installed,
|
installed,
|
||||||
path: cliPath || undefined,
|
path: cliPath || undefined,
|
||||||
version: version || undefined,
|
version: version || undefined,
|
||||||
@@ -1012,8 +1107,6 @@ export class CodexProvider extends BaseProvider {
|
|||||||
hasApiKey,
|
hasApiKey,
|
||||||
authenticated,
|
authenticated,
|
||||||
};
|
};
|
||||||
console.log('[CodexProvider.detectInstallation] Final result:', JSON.stringify(result));
|
|
||||||
return result;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
getAvailableModels(): ModelDefinition[] {
|
getAvailableModels(): ModelDefinition[] {
|
||||||
@@ -1025,36 +1118,24 @@ export class CodexProvider extends BaseProvider {
|
|||||||
* Check authentication status for Codex CLI
|
* Check authentication status for Codex CLI
|
||||||
*/
|
*/
|
||||||
async checkAuth(): Promise<CodexAuthStatus> {
|
async checkAuth(): Promise<CodexAuthStatus> {
|
||||||
console.log('[CodexProvider.checkAuth] Starting auth check...');
|
|
||||||
|
|
||||||
const cliPath = await findCodexCliPath();
|
const cliPath = await findCodexCliPath();
|
||||||
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
|
const hasApiKey = Boolean(await resolveOpenAiApiKey());
|
||||||
const authIndicators = await getCodexAuthIndicators();
|
const authIndicators = await getCodexAuthIndicators();
|
||||||
|
|
||||||
console.log('[CodexProvider.checkAuth] cliPath:', cliPath);
|
|
||||||
console.log('[CodexProvider.checkAuth] hasApiKey:', hasApiKey);
|
|
||||||
console.log('[CodexProvider.checkAuth] authIndicators:', JSON.stringify(authIndicators));
|
|
||||||
|
|
||||||
// Check for API key in environment
|
// Check for API key in environment
|
||||||
if (hasApiKey) {
|
if (hasApiKey) {
|
||||||
console.log('[CodexProvider.checkAuth] Has API key, returning authenticated');
|
|
||||||
return { authenticated: true, method: 'api_key' };
|
return { authenticated: true, method: 'api_key' };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for OAuth/token from Codex CLI
|
// Check for OAuth/token from Codex CLI
|
||||||
if (authIndicators.hasOAuthToken || authIndicators.hasApiKey) {
|
if (authIndicators.hasOAuthToken || authIndicators.hasApiKey) {
|
||||||
console.log(
|
|
||||||
'[CodexProvider.checkAuth] Has OAuth token or API key in auth file, returning authenticated'
|
|
||||||
);
|
|
||||||
return { authenticated: true, method: 'oauth' };
|
return { authenticated: true, method: 'oauth' };
|
||||||
}
|
}
|
||||||
|
|
||||||
// CLI is installed but not authenticated via indicators - try CLI command
|
// CLI is installed but not authenticated via indicators - try CLI command
|
||||||
console.log('[CodexProvider.checkAuth] No indicators found, trying CLI command...');
|
|
||||||
if (cliPath) {
|
if (cliPath) {
|
||||||
try {
|
try {
|
||||||
// Try 'codex login status' first (same as checkCodexAuthentication)
|
// Try 'codex login status' first (same as checkCodexAuthentication)
|
||||||
console.log('[CodexProvider.checkAuth] Running: ' + cliPath + ' login status');
|
|
||||||
const result = await spawnProcess({
|
const result = await spawnProcess({
|
||||||
command: cliPath || CODEX_COMMAND,
|
command: cliPath || CODEX_COMMAND,
|
||||||
args: ['login', 'status'],
|
args: ['login', 'status'],
|
||||||
@@ -1064,26 +1145,19 @@ export class CodexProvider extends BaseProvider {
|
|||||||
TERM: 'dumb',
|
TERM: 'dumb',
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
console.log('[CodexProvider.checkAuth] login status result:');
|
|
||||||
console.log('[CodexProvider.checkAuth] exitCode:', result.exitCode);
|
|
||||||
console.log('[CodexProvider.checkAuth] stdout:', JSON.stringify(result.stdout));
|
|
||||||
console.log('[CodexProvider.checkAuth] stderr:', JSON.stringify(result.stderr));
|
|
||||||
|
|
||||||
// Check both stdout and stderr - Codex CLI outputs to stderr
|
// Check both stdout and stderr - Codex CLI outputs to stderr
|
||||||
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
|
||||||
const isLoggedIn = combinedOutput.includes('logged in');
|
const isLoggedIn = combinedOutput.includes('logged in');
|
||||||
console.log('[CodexProvider.checkAuth] isLoggedIn:', isLoggedIn);
|
|
||||||
|
|
||||||
if (result.exitCode === 0 && isLoggedIn) {
|
if (result.exitCode === 0 && isLoggedIn) {
|
||||||
console.log('[CodexProvider.checkAuth] CLI says logged in, returning authenticated');
|
|
||||||
return { authenticated: true, method: 'oauth' };
|
return { authenticated: true, method: 'oauth' };
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.log('[CodexProvider.checkAuth] Error running login status:', error);
|
logger.warn('Error running login status command during auth check:', error);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log('[CodexProvider.checkAuth] Not authenticated');
|
|
||||||
return { authenticated: false, method: 'none' };
|
return { authenticated: false, method: 'none' };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -15,6 +15,9 @@ const SDK_HISTORY_HEADER = 'Current request:\n';
|
|||||||
const DEFAULT_RESPONSE_TEXT = '';
|
const DEFAULT_RESPONSE_TEXT = '';
|
||||||
const SDK_ERROR_DETAILS_LABEL = 'Details:';
|
const SDK_ERROR_DETAILS_LABEL = 'Details:';
|
||||||
|
|
||||||
|
type SdkReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
|
||||||
|
const SDK_REASONING_EFFORTS = new Set<string>(['minimal', 'low', 'medium', 'high', 'xhigh']);
|
||||||
|
|
||||||
type PromptBlock = {
|
type PromptBlock = {
|
||||||
type: string;
|
type: string;
|
||||||
text?: string;
|
text?: string;
|
||||||
@@ -99,38 +102,52 @@ export async function* executeCodexSdkQuery(
|
|||||||
const apiKey = resolveApiKey();
|
const apiKey = resolveApiKey();
|
||||||
const codex = new Codex({ apiKey });
|
const codex = new Codex({ apiKey });
|
||||||
|
|
||||||
|
// Build thread options with model
|
||||||
|
// The model must be passed to startThread/resumeThread so the SDK
|
||||||
|
// knows which model to use for the conversation. Without this,
|
||||||
|
// the SDK may use a default model that the user doesn't have access to.
|
||||||
|
const threadOptions: {
|
||||||
|
model?: string;
|
||||||
|
modelReasoningEffort?: SdkReasoningEffort;
|
||||||
|
} = {};
|
||||||
|
|
||||||
|
if (options.model) {
|
||||||
|
threadOptions.model = options.model;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add reasoning effort to thread options if model supports it
|
||||||
|
if (
|
||||||
|
options.reasoningEffort &&
|
||||||
|
options.model &&
|
||||||
|
supportsReasoningEffort(options.model) &&
|
||||||
|
options.reasoningEffort !== 'none' &&
|
||||||
|
SDK_REASONING_EFFORTS.has(options.reasoningEffort)
|
||||||
|
) {
|
||||||
|
threadOptions.modelReasoningEffort = options.reasoningEffort as SdkReasoningEffort;
|
||||||
|
}
|
||||||
|
|
||||||
// Resume existing thread or start new one
|
// Resume existing thread or start new one
|
||||||
let thread;
|
let thread;
|
||||||
if (options.sdkSessionId) {
|
if (options.sdkSessionId) {
|
||||||
try {
|
try {
|
||||||
thread = codex.resumeThread(options.sdkSessionId);
|
thread = codex.resumeThread(options.sdkSessionId, threadOptions);
|
||||||
} catch {
|
} catch {
|
||||||
// If resume fails, start a new thread
|
// If resume fails, start a new thread
|
||||||
thread = codex.startThread();
|
thread = codex.startThread(threadOptions);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
thread = codex.startThread();
|
thread = codex.startThread(threadOptions);
|
||||||
}
|
}
|
||||||
|
|
||||||
const promptText = buildPromptText(options, systemPrompt);
|
const promptText = buildPromptText(options, systemPrompt);
|
||||||
|
|
||||||
// Build run options with reasoning effort if supported
|
// Build run options
|
||||||
const runOptions: {
|
const runOptions: {
|
||||||
signal?: AbortSignal;
|
signal?: AbortSignal;
|
||||||
reasoning?: { effort: string };
|
|
||||||
} = {
|
} = {
|
||||||
signal: options.abortController?.signal,
|
signal: options.abortController?.signal,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Add reasoning effort if model supports it and reasoningEffort is specified
|
|
||||||
if (
|
|
||||||
options.reasoningEffort &&
|
|
||||||
supportsReasoningEffort(options.model) &&
|
|
||||||
options.reasoningEffort !== 'none'
|
|
||||||
) {
|
|
||||||
runOptions.reasoning = { effort: options.reasoningEffort };
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the query
|
// Run the query
|
||||||
const result = await thread.run(promptText, runOptions);
|
const result = await thread.run(promptText, runOptions);
|
||||||
|
|
||||||
@@ -160,10 +177,42 @@ export async function* executeCodexSdkQuery(
|
|||||||
} catch (error) {
|
} catch (error) {
|
||||||
const errorInfo = classifyError(error);
|
const errorInfo = classifyError(error);
|
||||||
const userMessage = getUserFriendlyErrorMessage(error);
|
const userMessage = getUserFriendlyErrorMessage(error);
|
||||||
const combinedMessage = buildSdkErrorMessage(errorInfo.message, userMessage);
|
let combinedMessage = buildSdkErrorMessage(errorInfo.message, userMessage);
|
||||||
|
|
||||||
|
// Enhance error messages with actionable tips for common Codex issues
|
||||||
|
// Normalize inputs to avoid crashes from nullish values
|
||||||
|
const errorLower = (errorInfo?.message ?? '').toLowerCase();
|
||||||
|
const modelLabel = options?.model ?? '<unknown model>';
|
||||||
|
|
||||||
|
if (
|
||||||
|
errorLower.includes('does not exist') ||
|
||||||
|
errorLower.includes('model_not_found') ||
|
||||||
|
errorLower.includes('invalid_model')
|
||||||
|
) {
|
||||||
|
// Model not found - provide helpful guidance
|
||||||
|
combinedMessage +=
|
||||||
|
`\n\nTip: The model '${modelLabel}' may not be available on your OpenAI plan. ` +
|
||||||
|
`Some models (like gpt-5.3-codex) require a ChatGPT Pro/Plus subscription and OAuth login via 'codex login'. ` +
|
||||||
|
`Try using a different model (e.g., gpt-5.1 or gpt-5.2), or authenticate with 'codex login' instead of an API key.`;
|
||||||
|
} else if (
|
||||||
|
errorLower.includes('stream disconnected') ||
|
||||||
|
errorLower.includes('stream ended') ||
|
||||||
|
errorLower.includes('connection reset') ||
|
||||||
|
errorLower.includes('socket hang up')
|
||||||
|
) {
|
||||||
|
// Stream disconnection - provide helpful guidance
|
||||||
|
combinedMessage +=
|
||||||
|
`\n\nTip: The connection to OpenAI was interrupted. This can happen due to:\n` +
|
||||||
|
`- Network instability\n` +
|
||||||
|
`- The model not being available on your plan (try 'codex login' for OAuth authentication)\n` +
|
||||||
|
`- Server-side timeouts for long-running requests\n` +
|
||||||
|
`Try again, or switch to a different model.`;
|
||||||
|
}
|
||||||
|
|
||||||
console.error('[CodexSDK] executeQuery() error during execution:', {
|
console.error('[CodexSDK] executeQuery() error during execution:', {
|
||||||
type: errorInfo.type,
|
type: errorInfo.type,
|
||||||
message: errorInfo.message,
|
message: errorInfo.message,
|
||||||
|
model: options.model,
|
||||||
isRateLimit: errorInfo.isRateLimit,
|
isRateLimit: errorInfo.isRateLimit,
|
||||||
retryAfter: errorInfo.retryAfter,
|
retryAfter: errorInfo.retryAfter,
|
||||||
stack: error instanceof Error ? error.stack : undefined,
|
stack: error instanceof Error ? error.stack : undefined,
|
||||||
|
|||||||
938
apps/server/src/providers/copilot-provider.ts
Normal file
938
apps/server/src/providers/copilot-provider.ts
Normal file
@@ -0,0 +1,938 @@
|
|||||||
|
/**
|
||||||
|
* Copilot Provider - Executes queries using the GitHub Copilot SDK
|
||||||
|
*
|
||||||
|
* Uses the official @github/copilot-sdk for:
|
||||||
|
* - Session management and streaming responses
|
||||||
|
* - GitHub OAuth authentication (via gh CLI)
|
||||||
|
* - Tool call handling and permission management
|
||||||
|
* - Runtime model discovery
|
||||||
|
*
|
||||||
|
* Based on https://github.com/github/copilot-sdk
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import * as fs from 'fs/promises';
|
||||||
|
import * as path from 'path';
|
||||||
|
import * as os from 'os';
|
||||||
|
import { CliProvider, type CliSpawnConfig, type CliErrorInfo } from './cli-provider.js';
|
||||||
|
import type {
|
||||||
|
ProviderConfig,
|
||||||
|
ExecuteOptions,
|
||||||
|
ProviderMessage,
|
||||||
|
InstallationStatus,
|
||||||
|
ModelDefinition,
|
||||||
|
} from './types.js';
|
||||||
|
// Note: validateBareModelId is not used because Copilot's bare model IDs
|
||||||
|
// legitimately contain prefixes like claude-, gemini-, gpt-
|
||||||
|
import {
|
||||||
|
COPILOT_MODEL_MAP,
|
||||||
|
type CopilotAuthStatus,
|
||||||
|
type CopilotRuntimeModel,
|
||||||
|
} from '@automaker/types';
|
||||||
|
import { createLogger, isAbortError } from '@automaker/utils';
|
||||||
|
import { CopilotClient, type PermissionRequest } from '@github/copilot-sdk';
|
||||||
|
import {
|
||||||
|
normalizeTodos,
|
||||||
|
normalizeFilePathInput,
|
||||||
|
normalizeCommandInput,
|
||||||
|
normalizePatternInput,
|
||||||
|
} from './tool-normalization.js';
|
||||||
|
|
||||||
|
// Create logger for this module
|
||||||
|
const logger = createLogger('CopilotProvider');
|
||||||
|
|
||||||
|
// Default bare model (without copilot- prefix) for SDK calls
|
||||||
|
const DEFAULT_BARE_MODEL = 'claude-sonnet-4.6';
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// SDK Event Types (from @github/copilot-sdk)
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* SDK session event data types
|
||||||
|
*/
|
||||||
|
interface SdkEvent {
|
||||||
|
type: string;
|
||||||
|
data?: unknown;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface SdkMessageEvent extends SdkEvent {
|
||||||
|
type: 'assistant.message';
|
||||||
|
data: {
|
||||||
|
content: string;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: SdkMessageDeltaEvent is not used - we skip delta events to reduce noise
|
||||||
|
// The final assistant.message event contains the complete content
|
||||||
|
|
||||||
|
interface SdkToolExecutionStartEvent extends SdkEvent {
|
||||||
|
type: 'tool.execution_start';
|
||||||
|
data: {
|
||||||
|
toolName: string;
|
||||||
|
toolCallId: string;
|
||||||
|
input?: Record<string, unknown>;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
interface SdkToolExecutionEndEvent extends SdkEvent {
|
||||||
|
type: 'tool.execution_end';
|
||||||
|
data: {
|
||||||
|
toolName: string;
|
||||||
|
toolCallId: string;
|
||||||
|
result?: string;
|
||||||
|
error?: string;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
interface SdkSessionErrorEvent extends SdkEvent {
|
||||||
|
type: 'session.error';
|
||||||
|
data: {
|
||||||
|
message: string;
|
||||||
|
code?: string;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// Error Codes
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
export enum CopilotErrorCode {
|
||||||
|
NOT_INSTALLED = 'COPILOT_NOT_INSTALLED',
|
||||||
|
NOT_AUTHENTICATED = 'COPILOT_NOT_AUTHENTICATED',
|
||||||
|
RATE_LIMITED = 'COPILOT_RATE_LIMITED',
|
||||||
|
MODEL_UNAVAILABLE = 'COPILOT_MODEL_UNAVAILABLE',
|
||||||
|
NETWORK_ERROR = 'COPILOT_NETWORK_ERROR',
|
||||||
|
PROCESS_CRASHED = 'COPILOT_PROCESS_CRASHED',
|
||||||
|
TIMEOUT = 'COPILOT_TIMEOUT',
|
||||||
|
CLI_ERROR = 'COPILOT_CLI_ERROR',
|
||||||
|
SDK_ERROR = 'COPILOT_SDK_ERROR',
|
||||||
|
UNKNOWN = 'COPILOT_UNKNOWN_ERROR',
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface CopilotError extends Error {
|
||||||
|
code: CopilotErrorCode;
|
||||||
|
recoverable: boolean;
|
||||||
|
suggestion?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// Tool Name Normalization
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Copilot SDK tool name to standard tool name mapping
|
||||||
|
*
|
||||||
|
* Maps Copilot CLI tool names to our standard tool names for consistent UI display.
|
||||||
|
* Tool names are case-insensitive (normalized to lowercase before lookup).
|
||||||
|
*/
|
||||||
|
const COPILOT_TOOL_NAME_MAP: Record<string, string> = {
|
||||||
|
// File operations
|
||||||
|
read_file: 'Read',
|
||||||
|
read: 'Read',
|
||||||
|
view: 'Read', // Copilot uses 'view' for reading files
|
||||||
|
read_many_files: 'Read',
|
||||||
|
write_file: 'Write',
|
||||||
|
write: 'Write',
|
||||||
|
create_file: 'Write',
|
||||||
|
edit_file: 'Edit',
|
||||||
|
edit: 'Edit',
|
||||||
|
replace: 'Edit',
|
||||||
|
patch: 'Edit',
|
||||||
|
// Shell operations
|
||||||
|
run_shell: 'Bash',
|
||||||
|
run_shell_command: 'Bash',
|
||||||
|
shell: 'Bash',
|
||||||
|
bash: 'Bash',
|
||||||
|
execute: 'Bash',
|
||||||
|
terminal: 'Bash',
|
||||||
|
// Search operations
|
||||||
|
search: 'Grep',
|
||||||
|
grep: 'Grep',
|
||||||
|
search_file_content: 'Grep',
|
||||||
|
find_files: 'Glob',
|
||||||
|
glob: 'Glob',
|
||||||
|
list_dir: 'Ls',
|
||||||
|
list_directory: 'Ls',
|
||||||
|
ls: 'Ls',
|
||||||
|
// Web operations
|
||||||
|
web_fetch: 'WebFetch',
|
||||||
|
fetch: 'WebFetch',
|
||||||
|
web_search: 'WebSearch',
|
||||||
|
search_web: 'WebSearch',
|
||||||
|
google_web_search: 'WebSearch',
|
||||||
|
// Todo operations
|
||||||
|
todo_write: 'TodoWrite',
|
||||||
|
write_todos: 'TodoWrite',
|
||||||
|
update_todos: 'TodoWrite',
|
||||||
|
// Planning/intent operations (Copilot-specific)
|
||||||
|
report_intent: 'ReportIntent', // Keep as-is, it's a planning tool
|
||||||
|
think: 'Think',
|
||||||
|
plan: 'Plan',
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize Copilot tool names to standard tool names
|
||||||
|
*/
|
||||||
|
function normalizeCopilotToolName(copilotToolName: string): string {
|
||||||
|
const lowerName = copilotToolName.toLowerCase();
|
||||||
|
return COPILOT_TOOL_NAME_MAP[lowerName] || copilotToolName;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize Copilot tool input parameters to standard format
|
||||||
|
*
|
||||||
|
* Maps Copilot's parameter names to our standard parameter names.
|
||||||
|
* Uses shared utilities from tool-normalization.ts for common normalizations.
|
||||||
|
*/
|
||||||
|
function normalizeCopilotToolInput(
|
||||||
|
toolName: string,
|
||||||
|
input: Record<string, unknown>
|
||||||
|
): Record<string, unknown> {
|
||||||
|
const normalizedName = normalizeCopilotToolName(toolName);
|
||||||
|
|
||||||
|
// Normalize todo_write / write_todos: ensure proper format
|
||||||
|
if (normalizedName === 'TodoWrite' && Array.isArray(input.todos)) {
|
||||||
|
return { todos: normalizeTodos(input.todos) };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize file path parameters for Read/Write/Edit tools
|
||||||
|
if (normalizedName === 'Read' || normalizedName === 'Write' || normalizedName === 'Edit') {
|
||||||
|
return normalizeFilePathInput(input);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize shell command parameters for Bash tool
|
||||||
|
if (normalizedName === 'Bash') {
|
||||||
|
return normalizeCommandInput(input);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize search parameters for Grep tool
|
||||||
|
if (normalizedName === 'Grep') {
|
||||||
|
return normalizePatternInput(input);
|
||||||
|
}
|
||||||
|
|
||||||
|
return input;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* CopilotProvider - Integrates GitHub Copilot SDK as an AI provider
|
||||||
|
*
|
||||||
|
* Features:
|
||||||
|
* - GitHub OAuth authentication
|
||||||
|
* - SDK-based session management
|
||||||
|
* - Runtime model discovery
|
||||||
|
* - Tool call normalization
|
||||||
|
* - Per-execution working directory support
|
||||||
|
*/
|
||||||
|
export class CopilotProvider extends CliProvider {
|
||||||
|
private runtimeModels: CopilotRuntimeModel[] | null = null;
|
||||||
|
|
||||||
|
constructor(config: ProviderConfig = {}) {
|
||||||
|
super(config);
|
||||||
|
// Trigger CLI detection on construction
|
||||||
|
this.ensureCliDetected();
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==========================================================================
|
||||||
|
// CliProvider Abstract Method Implementations
|
||||||
|
// ==========================================================================
|
||||||
|
|
||||||
|
getName(): string {
|
||||||
|
return 'copilot';
|
||||||
|
}
|
||||||
|
|
||||||
|
getCliName(): string {
|
||||||
|
return 'copilot';
|
||||||
|
}
|
||||||
|
|
||||||
|
getSpawnConfig(): CliSpawnConfig {
|
||||||
|
return {
|
||||||
|
windowsStrategy: 'npx', // Copilot CLI can be run via npx
|
||||||
|
npxPackage: '@github/copilot', // Official GitHub Copilot CLI package
|
||||||
|
commonPaths: {
|
||||||
|
linux: [
|
||||||
|
path.join(os.homedir(), '.local/bin/copilot'),
|
||||||
|
'/usr/local/bin/copilot',
|
||||||
|
path.join(os.homedir(), '.npm-global/bin/copilot'),
|
||||||
|
],
|
||||||
|
darwin: [
|
||||||
|
path.join(os.homedir(), '.local/bin/copilot'),
|
||||||
|
'/usr/local/bin/copilot',
|
||||||
|
'/opt/homebrew/bin/copilot',
|
||||||
|
path.join(os.homedir(), '.npm-global/bin/copilot'),
|
||||||
|
],
|
||||||
|
win32: [
|
||||||
|
path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'copilot.cmd'),
|
||||||
|
path.join(os.homedir(), '.npm-global', 'copilot.cmd'),
|
||||||
|
],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract prompt text from ExecuteOptions
|
||||||
|
*
|
||||||
|
* Note: CopilotProvider does not yet support vision/image inputs.
|
||||||
|
* If non-text content is provided, an error is thrown.
|
||||||
|
*/
|
||||||
|
private extractPromptText(options: ExecuteOptions): string {
|
||||||
|
if (typeof options.prompt === 'string') {
|
||||||
|
return options.prompt;
|
||||||
|
} else if (Array.isArray(options.prompt)) {
|
||||||
|
// Check for non-text content (images, etc.) which we don't support yet
|
||||||
|
const hasNonText = options.prompt.some((p) => p.type !== 'text');
|
||||||
|
if (hasNonText) {
|
||||||
|
throw new Error(
|
||||||
|
'CopilotProvider does not yet support non-text prompt parts (e.g., images). ' +
|
||||||
|
'Please use text-only prompts or switch to a provider that supports vision.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return options.prompt
|
||||||
|
.filter((p) => p.type === 'text' && p.text)
|
||||||
|
.map((p) => p.text)
|
||||||
|
.join('\n');
|
||||||
|
} else {
|
||||||
|
throw new Error('Invalid prompt format');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Not used with SDK approach - kept for interface compatibility
|
||||||
|
*/
|
||||||
|
buildCliArgs(_options: ExecuteOptions): string[] {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert SDK event to AutoMaker ProviderMessage format
|
||||||
|
*/
|
||||||
|
normalizeEvent(event: unknown): ProviderMessage | null {
|
||||||
|
const sdkEvent = event as SdkEvent;
|
||||||
|
|
||||||
|
switch (sdkEvent.type) {
|
||||||
|
case 'assistant.message': {
|
||||||
|
const messageEvent = sdkEvent as SdkMessageEvent;
|
||||||
|
return {
|
||||||
|
type: 'assistant',
|
||||||
|
message: {
|
||||||
|
role: 'assistant',
|
||||||
|
content: [{ type: 'text', text: messageEvent.data.content }],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'assistant.message_delta': {
|
||||||
|
// Skip delta events - they create too much noise
|
||||||
|
// The final assistant.message event has the complete content
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'tool.execution_start': {
|
||||||
|
const toolEvent = sdkEvent as SdkToolExecutionStartEvent;
|
||||||
|
const normalizedName = normalizeCopilotToolName(toolEvent.data.toolName);
|
||||||
|
const normalizedInput = toolEvent.data.input
|
||||||
|
? normalizeCopilotToolInput(toolEvent.data.toolName, toolEvent.data.input)
|
||||||
|
: {};
|
||||||
|
|
||||||
|
return {
|
||||||
|
type: 'assistant',
|
||||||
|
message: {
|
||||||
|
role: 'assistant',
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'tool_use',
|
||||||
|
name: normalizedName,
|
||||||
|
tool_use_id: toolEvent.data.toolCallId,
|
||||||
|
input: normalizedInput,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'tool.execution_end': {
|
||||||
|
const toolResultEvent = sdkEvent as SdkToolExecutionEndEvent;
|
||||||
|
const isError = !!toolResultEvent.data.error;
|
||||||
|
const content = isError
|
||||||
|
? `[ERROR] ${toolResultEvent.data.error}`
|
||||||
|
: toolResultEvent.data.result || '';
|
||||||
|
|
||||||
|
return {
|
||||||
|
type: 'assistant',
|
||||||
|
message: {
|
||||||
|
role: 'assistant',
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'tool_result',
|
||||||
|
tool_use_id: toolResultEvent.data.toolCallId,
|
||||||
|
content,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'session.idle': {
|
||||||
|
logger.debug('Copilot session idle');
|
||||||
|
return {
|
||||||
|
type: 'result',
|
||||||
|
subtype: 'success',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'session.error': {
|
||||||
|
const errorEvent = sdkEvent as SdkSessionErrorEvent;
|
||||||
|
return {
|
||||||
|
type: 'error',
|
||||||
|
error: errorEvent.data.message || 'Unknown error',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
logger.debug(`Unknown Copilot SDK event type: ${sdkEvent.type}`);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==========================================================================
|
||||||
|
// CliProvider Overrides
|
||||||
|
// ==========================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Override error mapping for Copilot-specific error codes
|
||||||
|
*/
|
||||||
|
protected mapError(stderr: string, exitCode: number | null): CliErrorInfo {
|
||||||
|
const lower = stderr.toLowerCase();
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('not authenticated') ||
|
||||||
|
lower.includes('please log in') ||
|
||||||
|
lower.includes('unauthorized') ||
|
||||||
|
lower.includes('login required') ||
|
||||||
|
lower.includes('authentication required') ||
|
||||||
|
lower.includes('github login')
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: CopilotErrorCode.NOT_AUTHENTICATED,
|
||||||
|
message: 'GitHub Copilot is not authenticated',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'Run "gh auth login" or "copilot auth login" to authenticate with GitHub',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('rate limit') ||
|
||||||
|
lower.includes('too many requests') ||
|
||||||
|
lower.includes('429') ||
|
||||||
|
lower.includes('quota exceeded')
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: CopilotErrorCode.RATE_LIMITED,
|
||||||
|
message: 'Copilot API rate limit exceeded',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'Wait a few minutes and try again',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('model not available') ||
|
||||||
|
lower.includes('invalid model') ||
|
||||||
|
lower.includes('unknown model') ||
|
||||||
|
lower.includes('model not found') ||
|
||||||
|
(lower.includes('not found') && lower.includes('404'))
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: CopilotErrorCode.MODEL_UNAVAILABLE,
|
||||||
|
message: 'Requested model is not available',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: `Try using "${DEFAULT_BARE_MODEL}" or select a different model`,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('network') ||
|
||||||
|
lower.includes('connection') ||
|
||||||
|
lower.includes('econnrefused') ||
|
||||||
|
lower.includes('timeout')
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: CopilotErrorCode.NETWORK_ERROR,
|
||||||
|
message: 'Network connection error',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'Check your internet connection and try again',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (exitCode === 137 || lower.includes('killed') || lower.includes('sigterm')) {
|
||||||
|
return {
|
||||||
|
code: CopilotErrorCode.PROCESS_CRASHED,
|
||||||
|
message: 'Copilot CLI process was terminated',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'The process may have run out of memory. Try a simpler task.',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
code: CopilotErrorCode.UNKNOWN,
|
||||||
|
message: stderr || `Copilot CLI exited with code ${exitCode}`,
|
||||||
|
recoverable: false,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Override install instructions for Copilot-specific guidance
|
||||||
|
*/
|
||||||
|
protected getInstallInstructions(): string {
|
||||||
|
return 'Install with: npm install -g @github/copilot (or visit https://github.com/github/copilot)';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a prompt using Copilot SDK with real-time streaming
|
||||||
|
*
|
||||||
|
* Creates a new CopilotClient for each execution with the correct working directory.
|
||||||
|
* Streams tool execution events in real-time for UI display.
|
||||||
|
*/
|
||||||
|
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
|
||||||
|
// Note: We don't use validateBareModelId here because Copilot's model IDs
|
||||||
|
// legitimately contain prefixes like claude-, gemini-, gpt- which are the
|
||||||
|
// actual model names from the Copilot CLI. We only need to ensure the
|
||||||
|
// copilot- prefix has been stripped by the ProviderFactory.
|
||||||
|
if (options.model?.startsWith('copilot-')) {
|
||||||
|
throw new Error(
|
||||||
|
`[CopilotProvider] Model ID should not have 'copilot-' prefix. Got: '${options.model}'. ` +
|
||||||
|
`The ProviderFactory should strip this prefix before passing to the provider.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!this.cliPath) {
|
||||||
|
throw this.createError(
|
||||||
|
CopilotErrorCode.NOT_INSTALLED,
|
||||||
|
'Copilot CLI is not installed',
|
||||||
|
true,
|
||||||
|
this.getInstallInstructions()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const promptText = this.extractPromptText(options);
|
||||||
|
const bareModel = options.model || DEFAULT_BARE_MODEL;
|
||||||
|
const workingDirectory = options.cwd || process.cwd();
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
`CopilotProvider.executeQuery called with model: "${bareModel}", cwd: "${workingDirectory}"`
|
||||||
|
);
|
||||||
|
logger.debug(`Prompt length: ${promptText.length} characters`);
|
||||||
|
|
||||||
|
// Create a client for this execution with the correct working directory
|
||||||
|
const client = new CopilotClient({
|
||||||
|
logLevel: 'warning',
|
||||||
|
autoRestart: false,
|
||||||
|
cwd: workingDirectory,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Use an async queue to bridge callback-based SDK events to async generator
|
||||||
|
const eventQueue: SdkEvent[] = [];
|
||||||
|
let resolveWaiting: (() => void) | null = null;
|
||||||
|
let sessionComplete = false;
|
||||||
|
let sessionError: Error | null = null;
|
||||||
|
|
||||||
|
const pushEvent = (event: SdkEvent) => {
|
||||||
|
eventQueue.push(event);
|
||||||
|
if (resolveWaiting) {
|
||||||
|
resolveWaiting();
|
||||||
|
resolveWaiting = null;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const waitForEvent = (): Promise<void> => {
|
||||||
|
if (eventQueue.length > 0 || sessionComplete) {
|
||||||
|
return Promise.resolve();
|
||||||
|
}
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
resolveWaiting = resolve;
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
await client.start();
|
||||||
|
logger.debug(`CopilotClient started with cwd: ${workingDirectory}`);
|
||||||
|
|
||||||
|
// Create session with streaming enabled for real-time events
|
||||||
|
const session = await client.createSession({
|
||||||
|
model: bareModel,
|
||||||
|
streaming: true,
|
||||||
|
// AUTONOMOUS MODE: Auto-approve all permission requests.
|
||||||
|
// AutoMaker is designed for fully autonomous AI agent operation.
|
||||||
|
// Security boundary is provided by Docker containerization (see CLAUDE.md).
|
||||||
|
// User is warned about this at app startup.
|
||||||
|
onPermissionRequest: async (
|
||||||
|
request: PermissionRequest
|
||||||
|
): Promise<{ kind: 'approved' } | { kind: 'denied-interactively-by-user' }> => {
|
||||||
|
logger.debug(`Permission request: ${request.kind}`);
|
||||||
|
return { kind: 'approved' };
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
const sessionId = session.sessionId;
|
||||||
|
logger.debug(`Session created: ${sessionId}`);
|
||||||
|
|
||||||
|
// Set up event handler to push events to queue
|
||||||
|
session.on((event: SdkEvent) => {
|
||||||
|
logger.debug(`SDK event: ${event.type}`);
|
||||||
|
|
||||||
|
if (event.type === 'session.idle') {
|
||||||
|
sessionComplete = true;
|
||||||
|
pushEvent(event);
|
||||||
|
} else if (event.type === 'session.error') {
|
||||||
|
const errorEvent = event as SdkSessionErrorEvent;
|
||||||
|
sessionError = new Error(errorEvent.data.message);
|
||||||
|
sessionComplete = true;
|
||||||
|
pushEvent(event);
|
||||||
|
} else {
|
||||||
|
// Push all other events (tool.execution_start, tool.execution_end, assistant.message, etc.)
|
||||||
|
pushEvent(event);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Send the prompt (non-blocking)
|
||||||
|
await session.send({ prompt: promptText });
|
||||||
|
|
||||||
|
// Process events as they arrive
|
||||||
|
while (!sessionComplete || eventQueue.length > 0) {
|
||||||
|
await waitForEvent();
|
||||||
|
|
||||||
|
// Check for errors first (before processing events to avoid race condition)
|
||||||
|
if (sessionError) {
|
||||||
|
await session.destroy();
|
||||||
|
await client.stop();
|
||||||
|
throw sessionError;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process all queued events
|
||||||
|
while (eventQueue.length > 0) {
|
||||||
|
const event = eventQueue.shift()!;
|
||||||
|
const normalized = this.normalizeEvent(event);
|
||||||
|
if (normalized) {
|
||||||
|
// Add session_id if not present
|
||||||
|
if (!normalized.session_id) {
|
||||||
|
normalized.session_id = sessionId;
|
||||||
|
}
|
||||||
|
yield normalized;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup
|
||||||
|
await session.destroy();
|
||||||
|
await client.stop();
|
||||||
|
logger.debug('CopilotClient stopped successfully');
|
||||||
|
} catch (error) {
|
||||||
|
// Ensure client is stopped on error
|
||||||
|
try {
|
||||||
|
await client.stop();
|
||||||
|
} catch (cleanupError) {
|
||||||
|
// Log but don't throw cleanup errors - the original error is more important
|
||||||
|
logger.debug(`Failed to stop client during cleanup: ${cleanupError}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isAbortError(error)) {
|
||||||
|
logger.debug('Query aborted');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map errors to CopilotError
|
||||||
|
if (error instanceof Error) {
|
||||||
|
logger.error(`Copilot SDK error: ${error.message}`);
|
||||||
|
const errorInfo = this.mapError(error.message, null);
|
||||||
|
throw this.createError(
|
||||||
|
errorInfo.code as CopilotErrorCode,
|
||||||
|
errorInfo.message,
|
||||||
|
errorInfo.recoverable,
|
||||||
|
errorInfo.suggestion
|
||||||
|
);
|
||||||
|
}
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==========================================================================
|
||||||
|
// Copilot-Specific Methods
|
||||||
|
// ==========================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a CopilotError with details
|
||||||
|
*/
|
||||||
|
private createError(
|
||||||
|
code: CopilotErrorCode,
|
||||||
|
message: string,
|
||||||
|
recoverable: boolean = false,
|
||||||
|
suggestion?: string
|
||||||
|
): CopilotError {
|
||||||
|
const error = new Error(message) as CopilotError;
|
||||||
|
error.code = code;
|
||||||
|
error.recoverable = recoverable;
|
||||||
|
error.suggestion = suggestion;
|
||||||
|
error.name = 'CopilotError';
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Copilot CLI version
|
||||||
|
*/
|
||||||
|
async getVersion(): Promise<string | null> {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
if (!this.cliPath) return null;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const result = execSync(`"${this.cliPath}" --version`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 5000,
|
||||||
|
stdio: 'pipe',
|
||||||
|
}).trim();
|
||||||
|
return result;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check authentication status
|
||||||
|
*
|
||||||
|
* Uses GitHub CLI (gh) to check Copilot authentication status.
|
||||||
|
* The Copilot CLI relies on gh auth for authentication.
|
||||||
|
*/
|
||||||
|
async checkAuth(): Promise<CopilotAuthStatus> {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
if (!this.cliPath) {
|
||||||
|
logger.debug('checkAuth: CLI not found');
|
||||||
|
return { authenticated: false, method: 'none' };
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('checkAuth: Starting credential check');
|
||||||
|
|
||||||
|
// Try to check GitHub CLI authentication status first
|
||||||
|
// The Copilot CLI uses gh auth for authentication
|
||||||
|
try {
|
||||||
|
const ghStatus = execSync('gh auth status --hostname github.com', {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 10000,
|
||||||
|
stdio: 'pipe',
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.debug(`checkAuth: gh auth status output: ${ghStatus.substring(0, 200)}`);
|
||||||
|
|
||||||
|
// Parse gh auth status output
|
||||||
|
const loggedInMatch = ghStatus.match(/Logged in to github\.com account (\S+)/);
|
||||||
|
if (loggedInMatch) {
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'oauth',
|
||||||
|
login: loggedInMatch[1],
|
||||||
|
host: 'github.com',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for token auth
|
||||||
|
if (ghStatus.includes('Logged in') || ghStatus.includes('Token:')) {
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'oauth',
|
||||||
|
host: 'github.com',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
} catch (ghError) {
|
||||||
|
logger.debug(`checkAuth: gh auth status failed: ${ghError}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try Copilot-specific auth check if gh is not available
|
||||||
|
try {
|
||||||
|
const result = execSync(`"${this.cliPath}" auth status`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 10000,
|
||||||
|
stdio: 'pipe',
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.debug(`checkAuth: copilot auth status output: ${result.substring(0, 200)}`);
|
||||||
|
|
||||||
|
if (result.includes('authenticated') || result.includes('logged in')) {
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'cli',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
} catch (copilotError) {
|
||||||
|
logger.debug(`checkAuth: copilot auth status failed: ${copilotError}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for GITHUB_TOKEN environment variable
|
||||||
|
if (process.env.GITHUB_TOKEN) {
|
||||||
|
logger.debug('checkAuth: Found GITHUB_TOKEN environment variable');
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'oauth',
|
||||||
|
statusMessage: 'Using GITHUB_TOKEN environment variable',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for gh config file
|
||||||
|
const ghConfigPath = path.join(os.homedir(), '.config', 'gh', 'hosts.yml');
|
||||||
|
try {
|
||||||
|
await fs.access(ghConfigPath);
|
||||||
|
const content = await fs.readFile(ghConfigPath, 'utf8');
|
||||||
|
if (content.includes('github.com') && content.includes('oauth_token')) {
|
||||||
|
logger.debug('checkAuth: Found gh config with oauth_token');
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'oauth',
|
||||||
|
host: 'github.com',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
logger.debug('checkAuth: No gh config found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// No credentials found
|
||||||
|
logger.debug('checkAuth: No valid credentials found');
|
||||||
|
return {
|
||||||
|
authenticated: false,
|
||||||
|
method: 'none',
|
||||||
|
error:
|
||||||
|
'No authentication configured. Run "gh auth login" or install GitHub Copilot extension.',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetch available models from the CLI at runtime
|
||||||
|
*/
|
||||||
|
async fetchRuntimeModels(): Promise<CopilotRuntimeModel[]> {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
if (!this.cliPath) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Try to list models using the CLI
|
||||||
|
const result = execSync(`"${this.cliPath}" models list --format json`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 15000,
|
||||||
|
stdio: 'pipe',
|
||||||
|
});
|
||||||
|
|
||||||
|
const models = JSON.parse(result) as CopilotRuntimeModel[];
|
||||||
|
this.runtimeModels = models;
|
||||||
|
logger.debug(`Fetched ${models.length} runtime models from Copilot CLI`);
|
||||||
|
return models;
|
||||||
|
} catch (error) {
|
||||||
|
// Clear cache on failure to avoid returning stale data
|
||||||
|
this.runtimeModels = null;
|
||||||
|
logger.debug(`Failed to fetch runtime models: ${error}`);
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Detect installation status (required by BaseProvider)
|
||||||
|
*/
|
||||||
|
async detectInstallation(): Promise<InstallationStatus> {
|
||||||
|
const installed = await this.isInstalled();
|
||||||
|
const version = installed ? await this.getVersion() : undefined;
|
||||||
|
const auth = await this.checkAuth();
|
||||||
|
|
||||||
|
return {
|
||||||
|
installed,
|
||||||
|
version: version || undefined,
|
||||||
|
path: this.cliPath || undefined,
|
||||||
|
method: 'cli',
|
||||||
|
authenticated: auth.authenticated,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the detected CLI path (public accessor for status endpoints)
|
||||||
|
*/
|
||||||
|
getCliPath(): string | null {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
return this.cliPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get available Copilot models
|
||||||
|
*
|
||||||
|
* Returns both static model definitions and runtime-discovered models
|
||||||
|
*/
|
||||||
|
getAvailableModels(): ModelDefinition[] {
|
||||||
|
// Start with static model definitions - explicitly typed to allow runtime models
|
||||||
|
const staticModels: ModelDefinition[] = Object.entries(COPILOT_MODEL_MAP).map(
|
||||||
|
([id, config]) => ({
|
||||||
|
id, // Full model ID with copilot- prefix
|
||||||
|
name: config.label,
|
||||||
|
modelString: id.replace('copilot-', ''), // Bare model for CLI
|
||||||
|
provider: 'copilot',
|
||||||
|
description: config.description,
|
||||||
|
supportsTools: config.supportsTools,
|
||||||
|
supportsVision: config.supportsVision,
|
||||||
|
contextWindow: config.contextWindow,
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Add runtime models if available (discovered via CLI)
|
||||||
|
if (this.runtimeModels) {
|
||||||
|
for (const runtimeModel of this.runtimeModels) {
|
||||||
|
// Skip if already in static list
|
||||||
|
const staticId = `copilot-${runtimeModel.id}`;
|
||||||
|
if (staticModels.some((m) => m.id === staticId)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
staticModels.push({
|
||||||
|
id: staticId,
|
||||||
|
name: runtimeModel.name || runtimeModel.id,
|
||||||
|
modelString: runtimeModel.id,
|
||||||
|
provider: 'copilot',
|
||||||
|
description: `Dynamic model: ${runtimeModel.name || runtimeModel.id}`,
|
||||||
|
supportsTools: true,
|
||||||
|
supportsVision: runtimeModel.capabilities?.supportsVision ?? false,
|
||||||
|
contextWindow: runtimeModel.capabilities?.maxInputTokens,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return staticModels;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a feature is supported
|
||||||
|
*
|
||||||
|
* Note: Vision is NOT currently supported - the SDK doesn't handle image inputs yet.
|
||||||
|
* This may change in future versions of the Copilot SDK.
|
||||||
|
*/
|
||||||
|
supportsFeature(feature: string): boolean {
|
||||||
|
const supported = ['tools', 'text', 'streaming'];
|
||||||
|
return supported.includes(feature);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if runtime models have been cached
|
||||||
|
*/
|
||||||
|
hasCachedModels(): boolean {
|
||||||
|
return this.runtimeModels !== null && this.runtimeModels.length > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clear the runtime model cache
|
||||||
|
*/
|
||||||
|
clearModelCache(): void {
|
||||||
|
this.runtimeModels = null;
|
||||||
|
logger.debug('Cleared Copilot model cache');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Refresh models from CLI and return all available models
|
||||||
|
*/
|
||||||
|
async refreshModels(): Promise<ModelDefinition[]> {
|
||||||
|
logger.debug('Refreshing Copilot models from CLI');
|
||||||
|
await this.fetchRuntimeModels();
|
||||||
|
return this.getAvailableModels();
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -44,7 +44,7 @@ export class CursorConfigManager {
|
|||||||
|
|
||||||
// Return default config with all available models
|
// Return default config with all available models
|
||||||
return {
|
return {
|
||||||
defaultModel: 'auto',
|
defaultModel: 'cursor-auto',
|
||||||
models: getAllCursorModelIds(),
|
models: getAllCursorModelIds(),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -77,7 +77,7 @@ export class CursorConfigManager {
|
|||||||
* Get the default model
|
* Get the default model
|
||||||
*/
|
*/
|
||||||
getDefaultModel(): CursorModelId {
|
getDefaultModel(): CursorModelId {
|
||||||
return this.config.defaultModel || 'auto';
|
return this.config.defaultModel || 'cursor-auto';
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -93,7 +93,7 @@ export class CursorConfigManager {
|
|||||||
* Get enabled models
|
* Get enabled models
|
||||||
*/
|
*/
|
||||||
getEnabledModels(): CursorModelId[] {
|
getEnabledModels(): CursorModelId[] {
|
||||||
return this.config.models || ['auto'];
|
return this.config.models || ['cursor-auto'];
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -174,7 +174,7 @@ export class CursorConfigManager {
|
|||||||
*/
|
*/
|
||||||
reset(): void {
|
reset(): void {
|
||||||
this.config = {
|
this.config = {
|
||||||
defaultModel: 'auto',
|
defaultModel: 'cursor-auto',
|
||||||
models: getAllCursorModelIds(),
|
models: getAllCursorModelIds(),
|
||||||
};
|
};
|
||||||
this.saveConfig();
|
this.saveConfig();
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import { execSync } from 'child_process';
|
|||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import * as os from 'os';
|
import * as os from 'os';
|
||||||
|
import { findCliInWsl, isWslAvailable } from '@automaker/platform';
|
||||||
import {
|
import {
|
||||||
CliProvider,
|
CliProvider,
|
||||||
type CliSpawnConfig,
|
type CliSpawnConfig,
|
||||||
@@ -30,7 +31,7 @@ import type {
|
|||||||
} from './types.js';
|
} from './types.js';
|
||||||
import { validateBareModelId } from '@automaker/types';
|
import { validateBareModelId } from '@automaker/types';
|
||||||
import { validateApiKey } from '../lib/auth-utils.js';
|
import { validateApiKey } from '../lib/auth-utils.js';
|
||||||
import { getEffectivePermissions } from '../services/cursor-config-service.js';
|
import { getEffectivePermissions, detectProfile } from '../services/cursor-config-service.js';
|
||||||
import {
|
import {
|
||||||
type CursorStreamEvent,
|
type CursorStreamEvent,
|
||||||
type CursorSystemEvent,
|
type CursorSystemEvent,
|
||||||
@@ -68,6 +69,7 @@ interface CursorToolHandler<TArgs = unknown, TResult = unknown> {
|
|||||||
* Registry of Cursor tool handlers
|
* Registry of Cursor tool handlers
|
||||||
* Each handler knows how to normalize its specific tool call type
|
* Each handler knows how to normalize its specific tool call type
|
||||||
*/
|
*/
|
||||||
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- handler registry stores heterogeneous tool type parameters
|
||||||
const CURSOR_TOOL_HANDLERS: Record<string, CursorToolHandler<any, any>> = {
|
const CURSOR_TOOL_HANDLERS: Record<string, CursorToolHandler<any, any>> = {
|
||||||
readToolCall: {
|
readToolCall: {
|
||||||
name: 'Read',
|
name: 'Read',
|
||||||
@@ -286,15 +288,113 @@ export class CursorProvider extends CliProvider {
|
|||||||
|
|
||||||
getSpawnConfig(): CliSpawnConfig {
|
getSpawnConfig(): CliSpawnConfig {
|
||||||
return {
|
return {
|
||||||
windowsStrategy: 'wsl', // cursor-agent requires WSL on Windows
|
windowsStrategy: 'direct',
|
||||||
commonPaths: {
|
commonPaths: {
|
||||||
linux: [
|
linux: [
|
||||||
path.join(os.homedir(), '.local/bin/cursor-agent'), // Primary symlink location
|
path.join(os.homedir(), '.local/bin/cursor-agent'), // Primary symlink location
|
||||||
'/usr/local/bin/cursor-agent',
|
'/usr/local/bin/cursor-agent',
|
||||||
],
|
],
|
||||||
darwin: [path.join(os.homedir(), '.local/bin/cursor-agent'), '/usr/local/bin/cursor-agent'],
|
darwin: [path.join(os.homedir(), '.local/bin/cursor-agent'), '/usr/local/bin/cursor-agent'],
|
||||||
// Windows paths are not used - we check for WSL installation instead
|
win32: [
|
||||||
win32: [],
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'Cursor',
|
||||||
|
'resources',
|
||||||
|
'app',
|
||||||
|
'bin',
|
||||||
|
'cursor-agent.exe'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'Cursor',
|
||||||
|
'resources',
|
||||||
|
'app',
|
||||||
|
'bin',
|
||||||
|
'cursor-agent.cmd'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'Cursor',
|
||||||
|
'resources',
|
||||||
|
'app',
|
||||||
|
'bin',
|
||||||
|
'cursor.exe'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'Cursor',
|
||||||
|
'cursor.exe'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'cursor',
|
||||||
|
'resources',
|
||||||
|
'app',
|
||||||
|
'bin',
|
||||||
|
'cursor-agent.exe'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'cursor',
|
||||||
|
'resources',
|
||||||
|
'app',
|
||||||
|
'bin',
|
||||||
|
'cursor-agent.cmd'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'cursor',
|
||||||
|
'resources',
|
||||||
|
'app',
|
||||||
|
'bin',
|
||||||
|
'cursor.exe'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'Programs',
|
||||||
|
'cursor',
|
||||||
|
'cursor.exe'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.APPDATA || path.join(os.homedir(), 'AppData', 'Roaming'),
|
||||||
|
'npm',
|
||||||
|
'cursor-agent.cmd'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.APPDATA || path.join(os.homedir(), 'AppData', 'Roaming'),
|
||||||
|
'npm',
|
||||||
|
'cursor.cmd'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.APPDATA || path.join(os.homedir(), 'AppData', 'Roaming'),
|
||||||
|
'.npm-global',
|
||||||
|
'bin',
|
||||||
|
'cursor-agent.cmd'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.APPDATA || path.join(os.homedir(), 'AppData', 'Roaming'),
|
||||||
|
'.npm-global',
|
||||||
|
'bin',
|
||||||
|
'cursor.cmd'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'pnpm',
|
||||||
|
'cursor-agent.cmd'
|
||||||
|
),
|
||||||
|
path.join(
|
||||||
|
process.env.LOCALAPPDATA || path.join(os.homedir(), 'AppData', 'Local'),
|
||||||
|
'pnpm',
|
||||||
|
'cursor.cmd'
|
||||||
|
),
|
||||||
|
],
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -337,10 +437,11 @@ export class CursorProvider extends CliProvider {
|
|||||||
'--stream-partial-output' // Real-time streaming
|
'--stream-partial-output' // Real-time streaming
|
||||||
);
|
);
|
||||||
|
|
||||||
// Only add --force if NOT in read-only mode
|
// In read-only mode, use --mode ask for Q&A style (no tools)
|
||||||
// Without --force, Cursor CLI suggests changes but doesn't apply them
|
// Otherwise, add --force to allow file edits
|
||||||
// With --force, Cursor CLI can actually edit files
|
if (options.readOnly) {
|
||||||
if (!options.readOnly) {
|
cliArgs.push('--mode', 'ask');
|
||||||
|
} else {
|
||||||
cliArgs.push('--force');
|
cliArgs.push('--force');
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -486,6 +587,92 @@ export class CursorProvider extends CliProvider {
|
|||||||
* 2. Cursor IDE with 'cursor agent' subcommand support
|
* 2. Cursor IDE with 'cursor agent' subcommand support
|
||||||
*/
|
*/
|
||||||
protected detectCli(): CliDetectionResult {
|
protected detectCli(): CliDetectionResult {
|
||||||
|
if (process.platform === 'win32') {
|
||||||
|
const findInPath = (command: string): string | null => {
|
||||||
|
try {
|
||||||
|
const result = execSync(`where ${command}`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 5000,
|
||||||
|
stdio: ['pipe', 'pipe', 'pipe'],
|
||||||
|
windowsHide: true,
|
||||||
|
})
|
||||||
|
.trim()
|
||||||
|
.split(/\r?\n/)[0];
|
||||||
|
|
||||||
|
if (result && fs.existsSync(result)) {
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Not in PATH
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
};
|
||||||
|
|
||||||
|
const isCursorAgentBinary = (cliPath: string) =>
|
||||||
|
cliPath.toLowerCase().includes('cursor-agent');
|
||||||
|
|
||||||
|
const supportsCursorAgentSubcommand = (cliPath: string) => {
|
||||||
|
try {
|
||||||
|
execSync(`"${cliPath}" agent --version`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 5000,
|
||||||
|
stdio: 'pipe',
|
||||||
|
windowsHide: true,
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const pathResult = findInPath('cursor-agent') || findInPath('cursor');
|
||||||
|
if (pathResult) {
|
||||||
|
if (isCursorAgentBinary(pathResult) || supportsCursorAgentSubcommand(pathResult)) {
|
||||||
|
return {
|
||||||
|
cliPath: pathResult,
|
||||||
|
useWsl: false,
|
||||||
|
strategy: pathResult.toLowerCase().endsWith('.cmd') ? 'cmd' : 'direct',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const config = this.getSpawnConfig();
|
||||||
|
for (const candidate of config.commonPaths.win32 || []) {
|
||||||
|
const resolved = candidate;
|
||||||
|
if (!fs.existsSync(resolved)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (isCursorAgentBinary(resolved) || supportsCursorAgentSubcommand(resolved)) {
|
||||||
|
return {
|
||||||
|
cliPath: resolved,
|
||||||
|
useWsl: false,
|
||||||
|
strategy: resolved.toLowerCase().endsWith('.cmd') ? 'cmd' : 'direct',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const wslLogger = (msg: string) => logger.debug(msg);
|
||||||
|
if (isWslAvailable({ logger: wslLogger })) {
|
||||||
|
const wslResult = findCliInWsl('cursor-agent', { logger: wslLogger });
|
||||||
|
if (wslResult) {
|
||||||
|
logger.debug(
|
||||||
|
`Using cursor-agent via WSL (${wslResult.distribution || 'default'}): ${wslResult.wslPath}`
|
||||||
|
);
|
||||||
|
return {
|
||||||
|
cliPath: 'wsl.exe',
|
||||||
|
useWsl: true,
|
||||||
|
wslCliPath: wslResult.wslPath,
|
||||||
|
wslDistribution: wslResult.distribution,
|
||||||
|
strategy: 'wsl',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('cursor-agent not found on Windows');
|
||||||
|
return { cliPath: null, useWsl: false, strategy: 'direct' };
|
||||||
|
}
|
||||||
|
|
||||||
// First try standard detection (PATH, common paths, WSL)
|
// First try standard detection (PATH, common paths, WSL)
|
||||||
const result = super.detectCli();
|
const result = super.detectCli();
|
||||||
if (result.cliPath) {
|
if (result.cliPath) {
|
||||||
@@ -494,7 +681,7 @@ export class CursorProvider extends CliProvider {
|
|||||||
|
|
||||||
// Cursor-specific: Check versions directory for any installed version
|
// Cursor-specific: Check versions directory for any installed version
|
||||||
// This handles cases where cursor-agent is installed but not in PATH
|
// This handles cases where cursor-agent is installed but not in PATH
|
||||||
if (process.platform !== 'win32' && fs.existsSync(CursorProvider.VERSIONS_DIR)) {
|
if (fs.existsSync(CursorProvider.VERSIONS_DIR)) {
|
||||||
try {
|
try {
|
||||||
const versions = fs
|
const versions = fs
|
||||||
.readdirSync(CursorProvider.VERSIONS_DIR)
|
.readdirSync(CursorProvider.VERSIONS_DIR)
|
||||||
@@ -520,7 +707,6 @@ export class CursorProvider extends CliProvider {
|
|||||||
|
|
||||||
// If cursor-agent not found, try to find 'cursor' IDE and use 'cursor agent' subcommand
|
// If cursor-agent not found, try to find 'cursor' IDE and use 'cursor agent' subcommand
|
||||||
// The Cursor IDE includes the agent as a subcommand: cursor agent
|
// The Cursor IDE includes the agent as a subcommand: cursor agent
|
||||||
if (process.platform !== 'win32') {
|
|
||||||
const cursorPaths = [
|
const cursorPaths = [
|
||||||
'/usr/bin/cursor',
|
'/usr/bin/cursor',
|
||||||
'/usr/local/bin/cursor',
|
'/usr/local/bin/cursor',
|
||||||
@@ -549,7 +735,6 @@ export class CursorProvider extends CliProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
@@ -672,10 +857,13 @@ export class CursorProvider extends CliProvider {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Extract prompt text to pass via stdin (avoids shell escaping issues)
|
// Embed system prompt into user prompt (Cursor CLI doesn't support separate system messages)
|
||||||
const promptText = this.extractPromptText(options);
|
const effectiveOptions = this.embedSystemPromptIntoPrompt(options);
|
||||||
|
|
||||||
const cliArgs = this.buildCliArgs(options);
|
// Extract prompt text to pass via stdin (avoids shell escaping issues)
|
||||||
|
const promptText = this.extractPromptText(effectiveOptions);
|
||||||
|
|
||||||
|
const cliArgs = this.buildCliArgs(effectiveOptions);
|
||||||
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
|
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
|
||||||
|
|
||||||
// Pass prompt via stdin to avoid shell interpretation of special characters
|
// Pass prompt via stdin to avoid shell interpretation of special characters
|
||||||
@@ -690,8 +878,12 @@ export class CursorProvider extends CliProvider {
|
|||||||
|
|
||||||
logger.debug(`CursorProvider.executeQuery called with model: "${options.model}"`);
|
logger.debug(`CursorProvider.executeQuery called with model: "${options.model}"`);
|
||||||
|
|
||||||
// Get effective permissions for this project
|
// Get effective permissions for this project and detect the active profile
|
||||||
const effectivePermissions = await getEffectivePermissions(options.cwd || process.cwd());
|
const effectivePermissions = await getEffectivePermissions(options.cwd || process.cwd());
|
||||||
|
const activeProfile = detectProfile(effectivePermissions);
|
||||||
|
logger.debug(
|
||||||
|
`Active permission profile: ${activeProfile ?? 'none'}, permissions: ${JSON.stringify(effectivePermissions)}`
|
||||||
|
);
|
||||||
|
|
||||||
// Debug: log raw events when AUTOMAKER_DEBUG_RAW_OUTPUT is enabled
|
// Debug: log raw events when AUTOMAKER_DEBUG_RAW_OUTPUT is enabled
|
||||||
const debugRawEvents =
|
const debugRawEvents =
|
||||||
|
|||||||
809
apps/server/src/providers/gemini-provider.ts
Normal file
809
apps/server/src/providers/gemini-provider.ts
Normal file
@@ -0,0 +1,809 @@
|
|||||||
|
/**
|
||||||
|
* Gemini Provider - Executes queries using the Gemini CLI
|
||||||
|
*
|
||||||
|
* Extends CliProvider with Gemini-specific:
|
||||||
|
* - Event normalization for Gemini's JSONL streaming format
|
||||||
|
* - Google account and API key authentication support
|
||||||
|
* - Thinking level configuration
|
||||||
|
*
|
||||||
|
* Based on https://github.com/google-gemini/gemini-cli
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { execSync } from 'child_process';
|
||||||
|
import * as fs from 'fs/promises';
|
||||||
|
import * as path from 'path';
|
||||||
|
import * as os from 'os';
|
||||||
|
import { CliProvider, type CliSpawnConfig, type CliErrorInfo } from './cli-provider.js';
|
||||||
|
import type {
|
||||||
|
ProviderConfig,
|
||||||
|
ExecuteOptions,
|
||||||
|
ProviderMessage,
|
||||||
|
InstallationStatus,
|
||||||
|
ModelDefinition,
|
||||||
|
} from './types.js';
|
||||||
|
import { validateBareModelId } from '@automaker/types';
|
||||||
|
import { GEMINI_MODEL_MAP, type GeminiAuthStatus } from '@automaker/types';
|
||||||
|
import { createLogger, isAbortError } from '@automaker/utils';
|
||||||
|
import { spawnJSONLProcess } from '@automaker/platform';
|
||||||
|
import { normalizeTodos } from './tool-normalization.js';
|
||||||
|
|
||||||
|
// Create logger for this module
|
||||||
|
const logger = createLogger('GeminiProvider');
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// Gemini Stream Event Types
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Base event structure from Gemini CLI --output-format stream-json
|
||||||
|
*
|
||||||
|
* Actual CLI output format:
|
||||||
|
* {"type":"init","timestamp":"...","session_id":"...","model":"..."}
|
||||||
|
* {"type":"message","timestamp":"...","role":"user","content":"..."}
|
||||||
|
* {"type":"message","timestamp":"...","role":"assistant","content":"...","delta":true}
|
||||||
|
* {"type":"tool_use","timestamp":"...","tool_name":"...","tool_id":"...","parameters":{...}}
|
||||||
|
* {"type":"tool_result","timestamp":"...","tool_id":"...","status":"success","output":"..."}
|
||||||
|
* {"type":"result","timestamp":"...","status":"success","stats":{...}}
|
||||||
|
*/
|
||||||
|
interface GeminiStreamEvent {
|
||||||
|
type: 'init' | 'message' | 'tool_use' | 'tool_result' | 'result' | 'error';
|
||||||
|
timestamp?: string;
|
||||||
|
session_id?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GeminiInitEvent extends GeminiStreamEvent {
|
||||||
|
type: 'init';
|
||||||
|
session_id: string;
|
||||||
|
model: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GeminiMessageEvent extends GeminiStreamEvent {
|
||||||
|
type: 'message';
|
||||||
|
role: 'user' | 'assistant';
|
||||||
|
content: string;
|
||||||
|
delta?: boolean;
|
||||||
|
session_id?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GeminiToolUseEvent extends GeminiStreamEvent {
|
||||||
|
type: 'tool_use';
|
||||||
|
tool_id: string;
|
||||||
|
tool_name: string;
|
||||||
|
parameters: Record<string, unknown>;
|
||||||
|
session_id?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GeminiToolResultEvent extends GeminiStreamEvent {
|
||||||
|
type: 'tool_result';
|
||||||
|
tool_id: string;
|
||||||
|
status: 'success' | 'error';
|
||||||
|
output: string;
|
||||||
|
session_id?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface GeminiResultEvent extends GeminiStreamEvent {
|
||||||
|
type: 'result';
|
||||||
|
status: 'success' | 'error';
|
||||||
|
stats?: {
|
||||||
|
total_tokens?: number;
|
||||||
|
input_tokens?: number;
|
||||||
|
output_tokens?: number;
|
||||||
|
cached?: number;
|
||||||
|
input?: number;
|
||||||
|
duration_ms?: number;
|
||||||
|
tool_calls?: number;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
session_id?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// Error Codes
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
export enum GeminiErrorCode {
|
||||||
|
NOT_INSTALLED = 'GEMINI_NOT_INSTALLED',
|
||||||
|
NOT_AUTHENTICATED = 'GEMINI_NOT_AUTHENTICATED',
|
||||||
|
RATE_LIMITED = 'GEMINI_RATE_LIMITED',
|
||||||
|
MODEL_UNAVAILABLE = 'GEMINI_MODEL_UNAVAILABLE',
|
||||||
|
NETWORK_ERROR = 'GEMINI_NETWORK_ERROR',
|
||||||
|
PROCESS_CRASHED = 'GEMINI_PROCESS_CRASHED',
|
||||||
|
TIMEOUT = 'GEMINI_TIMEOUT',
|
||||||
|
UNKNOWN = 'GEMINI_UNKNOWN_ERROR',
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface GeminiError extends Error {
|
||||||
|
code: GeminiErrorCode;
|
||||||
|
recoverable: boolean;
|
||||||
|
suggestion?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// Tool Name Normalization
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gemini CLI tool name to standard tool name mapping
|
||||||
|
* This allows the UI to properly categorize and display Gemini tool calls
|
||||||
|
*/
|
||||||
|
const GEMINI_TOOL_NAME_MAP: Record<string, string> = {
|
||||||
|
write_todos: 'TodoWrite',
|
||||||
|
read_file: 'Read',
|
||||||
|
read_many_files: 'Read',
|
||||||
|
replace: 'Edit',
|
||||||
|
write_file: 'Write',
|
||||||
|
run_shell_command: 'Bash',
|
||||||
|
search_file_content: 'Grep',
|
||||||
|
glob: 'Glob',
|
||||||
|
list_directory: 'Ls',
|
||||||
|
web_fetch: 'WebFetch',
|
||||||
|
google_web_search: 'WebSearch',
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize Gemini tool names to standard tool names
|
||||||
|
*/
|
||||||
|
function normalizeGeminiToolName(geminiToolName: string): string {
|
||||||
|
return GEMINI_TOOL_NAME_MAP[geminiToolName] || geminiToolName;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize Gemini tool input parameters to standard format
|
||||||
|
*
|
||||||
|
* Uses shared normalizeTodos utility for consistent todo normalization.
|
||||||
|
*
|
||||||
|
* Gemini `write_todos` format:
|
||||||
|
* {"todos": [{"description": "Task text", "status": "pending|in_progress|completed|cancelled"}]}
|
||||||
|
*
|
||||||
|
* Claude `TodoWrite` format:
|
||||||
|
* {"todos": [{"content": "Task text", "status": "pending|in_progress|completed", "activeForm": "..."}]}
|
||||||
|
*/
|
||||||
|
function normalizeGeminiToolInput(
|
||||||
|
toolName: string,
|
||||||
|
input: Record<string, unknown>
|
||||||
|
): Record<string, unknown> {
|
||||||
|
// Normalize write_todos using shared utility
|
||||||
|
if (toolName === 'write_todos' && Array.isArray(input.todos)) {
|
||||||
|
return { todos: normalizeTodos(input.todos) };
|
||||||
|
}
|
||||||
|
return input;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* GeminiProvider - Integrates Gemini CLI as an AI provider
|
||||||
|
*
|
||||||
|
* Features:
|
||||||
|
* - Google account OAuth login support
|
||||||
|
* - API key authentication (GEMINI_API_KEY)
|
||||||
|
* - Vertex AI support
|
||||||
|
* - Thinking level configuration
|
||||||
|
* - Streaming JSON output
|
||||||
|
*/
|
||||||
|
export class GeminiProvider extends CliProvider {
|
||||||
|
constructor(config: ProviderConfig = {}) {
|
||||||
|
super(config);
|
||||||
|
// Trigger CLI detection on construction
|
||||||
|
this.ensureCliDetected();
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==========================================================================
|
||||||
|
// CliProvider Abstract Method Implementations
|
||||||
|
// ==========================================================================
|
||||||
|
|
||||||
|
getName(): string {
|
||||||
|
return 'gemini';
|
||||||
|
}
|
||||||
|
|
||||||
|
getCliName(): string {
|
||||||
|
return 'gemini';
|
||||||
|
}
|
||||||
|
|
||||||
|
getSpawnConfig(): CliSpawnConfig {
|
||||||
|
return {
|
||||||
|
windowsStrategy: 'npx', // Gemini CLI can be run via npx
|
||||||
|
npxPackage: '@google/gemini-cli', // Official Google Gemini CLI package
|
||||||
|
commonPaths: {
|
||||||
|
linux: [
|
||||||
|
path.join(os.homedir(), '.local/bin/gemini'),
|
||||||
|
'/usr/local/bin/gemini',
|
||||||
|
path.join(os.homedir(), '.npm-global/bin/gemini'),
|
||||||
|
],
|
||||||
|
darwin: [
|
||||||
|
path.join(os.homedir(), '.local/bin/gemini'),
|
||||||
|
'/usr/local/bin/gemini',
|
||||||
|
'/opt/homebrew/bin/gemini',
|
||||||
|
path.join(os.homedir(), '.npm-global/bin/gemini'),
|
||||||
|
],
|
||||||
|
win32: [
|
||||||
|
path.join(os.homedir(), 'AppData', 'Roaming', 'npm', 'gemini.cmd'),
|
||||||
|
path.join(os.homedir(), '.npm-global', 'gemini.cmd'),
|
||||||
|
],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract prompt text from ExecuteOptions
|
||||||
|
*/
|
||||||
|
private extractPromptText(options: ExecuteOptions): string {
|
||||||
|
if (typeof options.prompt === 'string') {
|
||||||
|
return options.prompt;
|
||||||
|
} else if (Array.isArray(options.prompt)) {
|
||||||
|
return options.prompt
|
||||||
|
.filter((p) => p.type === 'text' && p.text)
|
||||||
|
.map((p) => p.text)
|
||||||
|
.join('\n');
|
||||||
|
} else {
|
||||||
|
throw new Error('Invalid prompt format');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buildCliArgs(options: ExecuteOptions): string[] {
|
||||||
|
// Model comes in stripped of provider prefix (e.g., '2.5-flash' from 'gemini-2.5-flash')
|
||||||
|
// We need to add 'gemini-' back since it's part of the actual CLI model name
|
||||||
|
const bareModel = options.model || '2.5-flash';
|
||||||
|
const cliArgs: string[] = [];
|
||||||
|
|
||||||
|
// Streaming JSON output format for real-time updates
|
||||||
|
cliArgs.push('--output-format', 'stream-json');
|
||||||
|
|
||||||
|
// Model selection - Gemini CLI expects full model names like "gemini-2.5-flash"
|
||||||
|
// Unlike Cursor CLI where 'cursor-' is just a routing prefix, for Gemini CLI
|
||||||
|
// the 'gemini-' is part of the actual model name Google expects
|
||||||
|
if (bareModel && bareModel !== 'auto') {
|
||||||
|
// Add gemini- prefix if not already present (handles edge cases)
|
||||||
|
const cliModel = bareModel.startsWith('gemini-') ? bareModel : `gemini-${bareModel}`;
|
||||||
|
cliArgs.push('--model', cliModel);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disable sandbox mode for faster execution (sandbox adds overhead)
|
||||||
|
cliArgs.push('--sandbox', 'false');
|
||||||
|
|
||||||
|
// YOLO mode for automatic approval (required for non-interactive use)
|
||||||
|
// Use explicit approval-mode for clearer semantics
|
||||||
|
cliArgs.push('--approval-mode', 'yolo');
|
||||||
|
|
||||||
|
// Explicitly include the working directory in allowed workspace directories
|
||||||
|
// This ensures Gemini CLI allows file operations in the project directory,
|
||||||
|
// even if it has a different workspace cached from a previous session
|
||||||
|
if (options.cwd) {
|
||||||
|
cliArgs.push('--include-directories', options.cwd);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: Gemini CLI doesn't have a --thinking-level flag.
|
||||||
|
// Thinking capabilities are determined by the model selection (e.g., gemini-2.5-pro).
|
||||||
|
// The model handles thinking internally based on the task complexity.
|
||||||
|
|
||||||
|
// The prompt will be passed as the last positional argument
|
||||||
|
// We'll append it in executeQuery after extracting the text
|
||||||
|
|
||||||
|
return cliArgs;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert Gemini event to AutoMaker ProviderMessage format
|
||||||
|
*/
|
||||||
|
normalizeEvent(event: unknown): ProviderMessage | null {
|
||||||
|
const geminiEvent = event as GeminiStreamEvent;
|
||||||
|
|
||||||
|
switch (geminiEvent.type) {
|
||||||
|
case 'init': {
|
||||||
|
// Init event - capture session but don't yield a message
|
||||||
|
const initEvent = geminiEvent as GeminiInitEvent;
|
||||||
|
logger.debug(
|
||||||
|
`Gemini init event: session=${initEvent.session_id}, model=${initEvent.model}`
|
||||||
|
);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'message': {
|
||||||
|
const messageEvent = geminiEvent as GeminiMessageEvent;
|
||||||
|
|
||||||
|
// Skip user messages - already handled by caller
|
||||||
|
if (messageEvent.role === 'user') {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle assistant messages
|
||||||
|
if (messageEvent.role === 'assistant') {
|
||||||
|
return {
|
||||||
|
type: 'assistant',
|
||||||
|
session_id: messageEvent.session_id,
|
||||||
|
message: {
|
||||||
|
role: 'assistant',
|
||||||
|
content: [{ type: 'text', text: messageEvent.content }],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'tool_use': {
|
||||||
|
const toolEvent = geminiEvent as GeminiToolUseEvent;
|
||||||
|
const normalizedName = normalizeGeminiToolName(toolEvent.tool_name);
|
||||||
|
const normalizedInput = normalizeGeminiToolInput(
|
||||||
|
toolEvent.tool_name,
|
||||||
|
toolEvent.parameters as Record<string, unknown>
|
||||||
|
);
|
||||||
|
|
||||||
|
return {
|
||||||
|
type: 'assistant',
|
||||||
|
session_id: toolEvent.session_id,
|
||||||
|
message: {
|
||||||
|
role: 'assistant',
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'tool_use',
|
||||||
|
name: normalizedName,
|
||||||
|
tool_use_id: toolEvent.tool_id,
|
||||||
|
input: normalizedInput,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'tool_result': {
|
||||||
|
const toolResultEvent = geminiEvent as GeminiToolResultEvent;
|
||||||
|
// If tool result is an error, prefix with error indicator
|
||||||
|
const content =
|
||||||
|
toolResultEvent.status === 'error'
|
||||||
|
? `[ERROR] ${toolResultEvent.output}`
|
||||||
|
: toolResultEvent.output;
|
||||||
|
return {
|
||||||
|
type: 'assistant',
|
||||||
|
session_id: toolResultEvent.session_id,
|
||||||
|
message: {
|
||||||
|
role: 'assistant',
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: 'tool_result',
|
||||||
|
tool_use_id: toolResultEvent.tool_id,
|
||||||
|
content,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'result': {
|
||||||
|
const resultEvent = geminiEvent as GeminiResultEvent;
|
||||||
|
|
||||||
|
if (resultEvent.status === 'error') {
|
||||||
|
return {
|
||||||
|
type: 'error',
|
||||||
|
session_id: resultEvent.session_id,
|
||||||
|
error: resultEvent.error || 'Unknown error',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Success result - include stats for logging
|
||||||
|
logger.debug(
|
||||||
|
`Gemini result: status=${resultEvent.status}, tokens=${resultEvent.stats?.total_tokens}`
|
||||||
|
);
|
||||||
|
return {
|
||||||
|
type: 'result',
|
||||||
|
subtype: 'success',
|
||||||
|
session_id: resultEvent.session_id,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'error': {
|
||||||
|
const errorEvent = geminiEvent as GeminiResultEvent;
|
||||||
|
return {
|
||||||
|
type: 'error',
|
||||||
|
session_id: errorEvent.session_id,
|
||||||
|
error: errorEvent.error || 'Unknown error',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
logger.debug(`Unknown Gemini event type: ${geminiEvent.type}`);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==========================================================================
|
||||||
|
// CliProvider Overrides
|
||||||
|
// ==========================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Override error mapping for Gemini-specific error codes
|
||||||
|
*/
|
||||||
|
protected mapError(stderr: string, exitCode: number | null): CliErrorInfo {
|
||||||
|
const lower = stderr.toLowerCase();
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('not authenticated') ||
|
||||||
|
lower.includes('please log in') ||
|
||||||
|
lower.includes('unauthorized') ||
|
||||||
|
lower.includes('login required') ||
|
||||||
|
lower.includes('error authenticating') ||
|
||||||
|
lower.includes('loadcodeassist') ||
|
||||||
|
(lower.includes('econnrefused') && lower.includes('8888'))
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: GeminiErrorCode.NOT_AUTHENTICATED,
|
||||||
|
message: 'Gemini CLI is not authenticated',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion:
|
||||||
|
'Run "gemini" interactively to log in, or set GEMINI_API_KEY environment variable',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('rate limit') ||
|
||||||
|
lower.includes('too many requests') ||
|
||||||
|
lower.includes('429') ||
|
||||||
|
lower.includes('quota exceeded')
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: GeminiErrorCode.RATE_LIMITED,
|
||||||
|
message: 'Gemini API rate limit exceeded',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'Wait a few minutes and try again. Free tier: 60 req/min, 1000 req/day',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('model not available') ||
|
||||||
|
lower.includes('invalid model') ||
|
||||||
|
lower.includes('unknown model') ||
|
||||||
|
lower.includes('modelnotfounderror') ||
|
||||||
|
lower.includes('model not found') ||
|
||||||
|
(lower.includes('not found') && lower.includes('404'))
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: GeminiErrorCode.MODEL_UNAVAILABLE,
|
||||||
|
message: 'Requested model is not available',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'Try using "gemini-2.5-flash" or select a different model',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
lower.includes('network') ||
|
||||||
|
lower.includes('connection') ||
|
||||||
|
lower.includes('econnrefused') ||
|
||||||
|
lower.includes('timeout')
|
||||||
|
) {
|
||||||
|
return {
|
||||||
|
code: GeminiErrorCode.NETWORK_ERROR,
|
||||||
|
message: 'Network connection error',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'Check your internet connection and try again',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (exitCode === 137 || lower.includes('killed') || lower.includes('sigterm')) {
|
||||||
|
return {
|
||||||
|
code: GeminiErrorCode.PROCESS_CRASHED,
|
||||||
|
message: 'Gemini CLI process was terminated',
|
||||||
|
recoverable: true,
|
||||||
|
suggestion: 'The process may have run out of memory. Try a simpler task.',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
code: GeminiErrorCode.UNKNOWN,
|
||||||
|
message: stderr || `Gemini CLI exited with code ${exitCode}`,
|
||||||
|
recoverable: false,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Override install instructions for Gemini-specific guidance
|
||||||
|
*/
|
||||||
|
protected getInstallInstructions(): string {
|
||||||
|
return 'Install with: npm install -g @google/gemini-cli (or visit https://github.com/google-gemini/gemini-cli)';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a prompt using Gemini CLI with streaming
|
||||||
|
*/
|
||||||
|
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
|
||||||
|
// Validate that model doesn't have a provider prefix
|
||||||
|
validateBareModelId(options.model, 'GeminiProvider');
|
||||||
|
|
||||||
|
if (!this.cliPath) {
|
||||||
|
throw this.createError(
|
||||||
|
GeminiErrorCode.NOT_INSTALLED,
|
||||||
|
'Gemini CLI is not installed',
|
||||||
|
true,
|
||||||
|
this.getInstallInstructions()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract prompt text to pass as positional argument
|
||||||
|
const promptText = this.extractPromptText(options);
|
||||||
|
|
||||||
|
// Build CLI args and append the prompt as the last positional argument
|
||||||
|
const cliArgs = this.buildCliArgs(options);
|
||||||
|
cliArgs.push(promptText); // Gemini CLI uses positional args for the prompt
|
||||||
|
|
||||||
|
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
|
||||||
|
|
||||||
|
let sessionId: string | undefined;
|
||||||
|
|
||||||
|
logger.debug(`GeminiProvider.executeQuery called with model: "${options.model}"`);
|
||||||
|
|
||||||
|
try {
|
||||||
|
for await (const rawEvent of spawnJSONLProcess(subprocessOptions)) {
|
||||||
|
const event = rawEvent as GeminiStreamEvent;
|
||||||
|
|
||||||
|
// Capture session ID from init event
|
||||||
|
if (event.type === 'init') {
|
||||||
|
const initEvent = event as GeminiInitEvent;
|
||||||
|
sessionId = initEvent.session_id;
|
||||||
|
logger.debug(`Session started: ${sessionId}, model: ${initEvent.model}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize and yield the event
|
||||||
|
const normalized = this.normalizeEvent(event);
|
||||||
|
if (normalized) {
|
||||||
|
if (!normalized.session_id && sessionId) {
|
||||||
|
normalized.session_id = sessionId;
|
||||||
|
}
|
||||||
|
yield normalized;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
if (isAbortError(error)) {
|
||||||
|
logger.debug('Query aborted');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map CLI errors to GeminiError
|
||||||
|
if (error instanceof Error && 'stderr' in error) {
|
||||||
|
const errorInfo = this.mapError(
|
||||||
|
(error as { stderr?: string }).stderr || error.message,
|
||||||
|
(error as { exitCode?: number | null }).exitCode ?? null
|
||||||
|
);
|
||||||
|
throw this.createError(
|
||||||
|
errorInfo.code as GeminiErrorCode,
|
||||||
|
errorInfo.message,
|
||||||
|
errorInfo.recoverable,
|
||||||
|
errorInfo.suggestion
|
||||||
|
);
|
||||||
|
}
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ==========================================================================
|
||||||
|
// Gemini-Specific Methods
|
||||||
|
// ==========================================================================
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a GeminiError with details
|
||||||
|
*/
|
||||||
|
private createError(
|
||||||
|
code: GeminiErrorCode,
|
||||||
|
message: string,
|
||||||
|
recoverable: boolean = false,
|
||||||
|
suggestion?: string
|
||||||
|
): GeminiError {
|
||||||
|
const error = new Error(message) as GeminiError;
|
||||||
|
error.code = code;
|
||||||
|
error.recoverable = recoverable;
|
||||||
|
error.suggestion = suggestion;
|
||||||
|
error.name = 'GeminiError';
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Gemini CLI version
|
||||||
|
*/
|
||||||
|
async getVersion(): Promise<string | null> {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
if (!this.cliPath) return null;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const result = execSync(`"${this.cliPath}" --version`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
timeout: 5000,
|
||||||
|
stdio: 'pipe',
|
||||||
|
}).trim();
|
||||||
|
return result;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check authentication status
|
||||||
|
*
|
||||||
|
* Uses a fast credential check approach:
|
||||||
|
* 1. Check for GEMINI_API_KEY environment variable
|
||||||
|
* 2. Check for Google Cloud credentials
|
||||||
|
* 3. Check for Gemini settings file with stored credentials
|
||||||
|
* 4. Quick CLI auth test with --help (fast, doesn't make API calls)
|
||||||
|
*/
|
||||||
|
async checkAuth(): Promise<GeminiAuthStatus> {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
if (!this.cliPath) {
|
||||||
|
logger.debug('checkAuth: CLI not found');
|
||||||
|
return { authenticated: false, method: 'none' };
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('checkAuth: Starting credential check');
|
||||||
|
|
||||||
|
// Determine the likely auth method based on environment
|
||||||
|
const hasApiKey = !!process.env.GEMINI_API_KEY;
|
||||||
|
const hasEnvApiKey = hasApiKey;
|
||||||
|
const hasVertexAi = !!(
|
||||||
|
process.env.GOOGLE_APPLICATION_CREDENTIALS || process.env.GOOGLE_CLOUD_PROJECT
|
||||||
|
);
|
||||||
|
|
||||||
|
logger.debug(`checkAuth: hasApiKey=${hasApiKey}, hasVertexAi=${hasVertexAi}`);
|
||||||
|
|
||||||
|
// Check for Gemini credentials file (~/.gemini/settings.json)
|
||||||
|
const geminiConfigDir = path.join(os.homedir(), '.gemini');
|
||||||
|
const settingsPath = path.join(geminiConfigDir, 'settings.json');
|
||||||
|
let hasCredentialsFile = false;
|
||||||
|
let authType: string | null = null;
|
||||||
|
|
||||||
|
try {
|
||||||
|
await fs.access(settingsPath);
|
||||||
|
logger.debug(`checkAuth: Found settings file at ${settingsPath}`);
|
||||||
|
try {
|
||||||
|
const content = await fs.readFile(settingsPath, 'utf8');
|
||||||
|
const settings = JSON.parse(content);
|
||||||
|
|
||||||
|
// Auth config is at security.auth.selectedType (e.g., "oauth-personal", "oauth-adc", "api-key")
|
||||||
|
const selectedType = settings?.security?.auth?.selectedType;
|
||||||
|
if (selectedType) {
|
||||||
|
hasCredentialsFile = true;
|
||||||
|
authType = selectedType;
|
||||||
|
logger.debug(`checkAuth: Settings file has auth config, selectedType=${selectedType}`);
|
||||||
|
} else {
|
||||||
|
logger.debug(`checkAuth: Settings file found but no auth type configured`);
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
logger.debug(`checkAuth: Failed to parse settings file: ${e}`);
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
logger.debug('checkAuth: No settings file found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have an API key, we're authenticated
|
||||||
|
if (hasApiKey) {
|
||||||
|
logger.debug('checkAuth: Using API key authentication');
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'api_key',
|
||||||
|
hasApiKey,
|
||||||
|
hasEnvApiKey,
|
||||||
|
hasCredentialsFile,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have Vertex AI credentials, we're authenticated
|
||||||
|
if (hasVertexAi) {
|
||||||
|
logger.debug('checkAuth: Using Vertex AI authentication');
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'vertex_ai',
|
||||||
|
hasApiKey,
|
||||||
|
hasEnvApiKey,
|
||||||
|
hasCredentialsFile,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if settings file indicates configured authentication
|
||||||
|
if (hasCredentialsFile && authType) {
|
||||||
|
// OAuth types: "oauth-personal", "oauth-adc"
|
||||||
|
// API key type: "api-key"
|
||||||
|
// Code assist: "code-assist" (requires IDE integration)
|
||||||
|
if (authType.startsWith('oauth')) {
|
||||||
|
logger.debug(`checkAuth: OAuth authentication configured (${authType})`);
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'google_login',
|
||||||
|
hasApiKey,
|
||||||
|
hasEnvApiKey,
|
||||||
|
hasCredentialsFile,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (authType === 'api-key') {
|
||||||
|
logger.debug('checkAuth: API key authentication configured in settings');
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'api_key',
|
||||||
|
hasApiKey,
|
||||||
|
hasEnvApiKey,
|
||||||
|
hasCredentialsFile,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (authType === 'code-assist' || authType === 'codeassist') {
|
||||||
|
logger.debug('checkAuth: Code Assist auth configured but requires local server');
|
||||||
|
return {
|
||||||
|
authenticated: false,
|
||||||
|
method: 'google_login',
|
||||||
|
hasApiKey,
|
||||||
|
hasEnvApiKey,
|
||||||
|
hasCredentialsFile,
|
||||||
|
error:
|
||||||
|
'Code Assist authentication requires IDE integration. Please use "gemini" CLI to log in with a different method, or set GEMINI_API_KEY.',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unknown auth type but something is configured
|
||||||
|
logger.debug(`checkAuth: Unknown auth type configured: ${authType}`);
|
||||||
|
return {
|
||||||
|
authenticated: true,
|
||||||
|
method: 'google_login',
|
||||||
|
hasApiKey,
|
||||||
|
hasEnvApiKey,
|
||||||
|
hasCredentialsFile,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// No credentials found
|
||||||
|
logger.debug('checkAuth: No valid credentials found');
|
||||||
|
return {
|
||||||
|
authenticated: false,
|
||||||
|
method: 'none',
|
||||||
|
hasApiKey,
|
||||||
|
hasEnvApiKey,
|
||||||
|
hasCredentialsFile,
|
||||||
|
error:
|
||||||
|
'No authentication configured. Run "gemini" interactively to log in, or set GEMINI_API_KEY.',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Detect installation status (required by BaseProvider)
|
||||||
|
*/
|
||||||
|
async detectInstallation(): Promise<InstallationStatus> {
|
||||||
|
const installed = await this.isInstalled();
|
||||||
|
const version = installed ? await this.getVersion() : undefined;
|
||||||
|
const auth = await this.checkAuth();
|
||||||
|
|
||||||
|
return {
|
||||||
|
installed,
|
||||||
|
version: version || undefined,
|
||||||
|
path: this.cliPath || undefined,
|
||||||
|
method: 'cli',
|
||||||
|
hasApiKey: !!process.env.GEMINI_API_KEY,
|
||||||
|
authenticated: auth.authenticated,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the detected CLI path (public accessor for status endpoints)
|
||||||
|
*/
|
||||||
|
getCliPath(): string | null {
|
||||||
|
this.ensureCliDetected();
|
||||||
|
return this.cliPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get available Gemini models
|
||||||
|
*/
|
||||||
|
getAvailableModels(): ModelDefinition[] {
|
||||||
|
return Object.entries(GEMINI_MODEL_MAP).map(([id, config]) => ({
|
||||||
|
id, // Full model ID with gemini- prefix (e.g., 'gemini-2.5-flash')
|
||||||
|
name: config.label,
|
||||||
|
modelString: id, // Same as id - CLI uses the full model name
|
||||||
|
provider: 'gemini',
|
||||||
|
description: config.description,
|
||||||
|
supportsTools: true,
|
||||||
|
supportsVision: config.supportsVision,
|
||||||
|
contextWindow: config.contextWindow,
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a feature is supported
|
||||||
|
*/
|
||||||
|
supportsFeature(feature: string): boolean {
|
||||||
|
const supported = ['tools', 'text', 'streaming', 'vision', 'thinking'];
|
||||||
|
return supported.includes(feature);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -16,6 +16,16 @@ export type {
|
|||||||
ProviderMessage,
|
ProviderMessage,
|
||||||
InstallationStatus,
|
InstallationStatus,
|
||||||
ModelDefinition,
|
ModelDefinition,
|
||||||
|
AgentDefinition,
|
||||||
|
ReasoningEffort,
|
||||||
|
SystemPromptPreset,
|
||||||
|
ConversationMessage,
|
||||||
|
ContentBlock,
|
||||||
|
ValidationResult,
|
||||||
|
McpServerConfig,
|
||||||
|
McpStdioServerConfig,
|
||||||
|
McpSSEServerConfig,
|
||||||
|
McpHttpServerConfig,
|
||||||
} from './types.js';
|
} from './types.js';
|
||||||
|
|
||||||
// Claude provider
|
// Claude provider
|
||||||
@@ -28,5 +38,19 @@ export { CursorConfigManager } from './cursor-config-manager.js';
|
|||||||
// OpenCode provider
|
// OpenCode provider
|
||||||
export { OpencodeProvider } from './opencode-provider.js';
|
export { OpencodeProvider } from './opencode-provider.js';
|
||||||
|
|
||||||
|
// Gemini provider
|
||||||
|
export { GeminiProvider, GeminiErrorCode } from './gemini-provider.js';
|
||||||
|
|
||||||
|
// Copilot provider (GitHub Copilot SDK)
|
||||||
|
export { CopilotProvider, CopilotErrorCode } from './copilot-provider.js';
|
||||||
|
|
||||||
// Provider factory
|
// Provider factory
|
||||||
export { ProviderFactory } from './provider-factory.js';
|
export { ProviderFactory } from './provider-factory.js';
|
||||||
|
|
||||||
|
// Simple query service - unified interface for basic AI queries
|
||||||
|
export { simpleQuery, streamingQuery } from './simple-query-service.js';
|
||||||
|
export type {
|
||||||
|
SimpleQueryOptions,
|
||||||
|
SimpleQueryResult,
|
||||||
|
StreamingQueryOptions,
|
||||||
|
} from './simple-query-service.js';
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -7,7 +7,14 @@
|
|||||||
|
|
||||||
import { BaseProvider } from './base-provider.js';
|
import { BaseProvider } from './base-provider.js';
|
||||||
import type { InstallationStatus, ModelDefinition } from './types.js';
|
import type { InstallationStatus, ModelDefinition } from './types.js';
|
||||||
import { isCursorModel, isCodexModel, isOpencodeModel, type ModelProvider } from '@automaker/types';
|
import {
|
||||||
|
isCursorModel,
|
||||||
|
isCodexModel,
|
||||||
|
isOpencodeModel,
|
||||||
|
isGeminiModel,
|
||||||
|
isCopilotModel,
|
||||||
|
type ModelProvider,
|
||||||
|
} from '@automaker/types';
|
||||||
import * as fs from 'fs';
|
import * as fs from 'fs';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
|
|
||||||
@@ -16,6 +23,8 @@ const DISCONNECTED_MARKERS: Record<string, string> = {
|
|||||||
codex: '.codex-disconnected',
|
codex: '.codex-disconnected',
|
||||||
cursor: '.cursor-disconnected',
|
cursor: '.cursor-disconnected',
|
||||||
opencode: '.opencode-disconnected',
|
opencode: '.opencode-disconnected',
|
||||||
|
gemini: '.gemini-disconnected',
|
||||||
|
copilot: '.copilot-disconnected',
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -94,7 +103,7 @@ export class ProviderFactory {
|
|||||||
/**
|
/**
|
||||||
* Get the appropriate provider for a given model ID
|
* Get the appropriate provider for a given model ID
|
||||||
*
|
*
|
||||||
* @param modelId Model identifier (e.g., "claude-opus-4-5-20251101", "cursor-gpt-4o", "cursor-auto")
|
* @param modelId Model identifier (e.g., "claude-opus-4-6", "cursor-gpt-4o", "cursor-auto")
|
||||||
* @param options Optional settings
|
* @param options Optional settings
|
||||||
* @param options.throwOnDisconnected Throw error if provider is disconnected (default: true)
|
* @param options.throwOnDisconnected Throw error if provider is disconnected (default: true)
|
||||||
* @returns Provider instance for the model
|
* @returns Provider instance for the model
|
||||||
@@ -239,8 +248,8 @@ export class ProviderFactory {
|
|||||||
model.modelString === modelId ||
|
model.modelString === modelId ||
|
||||||
model.id.endsWith(`-${modelId}`) ||
|
model.id.endsWith(`-${modelId}`) ||
|
||||||
model.modelString.endsWith(`-${modelId}`) ||
|
model.modelString.endsWith(`-${modelId}`) ||
|
||||||
model.modelString === modelId.replace(/^(claude|cursor|codex)-/, '') ||
|
model.modelString === modelId.replace(/^(claude|cursor|codex|gemini)-/, '') ||
|
||||||
model.modelString === modelId.replace(/-(claude|cursor|codex)$/, '')
|
model.modelString === modelId.replace(/-(claude|cursor|codex|gemini)$/, '')
|
||||||
) {
|
) {
|
||||||
return model.supportsVision ?? true;
|
return model.supportsVision ?? true;
|
||||||
}
|
}
|
||||||
@@ -267,6 +276,8 @@ import { ClaudeProvider } from './claude-provider.js';
|
|||||||
import { CursorProvider } from './cursor-provider.js';
|
import { CursorProvider } from './cursor-provider.js';
|
||||||
import { CodexProvider } from './codex-provider.js';
|
import { CodexProvider } from './codex-provider.js';
|
||||||
import { OpencodeProvider } from './opencode-provider.js';
|
import { OpencodeProvider } from './opencode-provider.js';
|
||||||
|
import { GeminiProvider } from './gemini-provider.js';
|
||||||
|
import { CopilotProvider } from './copilot-provider.js';
|
||||||
|
|
||||||
// Register Claude provider
|
// Register Claude provider
|
||||||
registerProvider('claude', {
|
registerProvider('claude', {
|
||||||
@@ -301,3 +312,19 @@ registerProvider('opencode', {
|
|||||||
canHandleModel: (model: string) => isOpencodeModel(model),
|
canHandleModel: (model: string) => isOpencodeModel(model),
|
||||||
priority: 3, // Between codex (5) and claude (0)
|
priority: 3, // Between codex (5) and claude (0)
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Register Gemini provider
|
||||||
|
registerProvider('gemini', {
|
||||||
|
factory: () => new GeminiProvider(),
|
||||||
|
aliases: ['google'],
|
||||||
|
canHandleModel: (model: string) => isGeminiModel(model),
|
||||||
|
priority: 4, // Between opencode (3) and codex (5)
|
||||||
|
});
|
||||||
|
|
||||||
|
// Register Copilot provider (GitHub Copilot SDK)
|
||||||
|
registerProvider('copilot', {
|
||||||
|
factory: () => new CopilotProvider(),
|
||||||
|
aliases: ['github-copilot', 'github'],
|
||||||
|
canHandleModel: (model: string) => isCopilotModel(model),
|
||||||
|
priority: 6, // High priority - check before Codex since both can handle GPT models
|
||||||
|
});
|
||||||
|
|||||||
273
apps/server/src/providers/simple-query-service.ts
Normal file
273
apps/server/src/providers/simple-query-service.ts
Normal file
@@ -0,0 +1,273 @@
|
|||||||
|
/**
|
||||||
|
* Simple Query Service - Simplified interface for basic AI queries
|
||||||
|
*
|
||||||
|
* Use this for routes that need simple text responses without
|
||||||
|
* complex event handling. This service abstracts away the provider
|
||||||
|
* selection and streaming details, providing a clean interface
|
||||||
|
* for common query patterns.
|
||||||
|
*
|
||||||
|
* Benefits:
|
||||||
|
* - No direct SDK imports needed in route files
|
||||||
|
* - Consistent provider routing based on model
|
||||||
|
* - Automatic text extraction from streaming responses
|
||||||
|
* - Structured output support for JSON schema responses
|
||||||
|
* - Eliminates duplicate extractTextFromStream() functions
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { ProviderFactory } from './provider-factory.js';
|
||||||
|
import type {
|
||||||
|
ThinkingLevel,
|
||||||
|
ReasoningEffort,
|
||||||
|
ClaudeApiProfile,
|
||||||
|
ClaudeCompatibleProvider,
|
||||||
|
Credentials,
|
||||||
|
} from '@automaker/types';
|
||||||
|
import { stripProviderPrefix } from '@automaker/types';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options for simple query execution
|
||||||
|
*/
|
||||||
|
export interface SimpleQueryOptions {
|
||||||
|
/** The prompt to send to the AI (can be text or multi-part content) */
|
||||||
|
prompt: string | Array<{ type: string; text?: string; source?: object }>;
|
||||||
|
/** Model to use (with or without provider prefix) */
|
||||||
|
model?: string;
|
||||||
|
/** Working directory for the query */
|
||||||
|
cwd: string;
|
||||||
|
/** System prompt (combined with user prompt for some providers) */
|
||||||
|
systemPrompt?: string;
|
||||||
|
/** Maximum turns for agentic operations (default: 1) */
|
||||||
|
maxTurns?: number;
|
||||||
|
/** Tools to allow (default: [] for simple queries) */
|
||||||
|
allowedTools?: string[];
|
||||||
|
/** Abort controller for cancellation */
|
||||||
|
abortController?: AbortController;
|
||||||
|
/** Structured output format for JSON responses */
|
||||||
|
outputFormat?: {
|
||||||
|
type: 'json_schema';
|
||||||
|
schema: Record<string, unknown>;
|
||||||
|
};
|
||||||
|
/** Thinking level for Claude models */
|
||||||
|
thinkingLevel?: ThinkingLevel;
|
||||||
|
/** Reasoning effort for Codex/OpenAI models */
|
||||||
|
reasoningEffort?: ReasoningEffort;
|
||||||
|
/** If true, runs in read-only mode (no file writes) */
|
||||||
|
readOnly?: boolean;
|
||||||
|
/** Setting sources for CLAUDE.md loading */
|
||||||
|
settingSources?: Array<'user' | 'project' | 'local'>;
|
||||||
|
/**
|
||||||
|
* Active Claude API profile for alternative endpoint configuration
|
||||||
|
* @deprecated Use claudeCompatibleProvider instead
|
||||||
|
*/
|
||||||
|
claudeApiProfile?: ClaudeApiProfile;
|
||||||
|
/**
|
||||||
|
* Claude-compatible provider for alternative endpoint configuration.
|
||||||
|
* Takes precedence over claudeApiProfile if both are set.
|
||||||
|
*/
|
||||||
|
claudeCompatibleProvider?: ClaudeCompatibleProvider;
|
||||||
|
/** Credentials for resolving 'credentials' apiKeySource in Claude API profiles/providers */
|
||||||
|
credentials?: Credentials;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Result from a simple query
|
||||||
|
*/
|
||||||
|
export interface SimpleQueryResult {
|
||||||
|
/** The accumulated text response */
|
||||||
|
text: string;
|
||||||
|
/** Structured output if outputFormat was specified and provider supports it */
|
||||||
|
structured_output?: Record<string, unknown>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options for streaming query execution
|
||||||
|
*/
|
||||||
|
export interface StreamingQueryOptions extends SimpleQueryOptions {
|
||||||
|
/** Callback for each text chunk received */
|
||||||
|
onText?: (text: string) => void;
|
||||||
|
/** Callback for tool use events */
|
||||||
|
onToolUse?: (tool: string, input: unknown) => void;
|
||||||
|
/** Callback for thinking blocks (if available) */
|
||||||
|
onThinking?: (thinking: string) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Default model to use when none specified
|
||||||
|
*/
|
||||||
|
const DEFAULT_MODEL = 'claude-sonnet-4-6';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a simple query and return the text result
|
||||||
|
*
|
||||||
|
* Use this for simple, non-streaming queries where you just need
|
||||||
|
* the final text response. For more complex use cases with progress
|
||||||
|
* callbacks, use streamingQuery() instead.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* const result = await simpleQuery({
|
||||||
|
* prompt: 'Generate a title for: user authentication',
|
||||||
|
* cwd: process.cwd(),
|
||||||
|
* systemPrompt: 'You are a title generator...',
|
||||||
|
* maxTurns: 1,
|
||||||
|
* allowedTools: [],
|
||||||
|
* });
|
||||||
|
* console.log(result.text); // "Add user authentication"
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
export async function simpleQuery(options: SimpleQueryOptions): Promise<SimpleQueryResult> {
|
||||||
|
const model = options.model || DEFAULT_MODEL;
|
||||||
|
const provider = ProviderFactory.getProviderForModel(model);
|
||||||
|
const bareModel = stripProviderPrefix(model);
|
||||||
|
|
||||||
|
let responseText = '';
|
||||||
|
let structuredOutput: Record<string, unknown> | undefined;
|
||||||
|
|
||||||
|
// Build provider options
|
||||||
|
const providerOptions = {
|
||||||
|
prompt: options.prompt,
|
||||||
|
model: bareModel,
|
||||||
|
originalModel: model,
|
||||||
|
cwd: options.cwd,
|
||||||
|
systemPrompt: options.systemPrompt,
|
||||||
|
maxTurns: options.maxTurns ?? 1,
|
||||||
|
allowedTools: options.allowedTools ?? [],
|
||||||
|
abortController: options.abortController,
|
||||||
|
outputFormat: options.outputFormat,
|
||||||
|
thinkingLevel: options.thinkingLevel,
|
||||||
|
reasoningEffort: options.reasoningEffort,
|
||||||
|
readOnly: options.readOnly,
|
||||||
|
settingSources: options.settingSources,
|
||||||
|
claudeApiProfile: options.claudeApiProfile, // Legacy: Pass active Claude API profile for alternative endpoint configuration
|
||||||
|
claudeCompatibleProvider: options.claudeCompatibleProvider, // New: Pass Claude-compatible provider (takes precedence)
|
||||||
|
credentials: options.credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
|
};
|
||||||
|
|
||||||
|
for await (const msg of provider.executeQuery(providerOptions)) {
|
||||||
|
// Handle error messages
|
||||||
|
if (msg.type === 'error') {
|
||||||
|
const errorMessage = msg.error || 'Provider returned an error';
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract text from assistant messages
|
||||||
|
if (msg.type === 'assistant' && msg.message?.content) {
|
||||||
|
for (const block of msg.message.content) {
|
||||||
|
if (block.type === 'text' && block.text) {
|
||||||
|
responseText += block.text;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle result messages
|
||||||
|
if (msg.type === 'result') {
|
||||||
|
if (msg.subtype === 'success') {
|
||||||
|
// Use result text if longer than accumulated text
|
||||||
|
if (msg.result && msg.result.length > responseText.length) {
|
||||||
|
responseText = msg.result;
|
||||||
|
}
|
||||||
|
// Capture structured output if present
|
||||||
|
if (msg.structured_output) {
|
||||||
|
structuredOutput = msg.structured_output;
|
||||||
|
}
|
||||||
|
} else if (msg.subtype === 'error_max_turns') {
|
||||||
|
// Max turns reached - return what we have
|
||||||
|
break;
|
||||||
|
} else if (msg.subtype === 'error_max_structured_output_retries') {
|
||||||
|
throw new Error('Could not produce valid structured output after retries');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { text: responseText, structured_output: structuredOutput };
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Execute a streaming query with event callbacks
|
||||||
|
*
|
||||||
|
* Use this for queries where you need real-time progress updates,
|
||||||
|
* such as when displaying streaming output to a user.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* const result = await streamingQuery({
|
||||||
|
* prompt: 'Analyze this project and suggest improvements',
|
||||||
|
* cwd: '/path/to/project',
|
||||||
|
* maxTurns: 250,
|
||||||
|
* allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
|
* onText: (text) => emitProgress(text),
|
||||||
|
* onToolUse: (tool, input) => emitToolUse(tool, input),
|
||||||
|
* });
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
export async function streamingQuery(options: StreamingQueryOptions): Promise<SimpleQueryResult> {
|
||||||
|
const model = options.model || DEFAULT_MODEL;
|
||||||
|
const provider = ProviderFactory.getProviderForModel(model);
|
||||||
|
const bareModel = stripProviderPrefix(model);
|
||||||
|
|
||||||
|
let responseText = '';
|
||||||
|
let structuredOutput: Record<string, unknown> | undefined;
|
||||||
|
|
||||||
|
// Build provider options
|
||||||
|
const providerOptions = {
|
||||||
|
prompt: options.prompt,
|
||||||
|
model: bareModel,
|
||||||
|
originalModel: model,
|
||||||
|
cwd: options.cwd,
|
||||||
|
systemPrompt: options.systemPrompt,
|
||||||
|
maxTurns: options.maxTurns ?? 250,
|
||||||
|
allowedTools: options.allowedTools ?? ['Read', 'Glob', 'Grep'],
|
||||||
|
abortController: options.abortController,
|
||||||
|
outputFormat: options.outputFormat,
|
||||||
|
thinkingLevel: options.thinkingLevel,
|
||||||
|
reasoningEffort: options.reasoningEffort,
|
||||||
|
readOnly: options.readOnly,
|
||||||
|
settingSources: options.settingSources,
|
||||||
|
claudeApiProfile: options.claudeApiProfile, // Legacy: Pass active Claude API profile for alternative endpoint configuration
|
||||||
|
claudeCompatibleProvider: options.claudeCompatibleProvider, // New: Pass Claude-compatible provider (takes precedence)
|
||||||
|
credentials: options.credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
|
};
|
||||||
|
|
||||||
|
for await (const msg of provider.executeQuery(providerOptions)) {
|
||||||
|
// Handle error messages
|
||||||
|
if (msg.type === 'error') {
|
||||||
|
const errorMessage = msg.error || 'Provider returned an error';
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract content from assistant messages
|
||||||
|
if (msg.type === 'assistant' && msg.message?.content) {
|
||||||
|
for (const block of msg.message.content) {
|
||||||
|
if (block.type === 'text' && block.text) {
|
||||||
|
responseText += block.text;
|
||||||
|
options.onText?.(block.text);
|
||||||
|
} else if (block.type === 'tool_use' && block.name) {
|
||||||
|
options.onToolUse?.(block.name, block.input);
|
||||||
|
} else if (block.type === 'thinking' && block.thinking) {
|
||||||
|
options.onThinking?.(block.thinking);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle result messages
|
||||||
|
if (msg.type === 'result') {
|
||||||
|
if (msg.subtype === 'success') {
|
||||||
|
// Use result text if longer than accumulated text
|
||||||
|
if (msg.result && msg.result.length > responseText.length) {
|
||||||
|
responseText = msg.result;
|
||||||
|
}
|
||||||
|
// Capture structured output if present
|
||||||
|
if (msg.structured_output) {
|
||||||
|
structuredOutput = msg.structured_output;
|
||||||
|
}
|
||||||
|
} else if (msg.subtype === 'error_max_turns') {
|
||||||
|
// Max turns reached - return what we have
|
||||||
|
break;
|
||||||
|
} else if (msg.subtype === 'error_max_structured_output_retries') {
|
||||||
|
throw new Error('Could not produce valid structured output after retries');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { text: responseText, structured_output: structuredOutput };
|
||||||
|
}
|
||||||
112
apps/server/src/providers/tool-normalization.ts
Normal file
112
apps/server/src/providers/tool-normalization.ts
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
/**
|
||||||
|
* Shared tool normalization utilities for AI providers
|
||||||
|
*
|
||||||
|
* These utilities help normalize tool inputs from various AI providers
|
||||||
|
* to the standard format expected by the application.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Valid todo status values in the standard format
|
||||||
|
*/
|
||||||
|
type TodoStatus = 'pending' | 'in_progress' | 'completed';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set of valid status values for validation
|
||||||
|
*/
|
||||||
|
const VALID_STATUSES = new Set<TodoStatus>(['pending', 'in_progress', 'completed']);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Todo item from various AI providers (Gemini, Copilot, etc.)
|
||||||
|
*/
|
||||||
|
interface ProviderTodo {
|
||||||
|
description?: string;
|
||||||
|
content?: string;
|
||||||
|
status?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Standard todo format used by the application
|
||||||
|
*/
|
||||||
|
interface NormalizedTodo {
|
||||||
|
content: string;
|
||||||
|
status: TodoStatus;
|
||||||
|
activeForm: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize a provider status value to a valid TodoStatus
|
||||||
|
*/
|
||||||
|
function normalizeStatus(status: string | undefined): TodoStatus {
|
||||||
|
if (!status) return 'pending';
|
||||||
|
if (status === 'cancelled' || status === 'canceled') return 'completed';
|
||||||
|
if (VALID_STATUSES.has(status as TodoStatus)) return status as TodoStatus;
|
||||||
|
return 'pending';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize todos array from provider format to standard format
|
||||||
|
*
|
||||||
|
* Handles different formats from providers:
|
||||||
|
* - Gemini: { description, status } with 'cancelled' as possible status
|
||||||
|
* - Copilot: { content/description, status } with 'cancelled' as possible status
|
||||||
|
*
|
||||||
|
* Output format (Claude/Standard):
|
||||||
|
* - { content, status, activeForm } where status is 'pending'|'in_progress'|'completed'
|
||||||
|
*/
|
||||||
|
export function normalizeTodos(todos: ProviderTodo[] | null | undefined): NormalizedTodo[] {
|
||||||
|
if (!todos) return [];
|
||||||
|
return todos.map((todo) => ({
|
||||||
|
content: todo.content || todo.description || '',
|
||||||
|
status: normalizeStatus(todo.status),
|
||||||
|
// Use content/description as activeForm since providers may not have it
|
||||||
|
activeForm: todo.content || todo.description || '',
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize file path parameters from various provider formats
|
||||||
|
*
|
||||||
|
* Different providers use different parameter names for file paths:
|
||||||
|
* - path, file, filename, filePath -> file_path
|
||||||
|
*/
|
||||||
|
export function normalizeFilePathInput(input: Record<string, unknown>): Record<string, unknown> {
|
||||||
|
const normalized = { ...input };
|
||||||
|
if (!normalized.file_path) {
|
||||||
|
if (input.path) normalized.file_path = input.path;
|
||||||
|
else if (input.file) normalized.file_path = input.file;
|
||||||
|
else if (input.filename) normalized.file_path = input.filename;
|
||||||
|
else if (input.filePath) normalized.file_path = input.filePath;
|
||||||
|
}
|
||||||
|
return normalized;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize shell command parameters from various provider formats
|
||||||
|
*
|
||||||
|
* Different providers use different parameter names for commands:
|
||||||
|
* - cmd, script -> command
|
||||||
|
*/
|
||||||
|
export function normalizeCommandInput(input: Record<string, unknown>): Record<string, unknown> {
|
||||||
|
const normalized = { ...input };
|
||||||
|
if (!normalized.command) {
|
||||||
|
if (input.cmd) normalized.command = input.cmd;
|
||||||
|
else if (input.script) normalized.command = input.script;
|
||||||
|
}
|
||||||
|
return normalized;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Normalize search pattern parameters from various provider formats
|
||||||
|
*
|
||||||
|
* Different providers use different parameter names for search patterns:
|
||||||
|
* - query, search, regex -> pattern
|
||||||
|
*/
|
||||||
|
export function normalizePatternInput(input: Record<string, unknown>): Record<string, unknown> {
|
||||||
|
const normalized = { ...input };
|
||||||
|
if (!normalized.pattern) {
|
||||||
|
if (input.query) normalized.pattern = input.query;
|
||||||
|
else if (input.search) normalized.pattern = input.search;
|
||||||
|
else if (input.regex) normalized.pattern = input.regex;
|
||||||
|
}
|
||||||
|
return normalized;
|
||||||
|
}
|
||||||
@@ -19,4 +19,7 @@ export type {
|
|||||||
InstallationStatus,
|
InstallationStatus,
|
||||||
ValidationResult,
|
ValidationResult,
|
||||||
ModelDefinition,
|
ModelDefinition,
|
||||||
|
AgentDefinition,
|
||||||
|
ReasoningEffort,
|
||||||
|
SystemPromptPreset,
|
||||||
} from '@automaker/types';
|
} from '@automaker/types';
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ export function createHistoryHandler(agentService: AgentService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const result = agentService.getHistory(sessionId);
|
const result = await agentService.getHistory(sessionId);
|
||||||
res.json(result);
|
res.json(result);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'Get history failed');
|
logError(error, 'Get history failed');
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ export function createQueueListHandler(agentService: AgentService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const result = agentService.getQueue(sessionId);
|
const result = await agentService.getQueue(sessionId);
|
||||||
res.json(result);
|
res.json(result);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'List queue failed');
|
logError(error, 'List queue failed');
|
||||||
|
|||||||
@@ -53,7 +53,15 @@ export function createSendHandler(agentService: AgentService) {
|
|||||||
thinkingLevel,
|
thinkingLevel,
|
||||||
})
|
})
|
||||||
.catch((error) => {
|
.catch((error) => {
|
||||||
logger.error('Background error in sendMessage():', error);
|
const errorMsg = (error as Error).message || 'Unknown error';
|
||||||
|
logger.error(`Background error in sendMessage() for session ${sessionId}:`, errorMsg);
|
||||||
|
|
||||||
|
// Emit error via WebSocket so the UI is notified even though
|
||||||
|
// the HTTP response already returned 200. This is critical for
|
||||||
|
// session-not-found errors where sendMessage() throws before it
|
||||||
|
// can emit its own error event (no in-memory session to emit from).
|
||||||
|
agentService.emitSessionError(sessionId, errorMsg);
|
||||||
|
|
||||||
logError(error, 'Send message failed (background)');
|
logError(error, 'Send message failed (background)');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import type { Request, Response } from 'express';
|
|||||||
import { AgentService } from '../../../services/agent-service.js';
|
import { AgentService } from '../../../services/agent-service.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
const logger = createLogger('Agent');
|
const _logger = createLogger('Agent');
|
||||||
|
|
||||||
export function createStartHandler(agentService: AgentService) {
|
export function createStartHandler(agentService: AgentService) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
|||||||
@@ -6,26 +6,103 @@ import { createLogger } from '@automaker/utils';
|
|||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
// Shared state for tracking generation status - private
|
// Types for running generation
|
||||||
let isRunning = false;
|
export type GenerationType = 'spec_regeneration' | 'feature_generation' | 'sync';
|
||||||
let currentAbortController: AbortController | null = null;
|
|
||||||
|
interface RunningGeneration {
|
||||||
|
isRunning: boolean;
|
||||||
|
type: GenerationType;
|
||||||
|
startedAt: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shared state for tracking generation status - scoped by project path
|
||||||
|
const runningProjects = new Map<string, RunningGeneration>();
|
||||||
|
const abortControllers = new Map<string, AbortController>();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get the current running state
|
* Get the running state for a specific project
|
||||||
*/
|
*/
|
||||||
export function getSpecRegenerationStatus(): {
|
export function getSpecRegenerationStatus(projectPath?: string): {
|
||||||
isRunning: boolean;
|
isRunning: boolean;
|
||||||
currentAbortController: AbortController | null;
|
currentAbortController: AbortController | null;
|
||||||
|
projectPath?: string;
|
||||||
|
type?: GenerationType;
|
||||||
|
startedAt?: string;
|
||||||
} {
|
} {
|
||||||
return { isRunning, currentAbortController };
|
if (projectPath) {
|
||||||
|
const generation = runningProjects.get(projectPath);
|
||||||
|
return {
|
||||||
|
isRunning: generation?.isRunning || false,
|
||||||
|
currentAbortController: abortControllers.get(projectPath) || null,
|
||||||
|
projectPath,
|
||||||
|
type: generation?.type,
|
||||||
|
startedAt: generation?.startedAt,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
// Fallback: check if any project is running (for backward compatibility)
|
||||||
|
const isAnyRunning = Array.from(runningProjects.values()).some((g) => g.isRunning);
|
||||||
|
return { isRunning: isAnyRunning, currentAbortController: null };
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set the running state and abort controller
|
* Get the project path that is currently running (if any)
|
||||||
*/
|
*/
|
||||||
export function setRunningState(running: boolean, controller: AbortController | null = null): void {
|
export function getRunningProjectPath(): string | null {
|
||||||
isRunning = running;
|
for (const [path, running] of runningProjects.entries()) {
|
||||||
currentAbortController = controller;
|
if (running) return path;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set the running state and abort controller for a specific project
|
||||||
|
*/
|
||||||
|
export function setRunningState(
|
||||||
|
projectPath: string,
|
||||||
|
running: boolean,
|
||||||
|
controller: AbortController | null = null,
|
||||||
|
type: GenerationType = 'spec_regeneration'
|
||||||
|
): void {
|
||||||
|
if (running) {
|
||||||
|
runningProjects.set(projectPath, {
|
||||||
|
isRunning: true,
|
||||||
|
type,
|
||||||
|
startedAt: new Date().toISOString(),
|
||||||
|
});
|
||||||
|
if (controller) {
|
||||||
|
abortControllers.set(projectPath, controller);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
runningProjects.delete(projectPath);
|
||||||
|
abortControllers.delete(projectPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all running spec/feature generations for the running agents view
|
||||||
|
*/
|
||||||
|
export function getAllRunningGenerations(): Array<{
|
||||||
|
projectPath: string;
|
||||||
|
type: GenerationType;
|
||||||
|
startedAt: string;
|
||||||
|
}> {
|
||||||
|
const results: Array<{
|
||||||
|
projectPath: string;
|
||||||
|
type: GenerationType;
|
||||||
|
startedAt: string;
|
||||||
|
}> = [];
|
||||||
|
|
||||||
|
for (const [projectPath, generation] of runningProjects.entries()) {
|
||||||
|
if (generation.isRunning) {
|
||||||
|
results.push({
|
||||||
|
projectPath,
|
||||||
|
type: generation.type,
|
||||||
|
startedAt: generation.startedAt,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return results;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -51,7 +128,7 @@ export function logAuthStatus(context: string): void {
|
|||||||
*/
|
*/
|
||||||
export function logError(error: unknown, context: string): void {
|
export function logError(error: unknown, context: string): void {
|
||||||
logger.error(`❌ ${context}:`);
|
logger.error(`❌ ${context}:`);
|
||||||
logger.error('Error name:', (error as any)?.name);
|
logger.error('Error name:', (error as Error)?.name);
|
||||||
logger.error('Error message:', (error as Error)?.message);
|
logger.error('Error message:', (error as Error)?.message);
|
||||||
logger.error('Error stack:', (error as Error)?.stack);
|
logger.error('Error stack:', (error as Error)?.stack);
|
||||||
logger.error('Full error object:', JSON.stringify(error, Object.getOwnPropertyNames(error), 2));
|
logger.error('Full error object:', JSON.stringify(error, Object.getOwnPropertyNames(error), 2));
|
||||||
|
|||||||
@@ -5,24 +5,85 @@
|
|||||||
* (defaults to Sonnet for balanced speed and quality).
|
* (defaults to Sonnet for balanced speed and quality).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import * as secureFs from '../../lib/secure-fs.js';
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
import { DEFAULT_PHASE_MODELS, supportsStructuredOutput, isCodexModel } from '@automaker/types';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createFeatureGenerationOptions } from '../../lib/sdk-options.js';
|
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
|
||||||
import { logAuthStatus } from './common.js';
|
|
||||||
import { parseAndCreateFeatures } from './parse-and-create-features.js';
|
import { parseAndCreateFeatures } from './parse-and-create-features.js';
|
||||||
|
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
||||||
import { getAppSpecPath } from '@automaker/platform';
|
import { getAppSpecPath } from '@automaker/platform';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPromptCustomization,
|
||||||
|
getPhaseModelWithOverrides,
|
||||||
|
} from '../../lib/settings-helpers.js';
|
||||||
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
const DEFAULT_MAX_FEATURES = 50;
|
const DEFAULT_MAX_FEATURES = 50;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Timeout for Codex models when generating features (5 minutes).
|
||||||
|
* Codex models are slower and need more time to generate 50+ features.
|
||||||
|
*/
|
||||||
|
const _CODEX_FEATURE_GENERATION_TIMEOUT_MS = 300000; // 5 minutes
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Type for extracted features JSON response
|
||||||
|
*/
|
||||||
|
interface FeaturesExtractionResult {
|
||||||
|
features: Array<{
|
||||||
|
id: string;
|
||||||
|
category?: string;
|
||||||
|
title: string;
|
||||||
|
description: string;
|
||||||
|
priority?: number;
|
||||||
|
complexity?: 'simple' | 'moderate' | 'complex';
|
||||||
|
dependencies?: string[];
|
||||||
|
}>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* JSON schema for features output format (Claude/Codex structured output)
|
||||||
|
*/
|
||||||
|
const featuresOutputSchema = {
|
||||||
|
type: 'object',
|
||||||
|
properties: {
|
||||||
|
features: {
|
||||||
|
type: 'array',
|
||||||
|
items: {
|
||||||
|
type: 'object',
|
||||||
|
properties: {
|
||||||
|
id: { type: 'string', description: 'Unique feature identifier (kebab-case)' },
|
||||||
|
category: { type: 'string', description: 'Feature category' },
|
||||||
|
title: { type: 'string', description: 'Short, descriptive title' },
|
||||||
|
description: { type: 'string', description: 'Detailed feature description' },
|
||||||
|
priority: {
|
||||||
|
type: 'number',
|
||||||
|
description: 'Priority level: 1 (highest) to 5 (lowest)',
|
||||||
|
},
|
||||||
|
complexity: {
|
||||||
|
type: 'string',
|
||||||
|
enum: ['simple', 'moderate', 'complex'],
|
||||||
|
description: 'Implementation complexity',
|
||||||
|
},
|
||||||
|
dependencies: {
|
||||||
|
type: 'array',
|
||||||
|
items: { type: 'string' },
|
||||||
|
description: 'IDs of features this depends on',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
required: ['id', 'title', 'description'],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
required: ['features'],
|
||||||
|
} as const;
|
||||||
|
|
||||||
export async function generateFeaturesFromSpec(
|
export async function generateFeaturesFromSpec(
|
||||||
projectPath: string,
|
projectPath: string,
|
||||||
events: EventEmitter,
|
events: EventEmitter,
|
||||||
@@ -56,38 +117,48 @@ export async function generateFeaturesFromSpec(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[FeatureGeneration]');
|
||||||
|
|
||||||
|
// Load existing features to prevent duplicates
|
||||||
|
const featureLoader = new FeatureLoader();
|
||||||
|
const existingFeatures = await featureLoader.getAll(projectPath);
|
||||||
|
|
||||||
|
logger.info(`Found ${existingFeatures.length} existing features to exclude from generation`);
|
||||||
|
|
||||||
|
// Build existing features context for the prompt
|
||||||
|
let existingFeaturesContext = '';
|
||||||
|
if (existingFeatures.length > 0) {
|
||||||
|
const featuresList = existingFeatures
|
||||||
|
.map(
|
||||||
|
(f) =>
|
||||||
|
`- "${f.title}" (ID: ${f.id}): ${f.description?.substring(0, 100) || 'No description'}`
|
||||||
|
)
|
||||||
|
.join('\n');
|
||||||
|
existingFeaturesContext = `
|
||||||
|
|
||||||
|
## EXISTING FEATURES (DO NOT REGENERATE THESE)
|
||||||
|
|
||||||
|
The following ${existingFeatures.length} features already exist in the project. You MUST NOT generate features that duplicate or overlap with these:
|
||||||
|
|
||||||
|
${featuresList}
|
||||||
|
|
||||||
|
CRITICAL INSTRUCTIONS:
|
||||||
|
- DO NOT generate any features with the same or similar titles as the existing features listed above
|
||||||
|
- DO NOT generate features that cover the same functionality as existing features
|
||||||
|
- ONLY generate NEW features that are not yet in the system
|
||||||
|
- If a feature from the roadmap already exists, skip it entirely
|
||||||
|
- Generate unique feature IDs that do not conflict with existing IDs: ${existingFeatures.map((f) => f.id).join(', ')}
|
||||||
|
`;
|
||||||
|
}
|
||||||
|
|
||||||
const prompt = `Based on this project specification:
|
const prompt = `Based on this project specification:
|
||||||
|
|
||||||
${spec}
|
${spec}
|
||||||
|
${existingFeaturesContext}
|
||||||
|
${prompts.appSpec.generateFeaturesFromSpecPrompt}
|
||||||
|
|
||||||
Generate a prioritized list of implementable features. For each feature provide:
|
Generate ${featureCount} NEW features that build on each other logically. Remember: ONLY generate features that DO NOT already exist.`;
|
||||||
|
|
||||||
1. **id**: A unique lowercase-hyphenated identifier
|
|
||||||
2. **category**: Functional category (e.g., "Core", "UI", "API", "Authentication", "Database")
|
|
||||||
3. **title**: Short descriptive title
|
|
||||||
4. **description**: What this feature does (2-3 sentences)
|
|
||||||
5. **priority**: 1 (high), 2 (medium), or 3 (low)
|
|
||||||
6. **complexity**: "simple", "moderate", or "complex"
|
|
||||||
7. **dependencies**: Array of feature IDs this depends on (can be empty)
|
|
||||||
|
|
||||||
Format as JSON:
|
|
||||||
{
|
|
||||||
"features": [
|
|
||||||
{
|
|
||||||
"id": "feature-id",
|
|
||||||
"category": "Feature Category",
|
|
||||||
"title": "Feature Title",
|
|
||||||
"description": "What it does",
|
|
||||||
"priority": 1,
|
|
||||||
"complexity": "moderate",
|
|
||||||
"dependencies": []
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
Generate ${featureCount} features that build on each other logically.
|
|
||||||
|
|
||||||
IMPORTANT: Do not ask for clarification. The specification is provided above. Generate the JSON immediately.`;
|
|
||||||
|
|
||||||
logger.info('========== PROMPT BEING SENT ==========');
|
logger.info('========== PROMPT BEING SENT ==========');
|
||||||
logger.info(`Prompt length: ${prompt.length} chars`);
|
logger.info(`Prompt length: ${prompt.length} chars`);
|
||||||
@@ -107,135 +178,152 @@ IMPORTANT: Do not ask for clarification. The specification is provided above. Ge
|
|||||||
'[FeatureGeneration]'
|
'[FeatureGeneration]'
|
||||||
);
|
);
|
||||||
|
|
||||||
// Get model from phase settings
|
// Get model from phase settings with provider info
|
||||||
const settings = await settingsService?.getGlobalSettings();
|
const {
|
||||||
const phaseModelEntry =
|
phaseModel: phaseModelEntry,
|
||||||
settings?.phaseModels?.featureGenerationModel || DEFAULT_PHASE_MODELS.featureGenerationModel;
|
provider,
|
||||||
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
credentials,
|
||||||
|
} = settingsService
|
||||||
|
? await getPhaseModelWithOverrides(
|
||||||
|
'featureGenerationModel',
|
||||||
|
settingsService,
|
||||||
|
projectPath,
|
||||||
|
'[FeatureGeneration]'
|
||||||
|
)
|
||||||
|
: {
|
||||||
|
phaseModel: DEFAULT_PHASE_MODELS.featureGenerationModel,
|
||||||
|
provider: undefined,
|
||||||
|
credentials: undefined,
|
||||||
|
};
|
||||||
|
const { model, thinkingLevel, reasoningEffort } = resolvePhaseModel(phaseModelEntry);
|
||||||
|
|
||||||
logger.info('Using model:', model);
|
logger.info('Using model:', model, provider ? `via provider: ${provider.name}` : 'direct API');
|
||||||
|
|
||||||
let responseText = '';
|
// Codex models need extended timeout for generating many features.
|
||||||
let messageCount = 0;
|
// Use 'xhigh' reasoning effort to get 5-minute timeout (300s base * 1.0x = 300s).
|
||||||
|
// The Codex provider has a special 5-minute base timeout for feature generation.
|
||||||
|
const isCodex = isCodexModel(model);
|
||||||
|
const effectiveReasoningEffort = isCodex ? 'xhigh' : reasoningEffort;
|
||||||
|
|
||||||
// Route to appropriate provider based on model type
|
if (isCodex) {
|
||||||
if (isCursorModel(model)) {
|
logger.info('Codex model detected - using extended timeout (5 minutes for feature generation)');
|
||||||
// Use Cursor provider for Cursor models
|
}
|
||||||
logger.info('[FeatureGeneration] Using Cursor provider');
|
if (effectiveReasoningEffort) {
|
||||||
|
logger.info('Reasoning effort:', effectiveReasoningEffort);
|
||||||
|
}
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
// Determine if we should use structured output based on model type
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
const useStructuredOutput = supportsStructuredOutput(model);
|
||||||
const bareModel = stripProviderPrefix(model);
|
logger.info(
|
||||||
|
`Structured output mode: ${useStructuredOutput ? 'enabled (Claude/Codex)' : 'disabled (using JSON instructions)'}`
|
||||||
|
);
|
||||||
|
|
||||||
// Add explicit instructions for Cursor to return JSON in response
|
// Build the final prompt - for non-Claude/Codex models, include explicit JSON instructions
|
||||||
const cursorPrompt = `${prompt}
|
let finalPrompt = prompt;
|
||||||
|
if (!useStructuredOutput) {
|
||||||
|
finalPrompt = `${prompt}
|
||||||
|
|
||||||
CRITICAL INSTRUCTIONS:
|
CRITICAL INSTRUCTIONS:
|
||||||
1. DO NOT write any files. Return the JSON in your response only.
|
1. DO NOT write any files. Return the JSON in your response only.
|
||||||
2. Respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
|
2. After analyzing the spec, respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
|
||||||
3. Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
3. The JSON must have this exact structure:
|
||||||
|
{
|
||||||
|
"features": [
|
||||||
|
{
|
||||||
|
"id": "unique-feature-id",
|
||||||
|
"category": "Category Name",
|
||||||
|
"title": "Short Feature Title",
|
||||||
|
"description": "Detailed description of the feature",
|
||||||
|
"priority": 1,
|
||||||
|
"complexity": "simple|moderate|complex",
|
||||||
|
"dependencies": ["other-feature-id"]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
for await (const msg of provider.executeQuery({
|
4. Feature IDs must be unique, lowercase, kebab-case (e.g., "user-authentication", "data-export")
|
||||||
prompt: cursorPrompt,
|
5. Priority ranges from 1 (highest) to 5 (lowest)
|
||||||
model: bareModel,
|
6. Complexity must be one of: "simple", "moderate", "complex"
|
||||||
|
7. Dependencies is an array of feature IDs that must be completed first (can be empty)
|
||||||
|
|
||||||
|
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use streamingQuery with event callbacks
|
||||||
|
const result = await streamingQuery({
|
||||||
|
prompt: finalPrompt,
|
||||||
|
model,
|
||||||
cwd: projectPath,
|
cwd: projectPath,
|
||||||
maxTurns: 250,
|
maxTurns: 250,
|
||||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
abortController,
|
abortController,
|
||||||
|
thinkingLevel,
|
||||||
|
reasoningEffort: effectiveReasoningEffort, // Extended timeout for Codex models
|
||||||
readOnly: true, // Feature generation only reads code, doesn't write
|
readOnly: true, // Feature generation only reads code, doesn't write
|
||||||
})) {
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
messageCount++;
|
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration
|
||||||
|
credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
outputFormat: useStructuredOutput
|
||||||
for (const block of msg.message.content) {
|
? {
|
||||||
if (block.type === 'text' && block.text) {
|
type: 'json_schema',
|
||||||
responseText += block.text;
|
schema: featuresOutputSchema,
|
||||||
logger.debug(`Feature text block received (${block.text.length} chars)`);
|
}
|
||||||
|
: undefined,
|
||||||
|
onText: (text) => {
|
||||||
|
logger.debug(`Feature text block received (${text.length} chars)`);
|
||||||
events.emit('spec-regeneration:event', {
|
events.emit('spec-regeneration:event', {
|
||||||
type: 'spec_regeneration_progress',
|
type: 'spec_regeneration_progress',
|
||||||
content: block.text,
|
content: text,
|
||||||
projectPath: projectPath,
|
projectPath: projectPath,
|
||||||
});
|
});
|
||||||
}
|
},
|
||||||
}
|
});
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
|
||||||
// Use result if it's a final accumulated message
|
// Get response content - prefer structured output if available
|
||||||
if (msg.result.length > responseText.length) {
|
let contentForParsing: string;
|
||||||
responseText = msg.result;
|
|
||||||
}
|
if (result.structured_output) {
|
||||||
}
|
// Use structured output from Claude/Codex models
|
||||||
}
|
logger.info('✅ Received structured output from model');
|
||||||
|
contentForParsing = JSON.stringify(result.structured_output);
|
||||||
|
logger.debug('Structured output:', contentForParsing);
|
||||||
} else {
|
} else {
|
||||||
// Use Claude SDK for Claude models
|
// Use text response (for non-Claude/Codex models or fallback)
|
||||||
logger.info('[FeatureGeneration] Using Claude SDK');
|
// Pre-extract JSON to handle conversational text that may surround the JSON response
|
||||||
|
// This follows the same pattern used in generate-spec.ts and validate-issue.ts
|
||||||
const options = createFeatureGenerationOptions({
|
const rawText = result.text;
|
||||||
cwd: projectPath,
|
logger.info(`Feature stream complete.`);
|
||||||
abortController,
|
logger.info(`Feature response length: ${rawText.length} chars`);
|
||||||
autoLoadClaudeMd,
|
|
||||||
model,
|
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
|
||||||
});
|
|
||||||
|
|
||||||
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
|
|
||||||
logger.info('Calling Claude Agent SDK query() for features...');
|
|
||||||
|
|
||||||
logAuthStatus('Right before SDK query() for features');
|
|
||||||
|
|
||||||
let stream;
|
|
||||||
try {
|
|
||||||
stream = query({ prompt, options });
|
|
||||||
logger.debug('query() returned stream successfully');
|
|
||||||
} catch (queryError) {
|
|
||||||
logger.error('❌ query() threw an exception:');
|
|
||||||
logger.error('Error:', queryError);
|
|
||||||
throw queryError;
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug('Starting to iterate over feature stream...');
|
|
||||||
|
|
||||||
try {
|
|
||||||
for await (const msg of stream) {
|
|
||||||
messageCount++;
|
|
||||||
logger.debug(
|
|
||||||
`Feature stream message #${messageCount}:`,
|
|
||||||
JSON.stringify({ type: msg.type, subtype: (msg as any).subtype }, null, 2)
|
|
||||||
);
|
|
||||||
|
|
||||||
if (msg.type === 'assistant' && msg.message.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text') {
|
|
||||||
responseText += block.text;
|
|
||||||
logger.debug(`Feature text block received (${block.text.length} chars)`);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_regeneration_progress',
|
|
||||||
content: block.text,
|
|
||||||
projectPath: projectPath,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
|
|
||||||
logger.debug('Received success result for features');
|
|
||||||
responseText = (msg as any).result || responseText;
|
|
||||||
} else if ((msg as { type: string }).type === 'error') {
|
|
||||||
logger.error('❌ Received error message from feature stream:');
|
|
||||||
logger.error('Error message:', JSON.stringify(msg, null, 2));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (streamError) {
|
|
||||||
logger.error('❌ Error while iterating feature stream:');
|
|
||||||
logger.error('Stream error:', streamError);
|
|
||||||
throw streamError;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info(`Feature stream complete. Total messages: ${messageCount}`);
|
|
||||||
logger.info(`Feature response length: ${responseText.length} chars`);
|
|
||||||
logger.info('========== FULL RESPONSE TEXT ==========');
|
logger.info('========== FULL RESPONSE TEXT ==========');
|
||||||
logger.info(responseText);
|
logger.info(rawText);
|
||||||
logger.info('========== END RESPONSE TEXT ==========');
|
logger.info('========== END RESPONSE TEXT ==========');
|
||||||
|
|
||||||
await parseAndCreateFeatures(projectPath, responseText, events);
|
// Pre-extract JSON from response - handles conversational text around the JSON
|
||||||
|
const extracted = extractJsonWithArray<FeaturesExtractionResult>(rawText, 'features', {
|
||||||
|
logger,
|
||||||
|
});
|
||||||
|
if (extracted) {
|
||||||
|
contentForParsing = JSON.stringify(extracted);
|
||||||
|
logger.info('✅ Pre-extracted JSON from text response');
|
||||||
|
} else {
|
||||||
|
// If pre-extraction fails, we know the next step will also fail.
|
||||||
|
// Throw an error here to avoid redundant parsing and make the failure point clearer.
|
||||||
|
logger.error(
|
||||||
|
'❌ Could not extract features JSON from model response. Full response text was:\n' +
|
||||||
|
rawText
|
||||||
|
);
|
||||||
|
const errorMessage =
|
||||||
|
'Failed to parse features from model response: No valid JSON with a "features" array found.';
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_error',
|
||||||
|
error: errorMessage,
|
||||||
|
projectPath: projectPath,
|
||||||
|
});
|
||||||
|
throw new Error(errorMessage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
await parseAndCreateFeatures(projectPath, contentForParsing, events);
|
||||||
|
|
||||||
logger.debug('========== generateFeaturesFromSpec() completed ==========');
|
logger.debug('========== generateFeaturesFromSpec() completed ==========');
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,27 +5,22 @@
|
|||||||
* (defaults to Opus for high-quality specification generation).
|
* (defaults to Opus for high-quality specification generation).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import path from 'path';
|
|
||||||
import * as secureFs from '../../lib/secure-fs.js';
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import {
|
import { specOutputSchema, specToXml, type SpecOutput } from '../../lib/app-spec-format.js';
|
||||||
specOutputSchema,
|
|
||||||
specToXml,
|
|
||||||
getStructuredSpecPromptInstruction,
|
|
||||||
type SpecOutput,
|
|
||||||
} from '../../lib/app-spec-format.js';
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
import { DEFAULT_PHASE_MODELS, supportsStructuredOutput } from '@automaker/types';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createSpecGenerationOptions } from '../../lib/sdk-options.js';
|
|
||||||
import { extractJson } from '../../lib/json-extractor.js';
|
import { extractJson } from '../../lib/json-extractor.js';
|
||||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||||
import { logAuthStatus } from './common.js';
|
|
||||||
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
|
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
|
||||||
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
|
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPromptCustomization,
|
||||||
|
getPhaseModelWithOverrides,
|
||||||
|
} from '../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
@@ -47,6 +42,9 @@ export async function generateSpec(
|
|||||||
logger.info('analyzeProject:', analyzeProject);
|
logger.info('analyzeProject:', analyzeProject);
|
||||||
logger.info('maxFeatures:', maxFeatures);
|
logger.info('maxFeatures:', maxFeatures);
|
||||||
|
|
||||||
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[SpecRegeneration]');
|
||||||
|
|
||||||
// Build the prompt based on whether we should analyze the project
|
// Build the prompt based on whether we should analyze the project
|
||||||
let analysisInstructions = '';
|
let analysisInstructions = '';
|
||||||
let techStackDefaults = '';
|
let techStackDefaults = '';
|
||||||
@@ -70,9 +68,7 @@ export async function generateSpec(
|
|||||||
Use these technologies as the foundation for the specification.`;
|
Use these technologies as the foundation for the specification.`;
|
||||||
}
|
}
|
||||||
|
|
||||||
const prompt = `You are helping to define a software project specification.
|
const prompt = `${prompts.appSpec.generateSpecSystemPrompt}
|
||||||
|
|
||||||
IMPORTANT: Never ask for clarification or additional information. Use the information provided and make reasonable assumptions to create the best possible specification. If details are missing, infer them based on common patterns and best practices.
|
|
||||||
|
|
||||||
Project Overview:
|
Project Overview:
|
||||||
${projectOverview}
|
${projectOverview}
|
||||||
@@ -81,7 +77,7 @@ ${techStackDefaults}
|
|||||||
|
|
||||||
${analysisInstructions}
|
${analysisInstructions}
|
||||||
|
|
||||||
${getStructuredSpecPromptInstruction()}`;
|
${prompts.appSpec.structuredSpecInstructions}`;
|
||||||
|
|
||||||
logger.info('========== PROMPT BEING SENT ==========');
|
logger.info('========== PROMPT BEING SENT ==========');
|
||||||
logger.info(`Prompt length: ${prompt.length} chars`);
|
logger.info(`Prompt length: ${prompt.length} chars`);
|
||||||
@@ -100,30 +96,40 @@ ${getStructuredSpecPromptInstruction()}`;
|
|||||||
'[SpecRegeneration]'
|
'[SpecRegeneration]'
|
||||||
);
|
);
|
||||||
|
|
||||||
// Get model from phase settings
|
// Get model from phase settings with provider info
|
||||||
const settings = await settingsService?.getGlobalSettings();
|
const {
|
||||||
const phaseModelEntry =
|
phaseModel: phaseModelEntry,
|
||||||
settings?.phaseModels?.specGenerationModel || DEFAULT_PHASE_MODELS.specGenerationModel;
|
provider,
|
||||||
|
credentials,
|
||||||
|
} = settingsService
|
||||||
|
? await getPhaseModelWithOverrides(
|
||||||
|
'specGenerationModel',
|
||||||
|
settingsService,
|
||||||
|
projectPath,
|
||||||
|
'[SpecRegeneration]'
|
||||||
|
)
|
||||||
|
: {
|
||||||
|
phaseModel: DEFAULT_PHASE_MODELS.specGenerationModel,
|
||||||
|
provider: undefined,
|
||||||
|
credentials: undefined,
|
||||||
|
};
|
||||||
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
||||||
|
|
||||||
logger.info('Using model:', model);
|
logger.info('Using model:', model, provider ? `via provider: ${provider.name}` : 'direct API');
|
||||||
|
|
||||||
let responseText = '';
|
let responseText = '';
|
||||||
let messageCount = 0;
|
|
||||||
let structuredOutput: SpecOutput | null = null;
|
let structuredOutput: SpecOutput | null = null;
|
||||||
|
|
||||||
// Route to appropriate provider based on model type
|
// Determine if we should use structured output based on model type
|
||||||
if (isCursorModel(model)) {
|
const useStructuredOutput = supportsStructuredOutput(model);
|
||||||
// Use Cursor provider for Cursor models
|
logger.info(
|
||||||
logger.info('[SpecGeneration] Using Cursor provider');
|
`Structured output mode: ${useStructuredOutput ? 'enabled (Claude/Codex)' : 'disabled (using JSON instructions)'}`
|
||||||
|
);
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
// Build the final prompt - for non-Claude/Codex models, include JSON schema instructions
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
let finalPrompt = prompt;
|
||||||
const bareModel = stripProviderPrefix(model);
|
if (!useStructuredOutput) {
|
||||||
|
finalPrompt = `${prompt}
|
||||||
// For Cursor, include the JSON schema in the prompt with clear instructions
|
|
||||||
// to return JSON in the response (not write to a file)
|
|
||||||
const cursorPrompt = `${prompt}
|
|
||||||
|
|
||||||
CRITICAL INSTRUCTIONS:
|
CRITICAL INSTRUCTIONS:
|
||||||
1. DO NOT write any files. DO NOT create any files like "project_specification.json".
|
1. DO NOT write any files. DO NOT create any files like "project_specification.json".
|
||||||
@@ -133,153 +139,59 @@ CRITICAL INSTRUCTIONS:
|
|||||||
${JSON.stringify(specOutputSchema, null, 2)}
|
${JSON.stringify(specOutputSchema, null, 2)}
|
||||||
|
|
||||||
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
|
||||||
|
}
|
||||||
|
|
||||||
for await (const msg of provider.executeQuery({
|
// Use streamingQuery with event callbacks
|
||||||
prompt: cursorPrompt,
|
const result = await streamingQuery({
|
||||||
model: bareModel,
|
prompt: finalPrompt,
|
||||||
|
model,
|
||||||
cwd: projectPath,
|
cwd: projectPath,
|
||||||
maxTurns: 250,
|
maxTurns: 250,
|
||||||
allowedTools: ['Read', 'Glob', 'Grep'],
|
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
abortController,
|
abortController,
|
||||||
|
thinkingLevel,
|
||||||
readOnly: true, // Spec generation only reads code, we write the spec ourselves
|
readOnly: true, // Spec generation only reads code, we write the spec ourselves
|
||||||
})) {
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
messageCount++;
|
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration
|
||||||
|
credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
outputFormat: useStructuredOutput
|
||||||
for (const block of msg.message.content) {
|
? {
|
||||||
if (block.type === 'text' && block.text) {
|
type: 'json_schema',
|
||||||
responseText += block.text;
|
schema: specOutputSchema,
|
||||||
|
}
|
||||||
|
: undefined,
|
||||||
|
onText: (text) => {
|
||||||
|
responseText += text;
|
||||||
logger.info(
|
logger.info(
|
||||||
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
|
`Text block received (${text.length} chars), total now: ${responseText.length} chars`
|
||||||
);
|
);
|
||||||
events.emit('spec-regeneration:event', {
|
events.emit('spec-regeneration:event', {
|
||||||
type: 'spec_regeneration_progress',
|
type: 'spec_regeneration_progress',
|
||||||
content: block.text,
|
content: text,
|
||||||
projectPath: projectPath,
|
projectPath: projectPath,
|
||||||
});
|
});
|
||||||
} else if (block.type === 'tool_use') {
|
},
|
||||||
logger.info('Tool use:', block.name);
|
onToolUse: (tool, input) => {
|
||||||
|
logger.info('Tool use:', tool);
|
||||||
events.emit('spec-regeneration:event', {
|
events.emit('spec-regeneration:event', {
|
||||||
type: 'spec_tool',
|
type: 'spec_tool',
|
||||||
tool: block.name,
|
tool,
|
||||||
input: block.input,
|
input,
|
||||||
});
|
});
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
|
||||||
// Use result if it's a final accumulated message
|
|
||||||
if (msg.result.length > responseText.length) {
|
|
||||||
responseText = msg.result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse JSON from the response text using shared utility
|
|
||||||
if (responseText) {
|
|
||||||
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models
|
|
||||||
logger.info('[SpecGeneration] Using Claude SDK');
|
|
||||||
|
|
||||||
const options = createSpecGenerationOptions({
|
|
||||||
cwd: projectPath,
|
|
||||||
abortController,
|
|
||||||
autoLoadClaudeMd,
|
|
||||||
model,
|
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
|
||||||
outputFormat: {
|
|
||||||
type: 'json_schema',
|
|
||||||
schema: specOutputSchema,
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
|
// Get structured output if available
|
||||||
logger.info('Calling Claude Agent SDK query()...');
|
if (result.structured_output) {
|
||||||
|
structuredOutput = result.structured_output as unknown as SpecOutput;
|
||||||
// Log auth status right before the SDK call
|
|
||||||
logAuthStatus('Right before SDK query()');
|
|
||||||
|
|
||||||
let stream;
|
|
||||||
try {
|
|
||||||
stream = query({ prompt, options });
|
|
||||||
logger.debug('query() returned stream successfully');
|
|
||||||
} catch (queryError) {
|
|
||||||
logger.error('❌ query() threw an exception:');
|
|
||||||
logger.error('Error:', queryError);
|
|
||||||
throw queryError;
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info('Starting to iterate over stream...');
|
|
||||||
|
|
||||||
try {
|
|
||||||
for await (const msg of stream) {
|
|
||||||
messageCount++;
|
|
||||||
logger.info(
|
|
||||||
`Stream message #${messageCount}: type=${msg.type}, subtype=${(msg as any).subtype}`
|
|
||||||
);
|
|
||||||
|
|
||||||
if (msg.type === 'assistant') {
|
|
||||||
const msgAny = msg as any;
|
|
||||||
if (msgAny.message?.content) {
|
|
||||||
for (const block of msgAny.message.content) {
|
|
||||||
if (block.type === 'text') {
|
|
||||||
responseText += block.text;
|
|
||||||
logger.info(
|
|
||||||
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
|
|
||||||
);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_regeneration_progress',
|
|
||||||
content: block.text,
|
|
||||||
projectPath: projectPath,
|
|
||||||
});
|
|
||||||
} else if (block.type === 'tool_use') {
|
|
||||||
logger.info('Tool use:', block.name);
|
|
||||||
events.emit('spec-regeneration:event', {
|
|
||||||
type: 'spec_tool',
|
|
||||||
tool: block.name,
|
|
||||||
input: block.input,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
|
|
||||||
logger.info('Received success result');
|
|
||||||
// Check for structured output - this is the reliable way to get spec data
|
|
||||||
const resultMsg = msg as any;
|
|
||||||
if (resultMsg.structured_output) {
|
|
||||||
structuredOutput = resultMsg.structured_output as SpecOutput;
|
|
||||||
logger.info('✅ Received structured output');
|
logger.info('✅ Received structured output');
|
||||||
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
|
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
|
||||||
} else {
|
} else if (!useStructuredOutput && responseText) {
|
||||||
logger.warn('⚠️ No structured output in result, will fall back to text parsing');
|
// For non-Claude providers, parse JSON from response text
|
||||||
}
|
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
|
||||||
} else if (msg.type === 'result') {
|
|
||||||
// Handle error result types
|
|
||||||
const subtype = (msg as any).subtype;
|
|
||||||
logger.info(`Result message: subtype=${subtype}`);
|
|
||||||
if (subtype === 'error_max_turns') {
|
|
||||||
logger.error('❌ Hit max turns limit!');
|
|
||||||
} else if (subtype === 'error_max_structured_output_retries') {
|
|
||||||
logger.error('❌ Failed to produce valid structured output after retries');
|
|
||||||
throw new Error('Could not produce valid spec output');
|
|
||||||
}
|
|
||||||
} else if ((msg as { type: string }).type === 'error') {
|
|
||||||
logger.error('❌ Received error message from stream:');
|
|
||||||
logger.error('Error message:', JSON.stringify(msg, null, 2));
|
|
||||||
} else if (msg.type === 'user') {
|
|
||||||
// Log user messages (tool results)
|
|
||||||
logger.info(`User message (tool result): ${JSON.stringify(msg).substring(0, 500)}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (streamError) {
|
|
||||||
logger.error('❌ Error while iterating stream:');
|
|
||||||
logger.error('Stream error:', streamError);
|
|
||||||
throw streamError;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.info(`Stream iteration complete. Total messages: ${messageCount}`);
|
logger.info(`Stream iteration complete.`);
|
||||||
logger.info(`Response text length: ${responseText.length} chars`);
|
logger.info(`Response text length: ${responseText.length} chars`);
|
||||||
|
|
||||||
// Determine XML content to save
|
// Determine XML content to save
|
||||||
@@ -311,21 +223,35 @@ Your entire response should be valid JSON starting with { and ending with }. No
|
|||||||
xmlContent = responseText.substring(xmlStart, xmlEnd + '</project_specification>'.length);
|
xmlContent = responseText.substring(xmlStart, xmlEnd + '</project_specification>'.length);
|
||||||
logger.info(`Extracted XML content: ${xmlContent.length} chars (from position ${xmlStart})`);
|
logger.info(`Extracted XML content: ${xmlContent.length} chars (from position ${xmlStart})`);
|
||||||
} else {
|
} else {
|
||||||
// No valid XML structure found in the response text
|
// No XML found, try JSON extraction
|
||||||
// This happens when structured output was expected but not received, and the agent
|
logger.warn('⚠️ No XML tags found, attempting JSON extraction...');
|
||||||
// output conversational text instead of XML (e.g., "The project directory appears to be empty...")
|
const extractedJson = extractJson<SpecOutput>(responseText, { logger });
|
||||||
// We should NOT save this conversational text as it's not a valid spec
|
|
||||||
logger.error('❌ Response does not contain valid <project_specification> XML structure');
|
if (
|
||||||
|
extractedJson &&
|
||||||
|
typeof extractedJson.project_name === 'string' &&
|
||||||
|
typeof extractedJson.overview === 'string' &&
|
||||||
|
Array.isArray(extractedJson.technology_stack) &&
|
||||||
|
Array.isArray(extractedJson.core_capabilities) &&
|
||||||
|
Array.isArray(extractedJson.implemented_features)
|
||||||
|
) {
|
||||||
|
logger.info('✅ Successfully extracted JSON from response text');
|
||||||
|
xmlContent = specToXml(extractedJson);
|
||||||
|
logger.info(`✅ Converted extracted JSON to XML: ${xmlContent.length} chars`);
|
||||||
|
} else {
|
||||||
|
// Neither XML nor valid JSON found
|
||||||
|
logger.error('❌ Response does not contain valid XML or JSON structure');
|
||||||
logger.error(
|
logger.error(
|
||||||
'This typically happens when structured output failed and the agent produced conversational text instead of XML'
|
'This typically happens when structured output failed and the agent produced conversational text instead of structured output'
|
||||||
);
|
);
|
||||||
throw new Error(
|
throw new Error(
|
||||||
'Failed to generate spec: No valid XML structure found in response. ' +
|
'Failed to generate spec: No valid XML or JSON structure found in response. ' +
|
||||||
'The response contained conversational text but no <project_specification> tags. ' +
|
'The response contained conversational text but no <project_specification> tags or valid JSON. ' +
|
||||||
'Please try again.'
|
'Please try again.'
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Save spec to .automaker directory
|
// Save spec to .automaker directory
|
||||||
await ensureAutomakerDir(projectPath);
|
await ensureAutomakerDir(projectPath);
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import type { EventEmitter } from '../../lib/events.js';
|
|||||||
import { createCreateHandler } from './routes/create.js';
|
import { createCreateHandler } from './routes/create.js';
|
||||||
import { createGenerateHandler } from './routes/generate.js';
|
import { createGenerateHandler } from './routes/generate.js';
|
||||||
import { createGenerateFeaturesHandler } from './routes/generate-features.js';
|
import { createGenerateFeaturesHandler } from './routes/generate-features.js';
|
||||||
|
import { createSyncHandler } from './routes/sync.js';
|
||||||
import { createStopHandler } from './routes/stop.js';
|
import { createStopHandler } from './routes/stop.js';
|
||||||
import { createStatusHandler } from './routes/status.js';
|
import { createStatusHandler } from './routes/status.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
@@ -20,6 +21,7 @@ export function createSpecRegenerationRoutes(
|
|||||||
router.post('/create', createCreateHandler(events));
|
router.post('/create', createCreateHandler(events));
|
||||||
router.post('/generate', createGenerateHandler(events, settingsService));
|
router.post('/generate', createGenerateHandler(events, settingsService));
|
||||||
router.post('/generate-features', createGenerateFeaturesHandler(events, settingsService));
|
router.post('/generate-features', createGenerateFeaturesHandler(events, settingsService));
|
||||||
|
router.post('/sync', createSyncHandler(events, settingsService));
|
||||||
router.post('/stop', createStopHandler());
|
router.post('/stop', createStopHandler());
|
||||||
router.get('/status', createStatusHandler());
|
router.get('/status', createStatusHandler());
|
||||||
|
|
||||||
|
|||||||
@@ -5,9 +5,10 @@
|
|||||||
import path from 'path';
|
import path from 'path';
|
||||||
import * as secureFs from '../../lib/secure-fs.js';
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger, atomicWriteJson, DEFAULT_BACKUP_COUNT } from '@automaker/utils';
|
||||||
import { getFeaturesDir } from '@automaker/platform';
|
import { getFeaturesDir } from '@automaker/platform';
|
||||||
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
||||||
|
import { getNotificationService } from '../../services/notification-service.js';
|
||||||
|
|
||||||
const logger = createLogger('SpecRegeneration');
|
const logger = createLogger('SpecRegeneration');
|
||||||
|
|
||||||
@@ -73,10 +74,10 @@ export async function parseAndCreateFeatures(
|
|||||||
updatedAt: new Date().toISOString(),
|
updatedAt: new Date().toISOString(),
|
||||||
};
|
};
|
||||||
|
|
||||||
await secureFs.writeFile(
|
// Use atomic write with backup support for crash protection
|
||||||
path.join(featureDir, 'feature.json'),
|
await atomicWriteJson(path.join(featureDir, 'feature.json'), featureData, {
|
||||||
JSON.stringify(featureData, null, 2)
|
backupCount: DEFAULT_BACKUP_COUNT,
|
||||||
);
|
});
|
||||||
|
|
||||||
createdFeatures.push({ id: feature.id, title: feature.title });
|
createdFeatures.push({ id: feature.id, title: feature.title });
|
||||||
}
|
}
|
||||||
@@ -88,6 +89,15 @@ export async function parseAndCreateFeatures(
|
|||||||
message: `Spec regeneration complete! Created ${createdFeatures.length} features.`,
|
message: `Spec regeneration complete! Created ${createdFeatures.length} features.`,
|
||||||
projectPath: projectPath,
|
projectPath: projectPath,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Create notification for spec generation completion
|
||||||
|
const notificationService = getNotificationService();
|
||||||
|
await notificationService.createNotification({
|
||||||
|
type: 'spec_regeneration_complete',
|
||||||
|
title: 'Spec Generation Complete',
|
||||||
|
message: `Created ${createdFeatures.length} features from the project specification.`,
|
||||||
|
projectPath: projectPath,
|
||||||
|
});
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('❌ parseAndCreateFeatures() failed:');
|
logger.error('❌ parseAndCreateFeatures() failed:');
|
||||||
logger.error('Error:', error);
|
logger.error('Error:', error);
|
||||||
|
|||||||
@@ -47,17 +47,17 @@ export function createCreateHandler(events: EventEmitter) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
if (isRunning) {
|
if (isRunning) {
|
||||||
logger.warn('Generation already running, rejecting request');
|
logger.warn('Generation already running for project:', projectPath);
|
||||||
res.json({ success: false, error: 'Spec generation already running' });
|
res.json({ success: false, error: 'Spec generation already running for this project' });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
logAuthStatus('Before starting generation');
|
logAuthStatus('Before starting generation');
|
||||||
|
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(projectPath, true, abortController);
|
||||||
logger.info('Starting background generation task...');
|
logger.info('Starting background generation task...');
|
||||||
|
|
||||||
// Start generation in background
|
// Start generation in background
|
||||||
@@ -80,7 +80,7 @@ export function createCreateHandler(events: EventEmitter) {
|
|||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
logger.info('Generation task finished (success or error)');
|
logger.info('Generation task finished (success or error)');
|
||||||
setRunningState(false, null);
|
setRunningState(projectPath, false, null);
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.info('Returning success response (generation running in background)');
|
logger.info('Returning success response (generation running in background)');
|
||||||
|
|||||||
@@ -40,17 +40,17 @@ export function createGenerateFeaturesHandler(
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
if (isRunning) {
|
if (isRunning) {
|
||||||
logger.warn('Generation already running, rejecting request');
|
logger.warn('Generation already running for project:', projectPath);
|
||||||
res.json({ success: false, error: 'Generation already running' });
|
res.json({ success: false, error: 'Generation already running for this project' });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
logAuthStatus('Before starting feature generation');
|
logAuthStatus('Before starting feature generation');
|
||||||
|
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(projectPath, true, abortController, 'feature_generation');
|
||||||
logger.info('Starting background feature generation task...');
|
logger.info('Starting background feature generation task...');
|
||||||
|
|
||||||
generateFeaturesFromSpec(projectPath, events, abortController, maxFeatures, settingsService)
|
generateFeaturesFromSpec(projectPath, events, abortController, maxFeatures, settingsService)
|
||||||
@@ -63,7 +63,7 @@ export function createGenerateFeaturesHandler(
|
|||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
logger.info('Feature generation task finished (success or error)');
|
logger.info('Feature generation task finished (success or error)');
|
||||||
setRunningState(false, null);
|
setRunningState(projectPath, false, null);
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.info('Returning success response (generation running in background)');
|
logger.info('Returning success response (generation running in background)');
|
||||||
|
|||||||
@@ -48,17 +48,17 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
if (isRunning) {
|
if (isRunning) {
|
||||||
logger.warn('Generation already running, rejecting request');
|
logger.warn('Generation already running for project:', projectPath);
|
||||||
res.json({ success: false, error: 'Spec generation already running' });
|
res.json({ success: false, error: 'Spec generation already running for this project' });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
logAuthStatus('Before starting generation');
|
logAuthStatus('Before starting generation');
|
||||||
|
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(projectPath, true, abortController);
|
||||||
logger.info('Starting background generation task...');
|
logger.info('Starting background generation task...');
|
||||||
|
|
||||||
generateSpec(
|
generateSpec(
|
||||||
@@ -81,7 +81,7 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
|||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
logger.info('Generation task finished (success or error)');
|
logger.info('Generation task finished (success or error)');
|
||||||
setRunningState(false, null);
|
setRunningState(projectPath, false, null);
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.info('Returning success response (generation running in background)');
|
logger.info('Returning success response (generation running in background)');
|
||||||
|
|||||||
@@ -6,10 +6,11 @@ import type { Request, Response } from 'express';
|
|||||||
import { getSpecRegenerationStatus, getErrorMessage } from '../common.js';
|
import { getSpecRegenerationStatus, getErrorMessage } from '../common.js';
|
||||||
|
|
||||||
export function createStatusHandler() {
|
export function createStatusHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { isRunning } = getSpecRegenerationStatus();
|
const projectPath = req.query.projectPath as string | undefined;
|
||||||
res.json({ success: true, isRunning });
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
|
res.json({ success: true, isRunning, projectPath });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,13 +6,16 @@ import type { Request, Response } from 'express';
|
|||||||
import { getSpecRegenerationStatus, setRunningState, getErrorMessage } from '../common.js';
|
import { getSpecRegenerationStatus, setRunningState, getErrorMessage } from '../common.js';
|
||||||
|
|
||||||
export function createStopHandler() {
|
export function createStopHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { currentAbortController } = getSpecRegenerationStatus();
|
const { projectPath } = req.body as { projectPath?: string };
|
||||||
|
const { currentAbortController } = getSpecRegenerationStatus(projectPath);
|
||||||
if (currentAbortController) {
|
if (currentAbortController) {
|
||||||
currentAbortController.abort();
|
currentAbortController.abort();
|
||||||
}
|
}
|
||||||
setRunningState(false, null);
|
if (projectPath) {
|
||||||
|
setRunningState(projectPath, false, null);
|
||||||
|
}
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
|||||||
76
apps/server/src/routes/app-spec/routes/sync.ts
Normal file
76
apps/server/src/routes/app-spec/routes/sync.ts
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
/**
|
||||||
|
* POST /sync endpoint - Sync spec with codebase and features
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { EventEmitter } from '../../../lib/events.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import {
|
||||||
|
getSpecRegenerationStatus,
|
||||||
|
setRunningState,
|
||||||
|
logAuthStatus,
|
||||||
|
logError,
|
||||||
|
getErrorMessage,
|
||||||
|
} from '../common.js';
|
||||||
|
import { syncSpec } from '../sync-spec.js';
|
||||||
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
|
|
||||||
|
const logger = createLogger('SpecSync');
|
||||||
|
|
||||||
|
export function createSyncHandler(events: EventEmitter, settingsService?: SettingsService) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
logger.info('========== /sync endpoint called ==========');
|
||||||
|
logger.debug('Request body:', JSON.stringify(req.body, null, 2));
|
||||||
|
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
logger.debug('projectPath:', projectPath);
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
logger.error('Missing projectPath parameter');
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const { isRunning } = getSpecRegenerationStatus(projectPath);
|
||||||
|
if (isRunning) {
|
||||||
|
logger.warn('Generation/sync already running for project:', projectPath);
|
||||||
|
res.json({ success: false, error: 'Operation already running for this project' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logAuthStatus('Before starting spec sync');
|
||||||
|
|
||||||
|
const abortController = new AbortController();
|
||||||
|
setRunningState(projectPath, true, abortController, 'sync');
|
||||||
|
logger.info('Starting background spec sync task...');
|
||||||
|
|
||||||
|
syncSpec(projectPath, events, abortController, settingsService)
|
||||||
|
.then((result) => {
|
||||||
|
logger.info('Spec sync completed successfully');
|
||||||
|
logger.info('Result:', JSON.stringify(result, null, 2));
|
||||||
|
})
|
||||||
|
.catch((error) => {
|
||||||
|
logError(error, 'Spec sync failed with error');
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_error',
|
||||||
|
error: getErrorMessage(error),
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
})
|
||||||
|
.finally(() => {
|
||||||
|
logger.info('Spec sync task finished (success or error)');
|
||||||
|
setRunningState(projectPath, false, null);
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.info('Returning success response (sync running in background)');
|
||||||
|
res.json({ success: true });
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Sync route handler failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
389
apps/server/src/routes/app-spec/sync-spec.ts
Normal file
389
apps/server/src/routes/app-spec/sync-spec.ts
Normal file
@@ -0,0 +1,389 @@
|
|||||||
|
/**
|
||||||
|
* Sync spec with current codebase and feature state
|
||||||
|
*
|
||||||
|
* Updates the spec file based on:
|
||||||
|
* - Completed Automaker features
|
||||||
|
* - Code analysis for tech stack and implementations
|
||||||
|
* - Roadmap phase status updates
|
||||||
|
*/
|
||||||
|
|
||||||
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { DEFAULT_PHASE_MODELS, supportsStructuredOutput } from '@automaker/types';
|
||||||
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
|
import { streamingQuery } from '../../providers/simple-query-service.js';
|
||||||
|
import { extractJson } from '../../lib/json-extractor.js';
|
||||||
|
import { getAppSpecPath } from '@automaker/platform';
|
||||||
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPhaseModelWithOverrides,
|
||||||
|
} from '../../lib/settings-helpers.js';
|
||||||
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
|
import {
|
||||||
|
extractImplementedFeatures,
|
||||||
|
extractTechnologyStack,
|
||||||
|
extractRoadmapPhases,
|
||||||
|
updateImplementedFeaturesSection,
|
||||||
|
updateTechnologyStack,
|
||||||
|
updateRoadmapPhaseStatus,
|
||||||
|
type ImplementedFeature,
|
||||||
|
} from '../../lib/xml-extractor.js';
|
||||||
|
import { getNotificationService } from '../../services/notification-service.js';
|
||||||
|
|
||||||
|
const logger = createLogger('SpecSync');
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Type for extracted tech stack JSON response
|
||||||
|
*/
|
||||||
|
interface TechStackExtractionResult {
|
||||||
|
technologies: string[];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* JSON schema for tech stack analysis output (Claude/Codex structured output)
|
||||||
|
*/
|
||||||
|
const techStackOutputSchema = {
|
||||||
|
type: 'object',
|
||||||
|
properties: {
|
||||||
|
technologies: {
|
||||||
|
type: 'array',
|
||||||
|
items: { type: 'string' },
|
||||||
|
description: 'List of technologies detected in the project',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
required: ['technologies'],
|
||||||
|
} as const;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Result of a sync operation
|
||||||
|
*/
|
||||||
|
export interface SyncResult {
|
||||||
|
techStackUpdates: {
|
||||||
|
added: string[];
|
||||||
|
removed: string[];
|
||||||
|
};
|
||||||
|
implementedFeaturesUpdates: {
|
||||||
|
addedFromFeatures: string[];
|
||||||
|
removed: string[];
|
||||||
|
};
|
||||||
|
roadmapUpdates: Array<{ phaseName: string; newStatus: string }>;
|
||||||
|
summary: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sync the spec with current codebase and feature state
|
||||||
|
*/
|
||||||
|
export async function syncSpec(
|
||||||
|
projectPath: string,
|
||||||
|
events: EventEmitter,
|
||||||
|
abortController: AbortController,
|
||||||
|
settingsService?: SettingsService
|
||||||
|
): Promise<SyncResult> {
|
||||||
|
logger.info('========== syncSpec() started ==========');
|
||||||
|
logger.info('projectPath:', projectPath);
|
||||||
|
|
||||||
|
const result: SyncResult = {
|
||||||
|
techStackUpdates: { added: [], removed: [] },
|
||||||
|
implementedFeaturesUpdates: { addedFromFeatures: [], removed: [] },
|
||||||
|
roadmapUpdates: [],
|
||||||
|
summary: '',
|
||||||
|
};
|
||||||
|
|
||||||
|
// Read existing spec
|
||||||
|
const specPath = getAppSpecPath(projectPath);
|
||||||
|
let specContent: string;
|
||||||
|
|
||||||
|
try {
|
||||||
|
specContent = (await secureFs.readFile(specPath, 'utf-8')) as string;
|
||||||
|
logger.info(`Spec loaded successfully (${specContent.length} chars)`);
|
||||||
|
} catch (readError) {
|
||||||
|
logger.error('Failed to read spec file:', readError);
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_error',
|
||||||
|
error: 'No project spec found. Create or regenerate spec first.',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
throw new Error('No project spec found');
|
||||||
|
}
|
||||||
|
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: '[Phase: sync] Starting spec sync...\n',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Extract current state from spec
|
||||||
|
const currentImplementedFeatures = extractImplementedFeatures(specContent);
|
||||||
|
const currentTechStack = extractTechnologyStack(specContent);
|
||||||
|
const currentRoadmapPhases = extractRoadmapPhases(specContent);
|
||||||
|
|
||||||
|
logger.info(`Current spec has ${currentImplementedFeatures.length} implemented features`);
|
||||||
|
logger.info(`Current spec has ${currentTechStack.length} technologies`);
|
||||||
|
logger.info(`Current spec has ${currentRoadmapPhases.length} roadmap phases`);
|
||||||
|
|
||||||
|
// Load completed Automaker features
|
||||||
|
const featureLoader = new FeatureLoader();
|
||||||
|
const allFeatures = await featureLoader.getAll(projectPath);
|
||||||
|
const completedFeatures = allFeatures.filter(
|
||||||
|
(f) => f.status === 'completed' || f.status === 'verified'
|
||||||
|
);
|
||||||
|
|
||||||
|
logger.info(`Found ${completedFeatures.length} completed/verified features in Automaker`);
|
||||||
|
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: `Found ${completedFeatures.length} completed features to sync...\n`,
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Build new implemented features list from completed Automaker features
|
||||||
|
const newImplementedFeatures: ImplementedFeature[] = [];
|
||||||
|
const existingNames = new Set(currentImplementedFeatures.map((f) => f.name.toLowerCase()));
|
||||||
|
|
||||||
|
for (const feature of completedFeatures) {
|
||||||
|
const name = feature.title || `Feature: ${feature.id}`;
|
||||||
|
if (!existingNames.has(name.toLowerCase())) {
|
||||||
|
newImplementedFeatures.push({
|
||||||
|
name,
|
||||||
|
description: feature.description || '',
|
||||||
|
});
|
||||||
|
result.implementedFeaturesUpdates.addedFromFeatures.push(name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge: keep existing + add new from completed features
|
||||||
|
const mergedFeatures = [...currentImplementedFeatures, ...newImplementedFeatures];
|
||||||
|
|
||||||
|
// Update spec with merged features
|
||||||
|
if (result.implementedFeaturesUpdates.addedFromFeatures.length > 0) {
|
||||||
|
specContent = updateImplementedFeaturesSection(specContent, mergedFeatures);
|
||||||
|
logger.info(
|
||||||
|
`Added ${result.implementedFeaturesUpdates.addedFromFeatures.length} features to spec`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Analyze codebase for tech stack updates using AI
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: 'Analyzing codebase for technology updates...\n',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
|
||||||
|
projectPath,
|
||||||
|
settingsService,
|
||||||
|
'[SpecSync]'
|
||||||
|
);
|
||||||
|
|
||||||
|
// Get model from phase settings with provider info
|
||||||
|
const {
|
||||||
|
phaseModel: phaseModelEntry,
|
||||||
|
provider,
|
||||||
|
credentials,
|
||||||
|
} = settingsService
|
||||||
|
? await getPhaseModelWithOverrides(
|
||||||
|
'specGenerationModel',
|
||||||
|
settingsService,
|
||||||
|
projectPath,
|
||||||
|
'[SpecSync]'
|
||||||
|
)
|
||||||
|
: {
|
||||||
|
phaseModel: DEFAULT_PHASE_MODELS.specGenerationModel,
|
||||||
|
provider: undefined,
|
||||||
|
credentials: undefined,
|
||||||
|
};
|
||||||
|
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
||||||
|
|
||||||
|
logger.info('Using model:', model, provider ? `via provider: ${provider.name}` : 'direct API');
|
||||||
|
|
||||||
|
// Determine if we should use structured output based on model type
|
||||||
|
const useStructuredOutput = supportsStructuredOutput(model);
|
||||||
|
logger.info(
|
||||||
|
`Structured output mode: ${useStructuredOutput ? 'enabled (Claude/Codex)' : 'disabled (using JSON instructions)'}`
|
||||||
|
);
|
||||||
|
|
||||||
|
// Use AI to analyze tech stack
|
||||||
|
let techAnalysisPrompt = `Analyze this project and return ONLY a JSON object with the current technology stack.
|
||||||
|
|
||||||
|
Current known technologies: ${currentTechStack.join(', ')}
|
||||||
|
|
||||||
|
Look at package.json, config files, and source code to identify:
|
||||||
|
- Frameworks (React, Vue, Express, etc.)
|
||||||
|
- Languages (TypeScript, JavaScript, Python, etc.)
|
||||||
|
- Build tools (Vite, Webpack, etc.)
|
||||||
|
- Databases (PostgreSQL, MongoDB, etc.)
|
||||||
|
- Key libraries and tools
|
||||||
|
|
||||||
|
Return ONLY this JSON format, no other text:
|
||||||
|
{
|
||||||
|
"technologies": ["Technology 1", "Technology 2", ...]
|
||||||
|
}`;
|
||||||
|
|
||||||
|
// Add explicit JSON instructions for non-Claude/Codex models
|
||||||
|
if (!useStructuredOutput) {
|
||||||
|
techAnalysisPrompt = `${techAnalysisPrompt}
|
||||||
|
|
||||||
|
CRITICAL INSTRUCTIONS:
|
||||||
|
1. DO NOT write any files. Return the JSON in your response only.
|
||||||
|
2. Your entire response should be valid JSON starting with { and ending with }.
|
||||||
|
3. No explanations, no markdown, no text before or after the JSON.`;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const techResult = await streamingQuery({
|
||||||
|
prompt: techAnalysisPrompt,
|
||||||
|
model,
|
||||||
|
cwd: projectPath,
|
||||||
|
maxTurns: 10,
|
||||||
|
allowedTools: ['Read', 'Glob', 'Grep'],
|
||||||
|
abortController,
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true,
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration
|
||||||
|
credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
|
outputFormat: useStructuredOutput
|
||||||
|
? {
|
||||||
|
type: 'json_schema',
|
||||||
|
schema: techStackOutputSchema,
|
||||||
|
}
|
||||||
|
: undefined,
|
||||||
|
onText: (text) => {
|
||||||
|
logger.debug(`Tech analysis text: ${text.substring(0, 100)}`);
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Parse tech stack from response - prefer structured output if available
|
||||||
|
let parsedTechnologies: string[] | null = null;
|
||||||
|
|
||||||
|
if (techResult.structured_output) {
|
||||||
|
// Use structured output from Claude/Codex models
|
||||||
|
const structured = techResult.structured_output as unknown as TechStackExtractionResult;
|
||||||
|
if (Array.isArray(structured.technologies)) {
|
||||||
|
parsedTechnologies = structured.technologies;
|
||||||
|
logger.info('✅ Received structured output for tech analysis');
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Fall back to text parsing for non-Claude/Codex models
|
||||||
|
const extracted = extractJson<TechStackExtractionResult>(techResult.text, {
|
||||||
|
logger,
|
||||||
|
requiredKey: 'technologies',
|
||||||
|
requireArray: true,
|
||||||
|
});
|
||||||
|
if (extracted && Array.isArray(extracted.technologies)) {
|
||||||
|
parsedTechnologies = extracted.technologies;
|
||||||
|
logger.info('✅ Extracted tech stack from text response');
|
||||||
|
} else {
|
||||||
|
logger.warn('⚠️ Failed to extract tech stack JSON from response');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (parsedTechnologies) {
|
||||||
|
const newTechStack = parsedTechnologies;
|
||||||
|
|
||||||
|
// Calculate differences
|
||||||
|
const currentSet = new Set(currentTechStack.map((t) => t.toLowerCase()));
|
||||||
|
const newSet = new Set(newTechStack.map((t) => t.toLowerCase()));
|
||||||
|
|
||||||
|
for (const tech of newTechStack) {
|
||||||
|
if (!currentSet.has(tech.toLowerCase())) {
|
||||||
|
result.techStackUpdates.added.push(tech);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const tech of currentTechStack) {
|
||||||
|
if (!newSet.has(tech.toLowerCase())) {
|
||||||
|
result.techStackUpdates.removed.push(tech);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update spec with new tech stack if there are changes
|
||||||
|
if (result.techStackUpdates.added.length > 0 || result.techStackUpdates.removed.length > 0) {
|
||||||
|
specContent = updateTechnologyStack(specContent, newTechStack);
|
||||||
|
logger.info(
|
||||||
|
`Updated tech stack: +${result.techStackUpdates.added.length}, -${result.techStackUpdates.removed.length}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
logger.warn('Failed to analyze tech stack:', error);
|
||||||
|
// Continue with other sync operations
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update roadmap phase statuses based on completed features
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_progress',
|
||||||
|
content: 'Checking roadmap phase statuses...\n',
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
// For each phase, check if all its features are completed
|
||||||
|
// This is a heuristic - we check if the phase name appears in any feature titles/descriptions
|
||||||
|
for (const phase of currentRoadmapPhases) {
|
||||||
|
if (phase.status === 'completed') continue; // Already completed
|
||||||
|
|
||||||
|
// Check if this phase should be marked as completed
|
||||||
|
// A phase is considered complete if we have completed features that mention it
|
||||||
|
const phaseNameLower = phase.name.toLowerCase();
|
||||||
|
const relatedCompletedFeatures = completedFeatures.filter(
|
||||||
|
(f) =>
|
||||||
|
f.title?.toLowerCase().includes(phaseNameLower) ||
|
||||||
|
f.description?.toLowerCase().includes(phaseNameLower) ||
|
||||||
|
f.category?.toLowerCase().includes(phaseNameLower)
|
||||||
|
);
|
||||||
|
|
||||||
|
// If we have related completed features and the phase is still pending/in_progress,
|
||||||
|
// update it to in_progress or completed based on feature count
|
||||||
|
if (relatedCompletedFeatures.length > 0 && phase.status !== 'completed') {
|
||||||
|
const newStatus = 'in_progress';
|
||||||
|
specContent = updateRoadmapPhaseStatus(specContent, phase.name, newStatus);
|
||||||
|
result.roadmapUpdates.push({ phaseName: phase.name, newStatus });
|
||||||
|
logger.info(`Updated phase "${phase.name}" to ${newStatus}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save updated spec
|
||||||
|
await secureFs.writeFile(specPath, specContent, 'utf-8');
|
||||||
|
logger.info('Spec saved successfully');
|
||||||
|
|
||||||
|
// Build summary
|
||||||
|
const summaryParts: string[] = [];
|
||||||
|
if (result.implementedFeaturesUpdates.addedFromFeatures.length > 0) {
|
||||||
|
summaryParts.push(
|
||||||
|
`Added ${result.implementedFeaturesUpdates.addedFromFeatures.length} implemented features`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if (result.techStackUpdates.added.length > 0) {
|
||||||
|
summaryParts.push(`Added ${result.techStackUpdates.added.length} technologies`);
|
||||||
|
}
|
||||||
|
if (result.techStackUpdates.removed.length > 0) {
|
||||||
|
summaryParts.push(`Removed ${result.techStackUpdates.removed.length} technologies`);
|
||||||
|
}
|
||||||
|
if (result.roadmapUpdates.length > 0) {
|
||||||
|
summaryParts.push(`Updated ${result.roadmapUpdates.length} roadmap phases`);
|
||||||
|
}
|
||||||
|
|
||||||
|
result.summary = summaryParts.length > 0 ? summaryParts.join(', ') : 'Spec is already up to date';
|
||||||
|
|
||||||
|
// Create notification
|
||||||
|
const notificationService = getNotificationService();
|
||||||
|
await notificationService.createNotification({
|
||||||
|
type: 'spec_regeneration_complete',
|
||||||
|
title: 'Spec Sync Complete',
|
||||||
|
message: result.summary,
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
events.emit('spec-regeneration:event', {
|
||||||
|
type: 'spec_regeneration_complete',
|
||||||
|
message: `Spec sync complete! ${result.summary}`,
|
||||||
|
projectPath,
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.info('========== syncSpec() completed ==========');
|
||||||
|
logger.info('Summary:', result.summary);
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
@@ -117,9 +117,27 @@ export function createAuthRoutes(): Router {
|
|||||||
*
|
*
|
||||||
* Returns whether the current request is authenticated.
|
* Returns whether the current request is authenticated.
|
||||||
* Used by the UI to determine if login is needed.
|
* Used by the UI to determine if login is needed.
|
||||||
|
*
|
||||||
|
* If AUTOMAKER_AUTO_LOGIN=true is set, automatically creates a session
|
||||||
|
* for unauthenticated requests (useful for development).
|
||||||
*/
|
*/
|
||||||
router.get('/status', (req, res) => {
|
router.get('/status', async (req, res) => {
|
||||||
const authenticated = isRequestAuthenticated(req);
|
let authenticated = isRequestAuthenticated(req);
|
||||||
|
|
||||||
|
// Auto-login for development: create session automatically if enabled
|
||||||
|
// Only works in non-production environments as a safeguard
|
||||||
|
if (
|
||||||
|
!authenticated &&
|
||||||
|
process.env.AUTOMAKER_AUTO_LOGIN === 'true' &&
|
||||||
|
process.env.NODE_ENV !== 'production'
|
||||||
|
) {
|
||||||
|
const sessionToken = await createSession();
|
||||||
|
const cookieOptions = getSessionCookieOptions();
|
||||||
|
const cookieName = getSessionCookieName();
|
||||||
|
res.cookie(cookieName, sessionToken, cookieOptions);
|
||||||
|
authenticated = true;
|
||||||
|
}
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
authenticated,
|
authenticated,
|
||||||
|
|||||||
@@ -1,15 +1,18 @@
|
|||||||
/**
|
/**
|
||||||
* Auto Mode routes - HTTP API for autonomous feature implementation
|
* Auto Mode routes - HTTP API for autonomous feature implementation
|
||||||
*
|
*
|
||||||
* Uses the AutoModeService for real feature execution with Claude Agent SDK
|
* Uses AutoModeServiceCompat which provides the old interface while
|
||||||
|
* delegating to GlobalAutoModeService and per-project facades.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { Router } from 'express';
|
import { Router } from 'express';
|
||||||
import type { AutoModeService } from '../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../services/auto-mode/index.js';
|
||||||
import { validatePathParams } from '../../middleware/validate-paths.js';
|
import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||||
import { createStopFeatureHandler } from './routes/stop-feature.js';
|
import { createStopFeatureHandler } from './routes/stop-feature.js';
|
||||||
import { createStatusHandler } from './routes/status.js';
|
import { createStatusHandler } from './routes/status.js';
|
||||||
import { createRunFeatureHandler } from './routes/run-feature.js';
|
import { createRunFeatureHandler } from './routes/run-feature.js';
|
||||||
|
import { createStartHandler } from './routes/start.js';
|
||||||
|
import { createStopHandler } from './routes/stop.js';
|
||||||
import { createVerifyFeatureHandler } from './routes/verify-feature.js';
|
import { createVerifyFeatureHandler } from './routes/verify-feature.js';
|
||||||
import { createResumeFeatureHandler } from './routes/resume-feature.js';
|
import { createResumeFeatureHandler } from './routes/resume-feature.js';
|
||||||
import { createContextExistsHandler } from './routes/context-exists.js';
|
import { createContextExistsHandler } from './routes/context-exists.js';
|
||||||
@@ -17,10 +20,21 @@ import { createAnalyzeProjectHandler } from './routes/analyze-project.js';
|
|||||||
import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
|
import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
|
||||||
import { createCommitFeatureHandler } from './routes/commit-feature.js';
|
import { createCommitFeatureHandler } from './routes/commit-feature.js';
|
||||||
import { createApprovePlanHandler } from './routes/approve-plan.js';
|
import { createApprovePlanHandler } from './routes/approve-plan.js';
|
||||||
|
import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
|
||||||
|
import { createReconcileHandler } from './routes/reconcile.js';
|
||||||
|
|
||||||
export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
/**
|
||||||
|
* Create auto-mode routes.
|
||||||
|
*
|
||||||
|
* @param autoModeService - AutoModeServiceCompat instance
|
||||||
|
*/
|
||||||
|
export function createAutoModeRoutes(autoModeService: AutoModeServiceCompat): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
|
// Auto loop control routes
|
||||||
|
router.post('/start', validatePathParams('projectPath'), createStartHandler(autoModeService));
|
||||||
|
router.post('/stop', validatePathParams('projectPath'), createStopHandler(autoModeService));
|
||||||
|
|
||||||
router.post('/stop-feature', createStopFeatureHandler(autoModeService));
|
router.post('/stop-feature', createStopFeatureHandler(autoModeService));
|
||||||
router.post('/status', validatePathParams('projectPath?'), createStatusHandler(autoModeService));
|
router.post('/status', validatePathParams('projectPath?'), createStatusHandler(autoModeService));
|
||||||
router.post(
|
router.post(
|
||||||
@@ -63,6 +77,16 @@ export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
|
|||||||
validatePathParams('projectPath'),
|
validatePathParams('projectPath'),
|
||||||
createApprovePlanHandler(autoModeService)
|
createApprovePlanHandler(autoModeService)
|
||||||
);
|
);
|
||||||
|
router.post(
|
||||||
|
'/resume-interrupted',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createResumeInterruptedHandler(autoModeService)
|
||||||
|
);
|
||||||
|
router.post(
|
||||||
|
'/reconcile',
|
||||||
|
validatePathParams('projectPath'),
|
||||||
|
createReconcileHandler(autoModeService)
|
||||||
|
);
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createAnalyzeProjectHandler(autoModeService: AutoModeService) {
|
export function createAnalyzeProjectHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath } = req.body as { projectPath: string };
|
const { projectPath } = req.body as { projectPath: string };
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
export function createApprovePlanHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { featureId, approved, editedPlan, feedback, projectPath } = req.body as {
|
const { featureId, approved, editedPlan, feedback, projectPath } = req.body as {
|
||||||
@@ -17,7 +17,7 @@ export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
|||||||
approved: boolean;
|
approved: boolean;
|
||||||
editedPlan?: string;
|
editedPlan?: string;
|
||||||
feedback?: string;
|
feedback?: string;
|
||||||
projectPath?: string;
|
projectPath: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
if (!featureId) {
|
if (!featureId) {
|
||||||
@@ -36,6 +36,14 @@ export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'projectPath is required',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
// Note: We no longer check hasPendingApproval here because resolvePlanApproval
|
// Note: We no longer check hasPendingApproval here because resolvePlanApproval
|
||||||
// can handle recovery when pending approval is not in Map but feature has planSpec.status='generated'
|
// can handle recovery when pending approval is not in Map but feature has planSpec.status='generated'
|
||||||
// This supports cases where the server restarted while waiting for approval
|
// This supports cases where the server restarted while waiting for approval
|
||||||
@@ -48,11 +56,11 @@ export function createApprovePlanHandler(autoModeService: AutoModeService) {
|
|||||||
|
|
||||||
// Resolve the pending approval (with recovery support)
|
// Resolve the pending approval (with recovery support)
|
||||||
const result = await autoModeService.resolvePlanApproval(
|
const result = await autoModeService.resolvePlanApproval(
|
||||||
|
projectPath,
|
||||||
featureId,
|
featureId,
|
||||||
approved,
|
approved,
|
||||||
editedPlan,
|
editedPlan,
|
||||||
feedback,
|
feedback
|
||||||
projectPath
|
|
||||||
);
|
);
|
||||||
|
|
||||||
if (!result.success) {
|
if (!result.success) {
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createCommitFeatureHandler(autoModeService: AutoModeService) {
|
export function createCommitFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, worktreePath } = req.body as {
|
const { projectPath, featureId, worktreePath } = req.body as {
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createContextExistsHandler(autoModeService: AutoModeService) {
|
export function createContextExistsHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId } = req.body as {
|
const { projectPath, featureId } = req.body as {
|
||||||
|
|||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
|
export function createFollowUpFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, prompt, imagePaths, useWorktrees } = req.body as {
|
const { projectPath, featureId, prompt, imagePaths, useWorktrees } = req.body as {
|
||||||
@@ -30,16 +30,12 @@ export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
|
|||||||
|
|
||||||
// Start follow-up in background
|
// Start follow-up in background
|
||||||
// followUpFeature derives workDir from feature.branchName
|
// followUpFeature derives workDir from feature.branchName
|
||||||
autoModeService
|
|
||||||
// Default to false to match run-feature/resume-feature behavior.
|
// Default to false to match run-feature/resume-feature behavior.
|
||||||
// Worktrees should only be used when explicitly enabled by the user.
|
// Worktrees should only be used when explicitly enabled by the user.
|
||||||
|
autoModeService
|
||||||
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
|
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
|
||||||
.catch((error) => {
|
.catch((error) => {
|
||||||
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
|
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
|
||||||
})
|
|
||||||
.finally(() => {
|
|
||||||
// Release the starting slot when follow-up completes (success or error)
|
|
||||||
// Note: The feature should be in runningFeatures by this point
|
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
|
|||||||
53
apps/server/src/routes/auto-mode/routes/reconcile.ts
Normal file
53
apps/server/src/routes/auto-mode/routes/reconcile.ts
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
/**
|
||||||
|
* Reconcile Feature States Handler
|
||||||
|
*
|
||||||
|
* On-demand endpoint to reconcile all feature states for a project.
|
||||||
|
* Resets features stuck in transient states (in_progress, interrupted, pipeline_*)
|
||||||
|
* back to resting states (ready/backlog) and emits events to update the UI.
|
||||||
|
*
|
||||||
|
* This is useful when:
|
||||||
|
* - The UI reconnects after a server restart
|
||||||
|
* - A client detects stale feature states
|
||||||
|
* - An admin wants to force-reset stuck features
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
|
|
||||||
|
const logger = createLogger('ReconcileFeatures');
|
||||||
|
|
||||||
|
interface ReconcileRequest {
|
||||||
|
projectPath: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createReconcileHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
const { projectPath } = req.body as ReconcileRequest;
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ error: 'Project path is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Reconciling feature states for ${projectPath}`);
|
||||||
|
|
||||||
|
try {
|
||||||
|
const reconciledCount = await autoModeService.reconcileFeatureStates(projectPath);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
reconciledCount,
|
||||||
|
message:
|
||||||
|
reconciledCount > 0
|
||||||
|
? `Reconciled ${reconciledCount} feature(s)`
|
||||||
|
: 'No features needed reconciliation',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error reconciling feature states:', error);
|
||||||
|
res.status(500).json({
|
||||||
|
error: error instanceof Error ? error.message : 'Unknown error',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createResumeFeatureHandler(autoModeService: AutoModeService) {
|
export function createResumeFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, useWorktrees } = req.body as {
|
const { projectPath, featureId, useWorktrees } = req.body as {
|
||||||
|
|||||||
@@ -0,0 +1,43 @@
|
|||||||
|
/**
|
||||||
|
* Resume Interrupted Features Handler
|
||||||
|
*
|
||||||
|
* Checks for features that were interrupted (in pipeline steps or in_progress)
|
||||||
|
* when the server was restarted and resumes them.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
|
|
||||||
|
const logger = createLogger('ResumeInterrupted');
|
||||||
|
|
||||||
|
interface ResumeInterruptedRequest {
|
||||||
|
projectPath: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createResumeInterruptedHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
const { projectPath } = req.body as ResumeInterruptedRequest;
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ error: 'Project path is required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Checking for interrupted features in ${projectPath}`);
|
||||||
|
|
||||||
|
try {
|
||||||
|
await autoModeService.resumeInterruptedFeatures(projectPath);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'Resume check completed',
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error resuming interrupted features:', error);
|
||||||
|
res.status(500).json({
|
||||||
|
error: error instanceof Error ? error.message : 'Unknown error',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -3,13 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
const logger = createLogger('AutoMode');
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
export function createRunFeatureHandler(autoModeService: AutoModeService) {
|
export function createRunFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId, useWorktrees } = req.body as {
|
const { projectPath, featureId, useWorktrees } = req.body as {
|
||||||
@@ -26,16 +26,16 @@ export function createRunFeatureHandler(autoModeService: AutoModeService) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Note: No concurrency limit check here. Manual feature starts always run
|
||||||
|
// immediately and bypass the concurrency limit. Their presence IS counted
|
||||||
|
// by the auto-loop coordinator when deciding whether to dispatch new auto-mode tasks.
|
||||||
|
|
||||||
// Start execution in background
|
// Start execution in background
|
||||||
// executeFeature derives workDir from feature.branchName
|
// executeFeature derives workDir from feature.branchName
|
||||||
autoModeService
|
autoModeService
|
||||||
.executeFeature(projectPath, featureId, useWorktrees ?? false, false)
|
.executeFeature(projectPath, featureId, useWorktrees ?? false, false)
|
||||||
.catch((error) => {
|
.catch((error) => {
|
||||||
logger.error(`Feature ${featureId} error:`, error);
|
logger.error(`Feature ${featureId} error:`, error);
|
||||||
})
|
|
||||||
.finally(() => {
|
|
||||||
// Release the starting slot when execution completes (success or error)
|
|
||||||
// Note: The feature should be in runningFeatures by this point
|
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
|
|||||||
67
apps/server/src/routes/auto-mode/routes/start.ts
Normal file
67
apps/server/src/routes/auto-mode/routes/start.ts
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
/**
|
||||||
|
* POST /start endpoint - Start auto mode loop for a project
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
|
export function createStartHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, branchName, maxConcurrency } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
branchName?: string | null;
|
||||||
|
maxConcurrency?: number;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'projectPath is required',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize branchName: undefined becomes null
|
||||||
|
const normalizedBranchName = branchName ?? null;
|
||||||
|
const worktreeDesc = normalizedBranchName
|
||||||
|
? `worktree ${normalizedBranchName}`
|
||||||
|
: 'main worktree';
|
||||||
|
|
||||||
|
// Check if already running
|
||||||
|
if (autoModeService.isAutoLoopRunningForProject(projectPath, normalizedBranchName)) {
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: `Auto mode is already running for ${worktreeDesc}`,
|
||||||
|
alreadyRunning: true,
|
||||||
|
branchName: normalizedBranchName,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start the auto loop for this project/worktree
|
||||||
|
const resolvedMaxConcurrency = await autoModeService.startAutoLoopForProject(
|
||||||
|
projectPath,
|
||||||
|
normalizedBranchName,
|
||||||
|
maxConcurrency
|
||||||
|
);
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
`Started auto loop for ${worktreeDesc} in project: ${projectPath} with maxConcurrency: ${resolvedMaxConcurrency}`
|
||||||
|
);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: `Auto mode started with max ${resolvedMaxConcurrency} concurrent features`,
|
||||||
|
branchName: normalizedBranchName,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Start auto mode failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,18 +1,56 @@
|
|||||||
/**
|
/**
|
||||||
* POST /status endpoint - Get auto mode status
|
* POST /status endpoint - Get auto mode status
|
||||||
|
*
|
||||||
|
* If projectPath is provided, returns per-project status including autoloop state.
|
||||||
|
* If no projectPath, returns global status for backward compatibility.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createStatusHandler(autoModeService: AutoModeService) {
|
/**
|
||||||
|
* Create status handler.
|
||||||
|
*/
|
||||||
|
export function createStatusHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
|
const { projectPath, branchName } = req.body as {
|
||||||
|
projectPath?: string;
|
||||||
|
branchName?: string | null;
|
||||||
|
};
|
||||||
|
|
||||||
|
// If projectPath is provided, return per-project/worktree status
|
||||||
|
if (projectPath) {
|
||||||
|
// Normalize branchName: undefined becomes null
|
||||||
|
const normalizedBranchName = branchName ?? null;
|
||||||
|
|
||||||
|
const projectStatus = await autoModeService.getStatusForProject(
|
||||||
|
projectPath,
|
||||||
|
normalizedBranchName
|
||||||
|
);
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
isRunning: projectStatus.runningCount > 0,
|
||||||
|
isAutoLoopRunning: projectStatus.isAutoLoopRunning,
|
||||||
|
runningFeatures: projectStatus.runningFeatures,
|
||||||
|
runningCount: projectStatus.runningCount,
|
||||||
|
maxConcurrency: projectStatus.maxConcurrency,
|
||||||
|
projectPath,
|
||||||
|
branchName: normalizedBranchName,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Global status for backward compatibility
|
||||||
const status = autoModeService.getStatus();
|
const status = autoModeService.getStatus();
|
||||||
|
const activeProjects = autoModeService.getActiveAutoLoopProjects();
|
||||||
|
const activeWorktrees = autoModeService.getActiveAutoLoopWorktrees();
|
||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
...status,
|
...status,
|
||||||
|
activeAutoLoopProjects: activeProjects,
|
||||||
|
activeAutoLoopWorktrees: activeWorktrees,
|
||||||
});
|
});
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'Get status failed');
|
logError(error, 'Get status failed');
|
||||||
|
|||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createStopFeatureHandler(autoModeService: AutoModeService) {
|
export function createStopFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { featureId } = req.body as { featureId: string };
|
const { featureId } = req.body as { featureId: string };
|
||||||
|
|||||||
66
apps/server/src/routes/auto-mode/routes/stop.ts
Normal file
66
apps/server/src/routes/auto-mode/routes/stop.ts
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
/**
|
||||||
|
* POST /stop endpoint - Stop auto mode loop for a project
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
const logger = createLogger('AutoMode');
|
||||||
|
|
||||||
|
export function createStopHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath, branchName } = req.body as {
|
||||||
|
projectPath: string;
|
||||||
|
branchName?: string | null;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({
|
||||||
|
success: false,
|
||||||
|
error: 'projectPath is required',
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize branchName: undefined becomes null
|
||||||
|
const normalizedBranchName = branchName ?? null;
|
||||||
|
const worktreeDesc = normalizedBranchName
|
||||||
|
? `worktree ${normalizedBranchName}`
|
||||||
|
: 'main worktree';
|
||||||
|
|
||||||
|
// Check if running
|
||||||
|
if (!autoModeService.isAutoLoopRunningForProject(projectPath, normalizedBranchName)) {
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: `Auto mode is not running for ${worktreeDesc}`,
|
||||||
|
wasRunning: false,
|
||||||
|
branchName: normalizedBranchName,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop the auto loop for this project/worktree
|
||||||
|
const runningCount = await autoModeService.stopAutoLoopForProject(
|
||||||
|
projectPath,
|
||||||
|
normalizedBranchName
|
||||||
|
);
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
`Stopped auto loop for ${worktreeDesc} in project: ${projectPath}, ${runningCount} features still running`
|
||||||
|
);
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
message: 'Auto mode stopped',
|
||||||
|
runningFeaturesCount: runningCount,
|
||||||
|
branchName: normalizedBranchName,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Stop auto mode failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -3,10 +3,10 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { AutoModeService } from '../../../services/auto-mode-service.js';
|
import type { AutoModeServiceCompat } from '../../../services/auto-mode/index.js';
|
||||||
import { getErrorMessage, logError } from '../common.js';
|
import { getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createVerifyFeatureHandler(autoModeService: AutoModeService) {
|
export function createVerifyFeatureHandler(autoModeService: AutoModeServiceCompat) {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, featureId } = req.body as {
|
const { projectPath, featureId } = req.body as {
|
||||||
|
|||||||
@@ -3,12 +3,31 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
import { ensureAutomakerDir, getAutomakerDir } from '@automaker/platform';
|
||||||
|
import * as secureFs from '../../lib/secure-fs.js';
|
||||||
|
import path from 'path';
|
||||||
|
import type { BacklogPlanResult } from '@automaker/types';
|
||||||
|
|
||||||
const logger = createLogger('BacklogPlan');
|
const logger = createLogger('BacklogPlan');
|
||||||
|
|
||||||
// State for tracking running generation
|
// State for tracking running generation
|
||||||
let isRunning = false;
|
let isRunning = false;
|
||||||
let currentAbortController: AbortController | null = null;
|
let currentAbortController: AbortController | null = null;
|
||||||
|
let runningDetails: {
|
||||||
|
projectPath: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
startedAt: string;
|
||||||
|
} | null = null;
|
||||||
|
|
||||||
|
const BACKLOG_PLAN_FILENAME = 'backlog-plan.json';
|
||||||
|
|
||||||
|
export interface StoredBacklogPlan {
|
||||||
|
savedAt: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
result: BacklogPlanResult;
|
||||||
|
}
|
||||||
|
|
||||||
export function getBacklogPlanStatus(): { isRunning: boolean } {
|
export function getBacklogPlanStatus(): { isRunning: boolean } {
|
||||||
return { isRunning };
|
return { isRunning };
|
||||||
@@ -16,20 +35,125 @@ export function getBacklogPlanStatus(): { isRunning: boolean } {
|
|||||||
|
|
||||||
export function setRunningState(running: boolean, abortController?: AbortController | null): void {
|
export function setRunningState(running: boolean, abortController?: AbortController | null): void {
|
||||||
isRunning = running;
|
isRunning = running;
|
||||||
|
if (!running) {
|
||||||
|
runningDetails = null;
|
||||||
|
}
|
||||||
if (abortController !== undefined) {
|
if (abortController !== undefined) {
|
||||||
currentAbortController = abortController;
|
currentAbortController = abortController;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function setRunningDetails(
|
||||||
|
details: {
|
||||||
|
projectPath: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
startedAt: string;
|
||||||
|
} | null
|
||||||
|
): void {
|
||||||
|
runningDetails = details;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getRunningDetails(): {
|
||||||
|
projectPath: string;
|
||||||
|
prompt: string;
|
||||||
|
model?: string;
|
||||||
|
startedAt: string;
|
||||||
|
} | null {
|
||||||
|
return runningDetails;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getBacklogPlanPath(projectPath: string): string {
|
||||||
|
return path.join(getAutomakerDir(projectPath), BACKLOG_PLAN_FILENAME);
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function saveBacklogPlan(projectPath: string, plan: StoredBacklogPlan): Promise<void> {
|
||||||
|
await ensureAutomakerDir(projectPath);
|
||||||
|
const filePath = getBacklogPlanPath(projectPath);
|
||||||
|
await secureFs.writeFile(filePath, JSON.stringify(plan, null, 2), 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function loadBacklogPlan(projectPath: string): Promise<StoredBacklogPlan | null> {
|
||||||
|
try {
|
||||||
|
const filePath = getBacklogPlanPath(projectPath);
|
||||||
|
const raw = await secureFs.readFile(filePath, 'utf-8');
|
||||||
|
const parsed = JSON.parse(raw as string) as StoredBacklogPlan;
|
||||||
|
if (!Array.isArray(parsed?.result?.changes)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return parsed;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function clearBacklogPlan(projectPath: string): Promise<void> {
|
||||||
|
try {
|
||||||
|
const filePath = getBacklogPlanPath(projectPath);
|
||||||
|
await secureFs.unlink(filePath);
|
||||||
|
} catch {
|
||||||
|
// ignore missing file
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
export function getAbortController(): AbortController | null {
|
export function getAbortController(): AbortController | null {
|
||||||
return currentAbortController;
|
return currentAbortController;
|
||||||
}
|
}
|
||||||
|
|
||||||
export function getErrorMessage(error: unknown): string {
|
/**
|
||||||
if (error instanceof Error) {
|
* Map SDK/CLI errors to user-friendly messages
|
||||||
return error.message;
|
*/
|
||||||
|
export function mapBacklogPlanError(rawMessage: string): string {
|
||||||
|
// Claude Code spawn failures
|
||||||
|
if (
|
||||||
|
rawMessage.includes('Failed to spawn Claude Code process') ||
|
||||||
|
rawMessage.includes('spawn node ENOENT') ||
|
||||||
|
rawMessage.includes('Claude Code executable not found') ||
|
||||||
|
rawMessage.includes('Claude Code native binary not found')
|
||||||
|
) {
|
||||||
|
return 'Claude CLI could not be launched. Make sure the Claude CLI is installed and available in PATH, or check that Node.js is correctly installed. Try running "which claude" or "claude --version" in your terminal to verify.';
|
||||||
}
|
}
|
||||||
return String(error);
|
|
||||||
|
// Claude Code process crash
|
||||||
|
if (rawMessage.includes('Claude Code process exited')) {
|
||||||
|
return 'Claude exited unexpectedly. Try again. If it keeps happening, re-run `claude login` or update your API key in Setup.';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate limiting
|
||||||
|
if (rawMessage.toLowerCase().includes('rate limit') || rawMessage.includes('429')) {
|
||||||
|
return 'Rate limited. Please wait a moment and try again.';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Network errors
|
||||||
|
if (
|
||||||
|
rawMessage.toLowerCase().includes('network') ||
|
||||||
|
rawMessage.toLowerCase().includes('econnrefused') ||
|
||||||
|
rawMessage.toLowerCase().includes('timeout')
|
||||||
|
) {
|
||||||
|
return 'Network error. Check your internet connection and try again.';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Authentication errors
|
||||||
|
if (
|
||||||
|
rawMessage.toLowerCase().includes('not authenticated') ||
|
||||||
|
rawMessage.toLowerCase().includes('unauthorized') ||
|
||||||
|
rawMessage.includes('401')
|
||||||
|
) {
|
||||||
|
return 'Authentication failed. Please check your API key or run `claude login` to authenticate.';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return original message for unknown errors
|
||||||
|
return rawMessage;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getErrorMessage(error: unknown): string {
|
||||||
|
let rawMessage: string;
|
||||||
|
if (error instanceof Error) {
|
||||||
|
rawMessage = error.message;
|
||||||
|
} else {
|
||||||
|
rawMessage = String(error);
|
||||||
|
}
|
||||||
|
return mapBacklogPlanError(rawMessage);
|
||||||
}
|
}
|
||||||
|
|
||||||
export function logError(error: unknown, context: string): void {
|
export function logError(error: unknown, context: string): void {
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { EventEmitter } from '../../lib/events.js';
|
import type { EventEmitter } from '../../lib/events.js';
|
||||||
import type { Feature, BacklogPlanResult, BacklogChange, DependencyUpdate } from '@automaker/types';
|
import type { Feature, BacklogPlanResult } from '@automaker/types';
|
||||||
import {
|
import {
|
||||||
DEFAULT_PHASE_MODELS,
|
DEFAULT_PHASE_MODELS,
|
||||||
isCursorModel,
|
isCursorModel,
|
||||||
@@ -17,9 +17,19 @@ import { resolvePhaseModel } from '@automaker/model-resolver';
|
|||||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||||
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
||||||
import { logger, setRunningState, getErrorMessage } from './common.js';
|
import {
|
||||||
|
logger,
|
||||||
|
setRunningState,
|
||||||
|
setRunningDetails,
|
||||||
|
getErrorMessage,
|
||||||
|
saveBacklogPlan,
|
||||||
|
} from './common.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPromptCustomization,
|
||||||
|
getPhaseModelWithOverrides,
|
||||||
|
} from '../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const featureLoader = new FeatureLoader();
|
const featureLoader = new FeatureLoader();
|
||||||
|
|
||||||
@@ -111,18 +121,42 @@ export async function generateBacklogPlan(
|
|||||||
content: 'Generating plan with AI...',
|
content: 'Generating plan with AI...',
|
||||||
});
|
});
|
||||||
|
|
||||||
// Get the model to use from settings or provided override
|
// Get the model to use from settings or provided override with provider info
|
||||||
let effectiveModel = model;
|
let effectiveModel = model;
|
||||||
let thinkingLevel: ThinkingLevel | undefined;
|
let thinkingLevel: ThinkingLevel | undefined;
|
||||||
if (!effectiveModel) {
|
let claudeCompatibleProvider: import('@automaker/types').ClaudeCompatibleProvider | undefined;
|
||||||
const settings = await settingsService?.getGlobalSettings();
|
let credentials: import('@automaker/types').Credentials | undefined;
|
||||||
const phaseModelEntry =
|
|
||||||
settings?.phaseModels?.backlogPlanningModel || DEFAULT_PHASE_MODELS.backlogPlanningModel;
|
if (effectiveModel) {
|
||||||
const resolved = resolvePhaseModel(phaseModelEntry);
|
// Use explicit override - resolve model alias and get credentials
|
||||||
|
const resolved = resolvePhaseModel({ model: effectiveModel });
|
||||||
|
effectiveModel = resolved.model;
|
||||||
|
thinkingLevel = resolved.thinkingLevel;
|
||||||
|
credentials = await settingsService?.getCredentials();
|
||||||
|
} else if (settingsService) {
|
||||||
|
// Use settings-based model with provider info
|
||||||
|
const phaseResult = await getPhaseModelWithOverrides(
|
||||||
|
'backlogPlanningModel',
|
||||||
|
settingsService,
|
||||||
|
projectPath,
|
||||||
|
'[BacklogPlan]'
|
||||||
|
);
|
||||||
|
const resolved = resolvePhaseModel(phaseResult.phaseModel);
|
||||||
|
effectiveModel = resolved.model;
|
||||||
|
thinkingLevel = resolved.thinkingLevel;
|
||||||
|
claudeCompatibleProvider = phaseResult.provider;
|
||||||
|
credentials = phaseResult.credentials;
|
||||||
|
} else {
|
||||||
|
// Fallback to defaults
|
||||||
|
const resolved = resolvePhaseModel(DEFAULT_PHASE_MODELS.backlogPlanningModel);
|
||||||
effectiveModel = resolved.model;
|
effectiveModel = resolved.model;
|
||||||
thinkingLevel = resolved.thinkingLevel;
|
thinkingLevel = resolved.thinkingLevel;
|
||||||
}
|
}
|
||||||
logger.info('[BacklogPlan] Using model:', effectiveModel);
|
logger.info(
|
||||||
|
'[BacklogPlan] Using model:',
|
||||||
|
effectiveModel,
|
||||||
|
claudeCompatibleProvider ? `via provider: ${claudeCompatibleProvider.name}` : 'direct API'
|
||||||
|
);
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(effectiveModel);
|
const provider = ProviderFactory.getProviderForModel(effectiveModel);
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
// Strip provider prefix - providers expect bare model IDs
|
||||||
@@ -167,6 +201,8 @@ ${userPrompt}`;
|
|||||||
settingSources: autoLoadClaudeMd ? ['user', 'project'] : undefined,
|
settingSources: autoLoadClaudeMd ? ['user', 'project'] : undefined,
|
||||||
readOnly: true, // Plan generation only generates text, doesn't write files
|
readOnly: true, // Plan generation only generates text, doesn't write files
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
thinkingLevel, // Pass thinking level for extended thinking
|
||||||
|
claudeCompatibleProvider, // Pass provider for alternative endpoint configuration
|
||||||
|
credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
});
|
});
|
||||||
|
|
||||||
let responseText = '';
|
let responseText = '';
|
||||||
@@ -200,6 +236,13 @@ ${userPrompt}`;
|
|||||||
// Parse the response
|
// Parse the response
|
||||||
const result = parsePlanResponse(responseText);
|
const result = parsePlanResponse(responseText);
|
||||||
|
|
||||||
|
await saveBacklogPlan(projectPath, {
|
||||||
|
savedAt: new Date().toISOString(),
|
||||||
|
prompt,
|
||||||
|
model: effectiveModel,
|
||||||
|
result,
|
||||||
|
});
|
||||||
|
|
||||||
events.emit('backlog-plan:event', {
|
events.emit('backlog-plan:event', {
|
||||||
type: 'backlog_plan_complete',
|
type: 'backlog_plan_complete',
|
||||||
result,
|
result,
|
||||||
@@ -218,5 +261,6 @@ ${userPrompt}`;
|
|||||||
throw error;
|
throw error;
|
||||||
} finally {
|
} finally {
|
||||||
setRunningState(false, null);
|
setRunningState(false, null);
|
||||||
|
setRunningDetails(null);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import { createGenerateHandler } from './routes/generate.js';
|
|||||||
import { createStopHandler } from './routes/stop.js';
|
import { createStopHandler } from './routes/stop.js';
|
||||||
import { createStatusHandler } from './routes/status.js';
|
import { createStatusHandler } from './routes/status.js';
|
||||||
import { createApplyHandler } from './routes/apply.js';
|
import { createApplyHandler } from './routes/apply.js';
|
||||||
|
import { createClearHandler } from './routes/clear.js';
|
||||||
import type { SettingsService } from '../../services/settings-service.js';
|
import type { SettingsService } from '../../services/settings-service.js';
|
||||||
|
|
||||||
export function createBacklogPlanRoutes(
|
export function createBacklogPlanRoutes(
|
||||||
@@ -23,8 +24,9 @@ export function createBacklogPlanRoutes(
|
|||||||
createGenerateHandler(events, settingsService)
|
createGenerateHandler(events, settingsService)
|
||||||
);
|
);
|
||||||
router.post('/stop', createStopHandler());
|
router.post('/stop', createStopHandler());
|
||||||
router.get('/status', createStatusHandler());
|
router.get('/status', validatePathParams('projectPath'), createStatusHandler());
|
||||||
router.post('/apply', validatePathParams('projectPath'), createApplyHandler());
|
router.post('/apply', validatePathParams('projectPath'), createApplyHandler());
|
||||||
|
router.post('/clear', validatePathParams('projectPath'), createClearHandler());
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,20 +3,31 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { BacklogPlanResult, BacklogChange, Feature } from '@automaker/types';
|
import type { BacklogPlanResult } from '@automaker/types';
|
||||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||||
import { getErrorMessage, logError, logger } from '../common.js';
|
import { clearBacklogPlan, getErrorMessage, logError, logger } from '../common.js';
|
||||||
|
|
||||||
const featureLoader = new FeatureLoader();
|
const featureLoader = new FeatureLoader();
|
||||||
|
|
||||||
export function createApplyHandler() {
|
export function createApplyHandler() {
|
||||||
return async (req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const { projectPath, plan } = req.body as {
|
const {
|
||||||
|
projectPath,
|
||||||
|
plan,
|
||||||
|
branchName: rawBranchName,
|
||||||
|
} = req.body as {
|
||||||
projectPath: string;
|
projectPath: string;
|
||||||
plan: BacklogPlanResult;
|
plan: BacklogPlanResult;
|
||||||
|
branchName?: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Validate branchName: must be undefined or a non-empty trimmed string
|
||||||
|
const branchName =
|
||||||
|
typeof rawBranchName === 'string' && rawBranchName.trim().length > 0
|
||||||
|
? rawBranchName.trim()
|
||||||
|
: undefined;
|
||||||
|
|
||||||
if (!projectPath) {
|
if (!projectPath) {
|
||||||
res.status(400).json({ success: false, error: 'projectPath required' });
|
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||||
return;
|
return;
|
||||||
@@ -47,6 +58,9 @@ export function createApplyHandler() {
|
|||||||
if (feature.dependencies?.includes(change.featureId)) {
|
if (feature.dependencies?.includes(change.featureId)) {
|
||||||
const newDeps = feature.dependencies.filter((d) => d !== change.featureId);
|
const newDeps = feature.dependencies.filter((d) => d !== change.featureId);
|
||||||
await featureLoader.update(projectPath, feature.id, { dependencies: newDeps });
|
await featureLoader.update(projectPath, feature.id, { dependencies: newDeps });
|
||||||
|
// Mutate the in-memory feature object so subsequent deletions use the updated
|
||||||
|
// dependency list and don't reintroduce already-removed dependency IDs.
|
||||||
|
feature.dependencies = newDeps;
|
||||||
logger.info(
|
logger.info(
|
||||||
`[BacklogPlan] Removed dependency ${change.featureId} from ${feature.id}`
|
`[BacklogPlan] Removed dependency ${change.featureId} from ${feature.id}`
|
||||||
);
|
);
|
||||||
@@ -74,14 +88,16 @@ export function createApplyHandler() {
|
|||||||
if (!change.feature) continue;
|
if (!change.feature) continue;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Create the new feature
|
// Create the new feature - use the AI-generated ID if provided
|
||||||
const newFeature = await featureLoader.create(projectPath, {
|
const newFeature = await featureLoader.create(projectPath, {
|
||||||
|
id: change.feature.id, // Use descriptive ID from AI if provided
|
||||||
title: change.feature.title,
|
title: change.feature.title,
|
||||||
description: change.feature.description || '',
|
description: change.feature.description || '',
|
||||||
category: change.feature.category || 'Uncategorized',
|
category: change.feature.category || 'Uncategorized',
|
||||||
dependencies: change.feature.dependencies,
|
dependencies: change.feature.dependencies,
|
||||||
priority: change.feature.priority,
|
priority: change.feature.priority,
|
||||||
status: 'backlog',
|
status: 'backlog',
|
||||||
|
branchName,
|
||||||
});
|
});
|
||||||
|
|
||||||
appliedChanges.push(`added:${newFeature.id}`);
|
appliedChanges.push(`added:${newFeature.id}`);
|
||||||
@@ -135,6 +151,17 @@ export function createApplyHandler() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Clear the plan before responding
|
||||||
|
try {
|
||||||
|
await clearBacklogPlan(projectPath);
|
||||||
|
} catch (error) {
|
||||||
|
logger.warn(
|
||||||
|
`[BacklogPlan] Failed to clear backlog plan after apply:`,
|
||||||
|
getErrorMessage(error)
|
||||||
|
);
|
||||||
|
// Don't throw - operation succeeded, just cleanup failed
|
||||||
|
}
|
||||||
|
|
||||||
res.json({
|
res.json({
|
||||||
success: true,
|
success: true,
|
||||||
appliedChanges,
|
appliedChanges,
|
||||||
|
|||||||
25
apps/server/src/routes/backlog-plan/routes/clear.ts
Normal file
25
apps/server/src/routes/backlog-plan/routes/clear.ts
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
/**
|
||||||
|
* POST /clear endpoint - Clear saved backlog plan
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { Request, Response } from 'express';
|
||||||
|
import { clearBacklogPlan, getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
|
export function createClearHandler() {
|
||||||
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
|
try {
|
||||||
|
const { projectPath } = req.body as { projectPath: string };
|
||||||
|
|
||||||
|
if (!projectPath) {
|
||||||
|
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
await clearBacklogPlan(projectPath);
|
||||||
|
res.json({ success: true });
|
||||||
|
} catch (error) {
|
||||||
|
logError(error, 'Clear backlog plan failed');
|
||||||
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -4,7 +4,13 @@
|
|||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import type { EventEmitter } from '../../../lib/events.js';
|
import type { EventEmitter } from '../../../lib/events.js';
|
||||||
import { getBacklogPlanStatus, setRunningState, getErrorMessage, logError } from '../common.js';
|
import {
|
||||||
|
getBacklogPlanStatus,
|
||||||
|
setRunningState,
|
||||||
|
setRunningDetails,
|
||||||
|
getErrorMessage,
|
||||||
|
logError,
|
||||||
|
} from '../common.js';
|
||||||
import { generateBacklogPlan } from '../generate-plan.js';
|
import { generateBacklogPlan } from '../generate-plan.js';
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
|
|
||||||
@@ -37,20 +43,26 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
|
|||||||
}
|
}
|
||||||
|
|
||||||
setRunningState(true);
|
setRunningState(true);
|
||||||
|
setRunningDetails({
|
||||||
|
projectPath,
|
||||||
|
prompt,
|
||||||
|
model,
|
||||||
|
startedAt: new Date().toISOString(),
|
||||||
|
});
|
||||||
const abortController = new AbortController();
|
const abortController = new AbortController();
|
||||||
setRunningState(true, abortController);
|
setRunningState(true, abortController);
|
||||||
|
|
||||||
// Start generation in background
|
// Start generation in background
|
||||||
|
// Note: generateBacklogPlan handles its own error event emission,
|
||||||
|
// so we only log here to avoid duplicate error toasts
|
||||||
generateBacklogPlan(projectPath, prompt, events, abortController, settingsService, model)
|
generateBacklogPlan(projectPath, prompt, events, abortController, settingsService, model)
|
||||||
.catch((error) => {
|
.catch((error) => {
|
||||||
|
// Just log - error event already emitted by generateBacklogPlan
|
||||||
logError(error, 'Generate backlog plan failed (background)');
|
logError(error, 'Generate backlog plan failed (background)');
|
||||||
events.emit('backlog-plan:event', {
|
|
||||||
type: 'backlog_plan_error',
|
|
||||||
error: getErrorMessage(error),
|
|
||||||
});
|
|
||||||
})
|
})
|
||||||
.finally(() => {
|
.finally(() => {
|
||||||
setRunningState(false, null);
|
setRunningState(false, null);
|
||||||
|
setRunningDetails(null);
|
||||||
});
|
});
|
||||||
|
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
|
|||||||
@@ -3,13 +3,15 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { getBacklogPlanStatus, getErrorMessage, logError } from '../common.js';
|
import { getBacklogPlanStatus, loadBacklogPlan, getErrorMessage, logError } from '../common.js';
|
||||||
|
|
||||||
export function createStatusHandler() {
|
export function createStatusHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (req: Request, res: Response): Promise<void> => {
|
||||||
try {
|
try {
|
||||||
const status = getBacklogPlanStatus();
|
const status = getBacklogPlanStatus();
|
||||||
res.json({ success: true, ...status });
|
const projectPath = typeof req.query.projectPath === 'string' ? req.query.projectPath : '';
|
||||||
|
const savedPlan = projectPath ? await loadBacklogPlan(projectPath) : null;
|
||||||
|
res.json({ success: true, ...status, savedPlan });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logError(error, 'Get backlog plan status failed');
|
logError(error, 'Get backlog plan status failed');
|
||||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||||
|
|||||||
@@ -3,7 +3,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { getAbortController, setRunningState, getErrorMessage, logError } from '../common.js';
|
import {
|
||||||
|
getAbortController,
|
||||||
|
setRunningState,
|
||||||
|
setRunningDetails,
|
||||||
|
getErrorMessage,
|
||||||
|
logError,
|
||||||
|
} from '../common.js';
|
||||||
|
|
||||||
export function createStopHandler() {
|
export function createStopHandler() {
|
||||||
return async (_req: Request, res: Response): Promise<void> => {
|
return async (_req: Request, res: Response): Promise<void> => {
|
||||||
@@ -12,6 +18,7 @@ export function createStopHandler() {
|
|||||||
if (abortController) {
|
if (abortController) {
|
||||||
abortController.abort();
|
abortController.abort();
|
||||||
setRunningState(false, null);
|
setRunningState(false, null);
|
||||||
|
setRunningDetails(null);
|
||||||
}
|
}
|
||||||
res.json({ success: true });
|
res.json({ success: true });
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@@ -34,6 +34,13 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
|
|||||||
error: 'Authentication required',
|
error: 'Authentication required',
|
||||||
message: "Please run 'claude login' to authenticate",
|
message: "Please run 'claude login' to authenticate",
|
||||||
});
|
});
|
||||||
|
} else if (message.includes('TRUST_PROMPT_PENDING')) {
|
||||||
|
// Trust prompt appeared but couldn't be auto-approved
|
||||||
|
res.status(200).json({
|
||||||
|
error: 'Trust prompt pending',
|
||||||
|
message:
|
||||||
|
'Claude CLI needs folder permission. Please run "claude" in your terminal and approve access.',
|
||||||
|
});
|
||||||
} else if (message.includes('timed out')) {
|
} else if (message.includes('timed out')) {
|
||||||
res.status(200).json({
|
res.status(200).json({
|
||||||
error: 'Command timed out',
|
error: 'Command timed out',
|
||||||
|
|||||||
@@ -1,17 +1,21 @@
|
|||||||
import { Router, Request, Response } from 'express';
|
import { Router, Request, Response } from 'express';
|
||||||
import { CodexUsageService } from '../../services/codex-usage-service.js';
|
import { CodexUsageService } from '../../services/codex-usage-service.js';
|
||||||
|
import { CodexModelCacheService } from '../../services/codex-model-cache-service.js';
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
|
|
||||||
const logger = createLogger('Codex');
|
const logger = createLogger('Codex');
|
||||||
|
|
||||||
export function createCodexRoutes(service: CodexUsageService): Router {
|
export function createCodexRoutes(
|
||||||
|
usageService: CodexUsageService,
|
||||||
|
modelCacheService: CodexModelCacheService
|
||||||
|
): Router {
|
||||||
const router = Router();
|
const router = Router();
|
||||||
|
|
||||||
// Get current usage (attempts to fetch from Codex CLI)
|
// Get current usage (attempts to fetch from Codex CLI)
|
||||||
router.get('/usage', async (req: Request, res: Response) => {
|
router.get('/usage', async (_req: Request, res: Response) => {
|
||||||
try {
|
try {
|
||||||
// Check if Codex CLI is available first
|
// Check if Codex CLI is available first
|
||||||
const isAvailable = await service.isAvailable();
|
const isAvailable = await usageService.isAvailable();
|
||||||
if (!isAvailable) {
|
if (!isAvailable) {
|
||||||
// IMPORTANT: This endpoint is behind Automaker session auth already.
|
// IMPORTANT: This endpoint is behind Automaker session auth already.
|
||||||
// Use a 200 + error payload for Codex CLI issues so the UI doesn't
|
// Use a 200 + error payload for Codex CLI issues so the UI doesn't
|
||||||
@@ -23,7 +27,7 @@ export function createCodexRoutes(service: CodexUsageService): Router {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const usage = await service.fetchUsageData();
|
const usage = await usageService.fetchUsageData();
|
||||||
res.json(usage);
|
res.json(usage);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
const message = error instanceof Error ? error.message : 'Unknown error';
|
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||||
@@ -52,5 +56,35 @@ export function createCodexRoutes(service: CodexUsageService): Router {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Get available Codex models (cached)
|
||||||
|
router.get('/models', async (req: Request, res: Response) => {
|
||||||
|
try {
|
||||||
|
const forceRefresh = req.query.refresh === 'true';
|
||||||
|
const { models, cachedAt } = await modelCacheService.getModelsWithMetadata(forceRefresh);
|
||||||
|
|
||||||
|
if (models.length === 0) {
|
||||||
|
res.status(503).json({
|
||||||
|
success: false,
|
||||||
|
error: 'Codex CLI not available or not authenticated',
|
||||||
|
message: "Please install Codex CLI and run 'codex login' to authenticate",
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
res.json({
|
||||||
|
success: true,
|
||||||
|
models,
|
||||||
|
cachedAt,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error fetching models:', error);
|
||||||
|
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||||
|
res.status(500).json({
|
||||||
|
success: false,
|
||||||
|
error: message,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
return router;
|
return router;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,17 +11,18 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import { createLogger } from '@automaker/utils';
|
import { createLogger } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
|
||||||
import { PathNotAllowedError } from '@automaker/platform';
|
import { PathNotAllowedError } from '@automaker/platform';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
|
||||||
import * as secureFs from '../../../lib/secure-fs.js';
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPromptCustomization,
|
||||||
|
getPhaseModelWithOverrides,
|
||||||
|
} from '../../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const logger = createLogger('DescribeFile');
|
const logger = createLogger('DescribeFile');
|
||||||
|
|
||||||
@@ -49,31 +50,6 @@ interface DescribeFileErrorResponse {
|
|||||||
error: string;
|
error: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract text content from Claude SDK response messages
|
|
||||||
*/
|
|
||||||
async function extractTextFromStream(
|
|
||||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
||||||
stream: AsyncIterable<any>
|
|
||||||
): Promise<string> {
|
|
||||||
let responseText = '';
|
|
||||||
|
|
||||||
for await (const msg of stream) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
|
|
||||||
for (const block of blocks) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
|
||||||
responseText = msg.result || responseText;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return responseText;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create the describe-file request handler
|
* Create the describe-file request handler
|
||||||
*
|
*
|
||||||
@@ -157,18 +133,17 @@ export function createDescribeFileHandler(
|
|||||||
// Get the filename for context
|
// Get the filename for context
|
||||||
const fileName = path.basename(resolvedPath);
|
const fileName = path.basename(resolvedPath);
|
||||||
|
|
||||||
|
// Get customized prompts from settings
|
||||||
|
const prompts = await getPromptCustomization(settingsService, '[DescribeFile]');
|
||||||
|
|
||||||
// Build prompt with file content passed as structured data
|
// Build prompt with file content passed as structured data
|
||||||
// The file content is included directly, not via tool invocation
|
// The file content is included directly, not via tool invocation
|
||||||
const instructionText = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
|
const prompt = `${prompts.contextDescription.describeFilePrompt}
|
||||||
|
|
||||||
Respond with ONLY the description text, no additional formatting, preamble, or explanation.
|
File: ${fileName}${truncated ? ' (truncated)' : ''}
|
||||||
|
|
||||||
File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
--- FILE CONTENT ---
|
||||||
|
${contentToAnalyze}`;
|
||||||
const promptContent = [
|
|
||||||
{ type: 'text' as const, text: instructionText },
|
|
||||||
{ type: 'text' as const, text: `\n\n--- FILE CONTENT ---\n${contentToAnalyze}` },
|
|
||||||
];
|
|
||||||
|
|
||||||
// Use the file's directory as the working directory
|
// Use the file's directory as the working directory
|
||||||
const cwd = path.dirname(resolvedPath);
|
const cwd = path.dirname(resolvedPath);
|
||||||
@@ -180,77 +155,39 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
|||||||
'[DescribeFile]'
|
'[DescribeFile]'
|
||||||
);
|
);
|
||||||
|
|
||||||
// Get model from phase settings
|
// Get model from phase settings with provider info
|
||||||
const settings = await settingsService?.getGlobalSettings();
|
const {
|
||||||
logger.info(`Raw phaseModels from settings:`, JSON.stringify(settings?.phaseModels, null, 2));
|
phaseModel: phaseModelEntry,
|
||||||
const phaseModelEntry =
|
provider,
|
||||||
settings?.phaseModels?.fileDescriptionModel || DEFAULT_PHASE_MODELS.fileDescriptionModel;
|
credentials,
|
||||||
logger.info(`fileDescriptionModel entry:`, JSON.stringify(phaseModelEntry));
|
} = await getPhaseModelWithOverrides(
|
||||||
|
'fileDescriptionModel',
|
||||||
|
settingsService,
|
||||||
|
cwd,
|
||||||
|
'[DescribeFile]'
|
||||||
|
);
|
||||||
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
||||||
|
|
||||||
logger.info(`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`);
|
logger.info(
|
||||||
|
`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`,
|
||||||
|
provider ? `via provider: ${provider.name}` : 'direct API'
|
||||||
|
);
|
||||||
|
|
||||||
let description: string;
|
// Use simpleQuery - provider abstraction handles routing to correct provider
|
||||||
|
const result = await simpleQuery({
|
||||||
// Route to appropriate provider based on model type
|
prompt,
|
||||||
if (isCursorModel(model)) {
|
|
||||||
// Use Cursor provider for Cursor models
|
|
||||||
logger.info(`Using Cursor provider for model: ${model}`);
|
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
|
||||||
const bareModel = stripProviderPrefix(model);
|
|
||||||
|
|
||||||
// Build a simple text prompt for Cursor (no multi-part content blocks)
|
|
||||||
const cursorPrompt = `${instructionText}\n\n--- FILE CONTENT ---\n${contentToAnalyze}`;
|
|
||||||
|
|
||||||
let responseText = '';
|
|
||||||
for await (const msg of provider.executeQuery({
|
|
||||||
prompt: cursorPrompt,
|
|
||||||
model: bareModel,
|
|
||||||
cwd,
|
|
||||||
maxTurns: 1,
|
|
||||||
allowedTools: [],
|
|
||||||
readOnly: true, // File description only reads, doesn't write
|
|
||||||
})) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
description = responseText;
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models
|
|
||||||
logger.info(`Using Claude SDK for model: ${model}`);
|
|
||||||
|
|
||||||
// Use centralized SDK options with proper cwd validation
|
|
||||||
// No tools needed since we're passing file content directly
|
|
||||||
const sdkOptions = createCustomOptions({
|
|
||||||
cwd,
|
|
||||||
model,
|
model,
|
||||||
|
cwd,
|
||||||
maxTurns: 1,
|
maxTurns: 1,
|
||||||
allowedTools: [],
|
allowedTools: [],
|
||||||
autoLoadClaudeMd,
|
thinkingLevel,
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
readOnly: true, // File description only reads, doesn't write
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration
|
||||||
|
credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
});
|
});
|
||||||
|
|
||||||
const promptGenerator = (async function* () {
|
const description = result.text;
|
||||||
yield {
|
|
||||||
type: 'user' as const,
|
|
||||||
session_id: '',
|
|
||||||
message: { role: 'user' as const, content: promptContent },
|
|
||||||
parent_tool_use_id: null,
|
|
||||||
};
|
|
||||||
})();
|
|
||||||
|
|
||||||
const stream = query({ prompt: promptGenerator, options: sdkOptions });
|
|
||||||
|
|
||||||
// Extract the description from the response
|
|
||||||
description = await extractTextFromStream(stream);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!description || description.trim().length === 0) {
|
if (!description || description.trim().length === 0) {
|
||||||
logger.warn('Received empty response from Claude');
|
logger.warn('Received empty response from Claude');
|
||||||
|
|||||||
@@ -12,16 +12,18 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import type { Request, Response } from 'express';
|
import type { Request, Response } from 'express';
|
||||||
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
||||||
import { createLogger, readImageAsBase64 } from '@automaker/utils';
|
import { createLogger, readImageAsBase64 } from '@automaker/utils';
|
||||||
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
|
import { isCursorModel } from '@automaker/types';
|
||||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||||
import { createCustomOptions } from '../../../lib/sdk-options.js';
|
import { simpleQuery } from '../../../providers/simple-query-service.js';
|
||||||
import { ProviderFactory } from '../../../providers/provider-factory.js';
|
|
||||||
import * as secureFs from '../../../lib/secure-fs.js';
|
import * as secureFs from '../../../lib/secure-fs.js';
|
||||||
import * as path from 'path';
|
import * as path from 'path';
|
||||||
import type { SettingsService } from '../../../services/settings-service.js';
|
import type { SettingsService } from '../../../services/settings-service.js';
|
||||||
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
|
import {
|
||||||
|
getAutoLoadClaudeMdSetting,
|
||||||
|
getPromptCustomization,
|
||||||
|
getPhaseModelWithOverrides,
|
||||||
|
} from '../../../lib/settings-helpers.js';
|
||||||
|
|
||||||
const logger = createLogger('DescribeImage');
|
const logger = createLogger('DescribeImage');
|
||||||
|
|
||||||
@@ -178,57 +180,10 @@ function mapDescribeImageError(rawMessage: string | undefined): {
|
|||||||
return baseResponse;
|
return baseResponse;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract text content from Claude SDK response messages and log high-signal stream events.
|
|
||||||
*/
|
|
||||||
async function extractTextFromStream(
|
|
||||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
||||||
stream: AsyncIterable<any>,
|
|
||||||
requestId: string
|
|
||||||
): Promise<string> {
|
|
||||||
let responseText = '';
|
|
||||||
let messageCount = 0;
|
|
||||||
|
|
||||||
logger.info(`[${requestId}] [Stream] Begin reading SDK stream...`);
|
|
||||||
|
|
||||||
for await (const msg of stream) {
|
|
||||||
messageCount++;
|
|
||||||
const msgType = msg?.type;
|
|
||||||
const msgSubtype = msg?.subtype;
|
|
||||||
|
|
||||||
// Keep this concise but informative. Full error object is logged in catch blocks.
|
|
||||||
logger.info(
|
|
||||||
`[${requestId}] [Stream] #${messageCount} type=${String(msgType)} subtype=${String(msgSubtype ?? '')}`
|
|
||||||
);
|
|
||||||
|
|
||||||
if (msgType === 'assistant' && msg.message?.content) {
|
|
||||||
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
|
|
||||||
logger.info(`[${requestId}] [Stream] assistant blocks=${blocks.length}`);
|
|
||||||
for (const block of blocks) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (msgType === 'result' && msgSubtype === 'success') {
|
|
||||||
if (typeof msg.result === 'string' && msg.result.length > 0) {
|
|
||||||
responseText = msg.result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info(
|
|
||||||
`[${requestId}] [Stream] End of stream. messages=${messageCount} textLength=${responseText.length}`
|
|
||||||
);
|
|
||||||
|
|
||||||
return responseText;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create the describe-image request handler
|
* Create the describe-image request handler
|
||||||
*
|
*
|
||||||
* Uses Claude SDK query with multi-part content blocks to include the image (base64),
|
* Uses the provider abstraction with multi-part content blocks to include the image (base64),
|
||||||
* matching the agent runner behavior.
|
* matching the agent runner behavior.
|
||||||
*
|
*
|
||||||
* @param settingsService - Optional settings service for loading autoLoadClaudeMd setting
|
* @param settingsService - Optional settings service for loading autoLoadClaudeMd setting
|
||||||
@@ -309,27 +264,6 @@ export function createDescribeImageHandler(
|
|||||||
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
|
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
|
||||||
);
|
);
|
||||||
|
|
||||||
// Build multi-part prompt with image block (no Read tool required)
|
|
||||||
const instructionText =
|
|
||||||
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
|
|
||||||
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
|
|
||||||
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
|
|
||||||
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
|
|
||||||
|
|
||||||
const promptContent = [
|
|
||||||
{ type: 'text' as const, text: instructionText },
|
|
||||||
{
|
|
||||||
type: 'image' as const,
|
|
||||||
source: {
|
|
||||||
type: 'base64' as const,
|
|
||||||
media_type: imageData.mimeType,
|
|
||||||
data: imageData.base64,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
logger.info(`[${requestId}] Built multi-part prompt blocks=${promptContent.length}`);
|
|
||||||
|
|
||||||
const cwd = path.dirname(actualPath);
|
const cwd = path.dirname(actualPath);
|
||||||
logger.info(`[${requestId}] Using cwd=${cwd}`);
|
logger.info(`[${requestId}] Using cwd=${cwd}`);
|
||||||
|
|
||||||
@@ -340,93 +274,78 @@ export function createDescribeImageHandler(
|
|||||||
'[DescribeImage]'
|
'[DescribeImage]'
|
||||||
);
|
);
|
||||||
|
|
||||||
// Get model from phase settings
|
// Get model from phase settings with provider info
|
||||||
const settings = await settingsService?.getGlobalSettings();
|
const {
|
||||||
const phaseModelEntry =
|
phaseModel: phaseModelEntry,
|
||||||
settings?.phaseModels?.imageDescriptionModel || DEFAULT_PHASE_MODELS.imageDescriptionModel;
|
provider,
|
||||||
|
credentials,
|
||||||
|
} = await getPhaseModelWithOverrides(
|
||||||
|
'imageDescriptionModel',
|
||||||
|
settingsService,
|
||||||
|
cwd,
|
||||||
|
'[DescribeImage]'
|
||||||
|
);
|
||||||
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
const { model, thinkingLevel } = resolvePhaseModel(phaseModelEntry);
|
||||||
|
|
||||||
logger.info(`[${requestId}] Using model: ${model}`);
|
|
||||||
|
|
||||||
let description: string;
|
|
||||||
|
|
||||||
// Route to appropriate provider based on model type
|
|
||||||
if (isCursorModel(model)) {
|
|
||||||
// Use Cursor provider for Cursor models
|
|
||||||
// Note: Cursor may have limited support for image content blocks
|
|
||||||
logger.info(`[${requestId}] Using Cursor provider for model: ${model}`);
|
|
||||||
|
|
||||||
const provider = ProviderFactory.getProviderForModel(model);
|
|
||||||
// Strip provider prefix - providers expect bare model IDs
|
|
||||||
const bareModel = stripProviderPrefix(model);
|
|
||||||
|
|
||||||
// Build prompt with image reference for Cursor
|
|
||||||
// Note: Cursor CLI may not support base64 image blocks directly,
|
|
||||||
// so we include the image path as context
|
|
||||||
const cursorPrompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
|
|
||||||
|
|
||||||
let responseText = '';
|
|
||||||
const queryStart = Date.now();
|
|
||||||
for await (const msg of provider.executeQuery({
|
|
||||||
prompt: cursorPrompt,
|
|
||||||
model: bareModel,
|
|
||||||
cwd,
|
|
||||||
maxTurns: 1,
|
|
||||||
allowedTools: ['Read'], // Allow Read tool so Cursor can read the image if needed
|
|
||||||
readOnly: true, // Image description only reads, doesn't write
|
|
||||||
})) {
|
|
||||||
if (msg.type === 'assistant' && msg.message?.content) {
|
|
||||||
for (const block of msg.message.content) {
|
|
||||||
if (block.type === 'text' && block.text) {
|
|
||||||
responseText += block.text;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logger.info(`[${requestId}] Cursor query completed in ${Date.now() - queryStart}ms`);
|
|
||||||
description = responseText;
|
|
||||||
} else {
|
|
||||||
// Use Claude SDK for Claude models (supports image content blocks)
|
|
||||||
logger.info(`[${requestId}] Using Claude SDK for model: ${model}`);
|
|
||||||
|
|
||||||
// Use the same centralized option builder used across the server (validates cwd)
|
|
||||||
const sdkOptions = createCustomOptions({
|
|
||||||
cwd,
|
|
||||||
model,
|
|
||||||
maxTurns: 1,
|
|
||||||
allowedTools: [],
|
|
||||||
autoLoadClaudeMd,
|
|
||||||
thinkingLevel, // Pass thinking level for extended thinking
|
|
||||||
});
|
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
`[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
|
`[${requestId}] Using model: ${model}`,
|
||||||
sdkOptions.allowedTools
|
provider ? `via provider: ${provider.name}` : 'direct API'
|
||||||
)}`
|
|
||||||
);
|
);
|
||||||
|
|
||||||
const promptGenerator = (async function* () {
|
// Get customized prompts from settings
|
||||||
yield {
|
const prompts = await getPromptCustomization(settingsService, '[DescribeImage]');
|
||||||
type: 'user' as const,
|
|
||||||
session_id: '',
|
|
||||||
message: { role: 'user' as const, content: promptContent },
|
|
||||||
parent_tool_use_id: null,
|
|
||||||
};
|
|
||||||
})();
|
|
||||||
|
|
||||||
logger.info(`[${requestId}] Calling query()...`);
|
// Build the instruction text from centralized prompts
|
||||||
const queryStart = Date.now();
|
const instructionText = prompts.contextDescription.describeImagePrompt;
|
||||||
const stream = query({ prompt: promptGenerator, options: sdkOptions });
|
|
||||||
logger.info(`[${requestId}] query() returned stream in ${Date.now() - queryStart}ms`);
|
|
||||||
|
|
||||||
// Extract the description from the response
|
// Build prompt based on provider capability
|
||||||
const extractStart = Date.now();
|
// Some providers (like Cursor) may not support image content blocks
|
||||||
description = await extractTextFromStream(stream, requestId);
|
let prompt: string | Array<{ type: string; text?: string; source?: object }>;
|
||||||
logger.info(`[${requestId}] extractMs=${Date.now() - extractStart}`);
|
|
||||||
|
if (isCursorModel(model)) {
|
||||||
|
// Cursor may not support base64 image blocks directly
|
||||||
|
// Use text prompt with image path reference
|
||||||
|
logger.info(`[${requestId}] Using text prompt for Cursor model`);
|
||||||
|
prompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
|
||||||
|
} else {
|
||||||
|
// Claude and other vision-capable models support multi-part prompts with images
|
||||||
|
logger.info(`[${requestId}] Using multi-part prompt with image block`);
|
||||||
|
prompt = [
|
||||||
|
{ type: 'text', text: instructionText },
|
||||||
|
{
|
||||||
|
type: 'image',
|
||||||
|
source: {
|
||||||
|
type: 'base64',
|
||||||
|
media_type: imageData.mimeType,
|
||||||
|
data: imageData.base64,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.info(`[${requestId}] Calling simpleQuery...`);
|
||||||
|
const queryStart = Date.now();
|
||||||
|
|
||||||
|
// Use simpleQuery - provider abstraction handles routing
|
||||||
|
const result = await simpleQuery({
|
||||||
|
prompt,
|
||||||
|
model,
|
||||||
|
cwd,
|
||||||
|
maxTurns: 1,
|
||||||
|
allowedTools: isCursorModel(model) ? ['Read'] : [], // Allow Read for Cursor to read image if needed
|
||||||
|
thinkingLevel,
|
||||||
|
readOnly: true, // Image description only reads, doesn't write
|
||||||
|
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
|
||||||
|
claudeCompatibleProvider: provider, // Pass provider for alternative endpoint configuration
|
||||||
|
credentials, // Pass credentials for resolving 'credentials' apiKeySource
|
||||||
|
});
|
||||||
|
|
||||||
|
logger.info(`[${requestId}] simpleQuery completed in ${Date.now() - queryStart}ms`);
|
||||||
|
|
||||||
|
const description = result.text;
|
||||||
|
|
||||||
if (!description || description.trim().length === 0) {
|
if (!description || description.trim().length === 0) {
|
||||||
logger.warn(`[${requestId}] Received empty response from Claude`);
|
logger.warn(`[${requestId}] Received empty response from AI`);
|
||||||
const response: DescribeImageErrorResponse = {
|
const response: DescribeImageErrorResponse = {
|
||||||
success: false,
|
success: false,
|
||||||
error: 'Failed to generate description - empty response',
|
error: 'Failed to generate description - empty response',
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user