Mirror of https://github.com/AutoMaker-Org/automaker.git (synced 2026-01-30 14:22:02 +00:00)
Compare commits
945 Commits
@@ -1,202 +0,0 @@
<project_specification>
<project_name>Automaker - Autonomous AI Development Studio</project_name>

<overview>
Automaker is a sophisticated desktop application that empowers developers to build software autonomously through AI-powered agents. Built with Electron and Next.js, it provides an intelligent GUI for project management, feature tracking via Kanban boards, and autonomous code generation. The application leverages multiple AI models (Claude, GPT) and supports complex workflows including git worktree isolation, testing automation, and multi-model agent execution. It acts as a complete development orchestrator, managing the entire lifecycle from specification to verified implementation.
</overview>

<technology_stack>
<frontend>
<framework>Next.js 16.0.7 (App Router)</framework>
<ui_library>shadcn/ui with Radix UI primitives</ui_library>
<styling>Tailwind CSS 4.0</styling>
<state_management>Zustand with persistence</state_management>
<drag_drop>@dnd-kit for Kanban board</drag_drop>
<icons>Lucide React</icons>
<query_client>TanStack Query for server state</query_client>
</frontend>
<desktop_shell>
<framework>Electron 39.2.6</framework>
<language>TypeScript 5.x</language>
<inter_process_communication>Electron IPC with security sandboxing</inter_process_communication>
<file_system>Node.js fs/promises with path validation</file_system>
</desktop_shell>
<ai_engine>
<primary_model>Claude 3.5 (Opus, Sonnet, Haiku) via Anthropic Claude Agent SDK</primary_model>
<secondary_model>GPT-5.1 Codex family via OpenAI CLI</secondary_model>
<orchestration>Custom Agent Service with streaming responses</orchestration>
<model_registry>Dynamic model provider system with CLI detection</model_registry>
</ai_engine>
<testing>
<framework>Playwright for E2E testing</framework>
<unit>Jest/Vitest compatible</unit>
<integration>Agent-driven test execution and verification</integration>
</testing>
<version_control>
<system>Git with worktree isolation support</system>
<branching>Feature branch management</branching>
<workflow>Automated commit and merge capabilities</workflow>
</version_control>
</technology_stack>

<core_capabilities>
<project_management>
- Open and manage multiple local projects
- Project-specific themes and configurations
- Session management with project context
- Recently used project cycling (Q/E shortcuts)
- Project search and type-ahead selection
- Trash and restore functionality for projects
</project_management>

<intelligent_analysis>
- Auto-generation and updating of app_spec.txt
- Feature extraction from existing codebases
- Technology stack detection and documentation
- Project structure analysis with file tree visualization
- "Project Ingestion": analyzes existing codebases to understand structure
- Auto-generation of `.automaker/app_spec.txt` based on codebase analysis
- Auto-generation of features in `.automaker/features/{id}/feature.json`:
  - Scans code for implemented features
  - Creates test cases for existing features
  - Marks existing features as "passes": true automatically
</intelligent_analysis>

<kanban_workflow>
- Visual representation of features from the `.automaker/features/` folder
- Drag-and-drop interface to reprioritize tasks
- Direct editing of feature details (steps, description) from the card
- Visual Kanban board with drag-and-drop functionality
- Multiple status columns: Backlog, In Progress, Waiting Approval, Verified
- Feature cards with detailed information display (3 detail levels)
- Real-time status updates during agent execution
- Search and filtering capabilities
- Category management and autocomplete
- Image attachment support for feature descriptions
</kanban_workflow>

<autonomous_agent_engine>
- Multi-model agent system with profile-based execution
- Streaming agent output with real-time logs
- Git worktree isolation for safe feature development
- Automatic testing and verification workflows
- Context-aware prompt generation
- Agent memory and learning capabilities
- Concurrent feature processing with configurable limits
- Follow-up and resume capabilities
</autonomous_agent_engine>

<advanced_workflows>
- Git worktree management for isolated development
- Feature-specific branching and merging
- Automated commit generation with file tracking
- Test-driven development support
- Code review and approval workflows
- Revert and rollback capabilities
</advanced_workflows>

<user_interface>
- Dark/Light theme support with 12 custom themes
- Per-project theme configurations
- Comprehensive keyboard shortcut system
- Sidebar navigation with project switching
- Multi-view architecture (Board, Spec, Agent, Context, Settings)
- Setup wizard for first-time configuration
- CLI integration status monitoring
</user_interface>

<extensibility>
- AI Profile system for model/thinking level presets
- Keyboard shortcut customization
- Model provider plugin architecture
- Context file management for agent guidance
- Feature suggestion generation
- Spec regeneration workflows
</extensibility>
</core_capabilities>

<ui_layout>
<window_structure>
- Sidebar: Project List, Settings, Logs, Plugins
- Main Content:
  - **Spec View**: Split editor for `.automaker/app_spec.txt`
  - **Board View**: Kanban board for the `.automaker/features/` folder
  - **Code View**: Read-only Monaco editor to see what the agent is writing
  - **Agent View**: Chat-like interface showing agent thought process and tool usage. Also used for the "New Project Interview".
</window_structure>
<theme>
- Dark/Light mode support (system sync)
- "Hacker" aesthetic option (terminal-like)
- Professional/Clean default
</theme>
</ui_layout>

<development_workflow>
<local_testing>
- "Browser Mode": Run the Next.js frontend in a standard browser with mocked Electron IPC for rapid UI iteration.
- "Electron Mode": Full desktop app testing.
- Hot reloading for both Main and Renderer processes.
</local_testing>
</development_workflow>

<implemented_features>
- Complete Kanban board with drag-and-drop functionality
- Multi-model AI agent execution (Claude + GPT/Codex)
- Git worktree isolation for features
- Real-time agent output streaming and logging
- Project management with session persistence
- Theme system with 12 themes plus per-project themes
- Comprehensive settings panel with all configurations
- Feature image attachment and context system
- Agent profiles with model/thinking level presets
- Keyboard shortcut system with customization
- CLI integration detection (Claude Code + Codex CLI)
- Auto mode for autonomous feature processing
- Feature suggestion generation
- Spec regeneration and project analysis
- Context file management
- Chat history and session management
- File diff viewing and git integration
- Search and filtering across all features
- Category management and autocomplete
- Test automation and verification workflows
</implemented_features>

<implementation_roadmap>
<phase_1_foundation>
- Enhanced error handling and recovery mechanisms
- Performance optimization for large projects
- Improved memory management for long-running sessions
- Advanced logging and debugging capabilities
</phase_1_foundation>

<phase_2_core_logic>
- Plugin system for custom model providers
- Advanced workflow customization engine
- Team collaboration features
- Cloud synchronization capabilities
- Advanced project templates and scaffolding
</phase_2_core_logic>

<phase_3_kanban_and_interaction>
- Build Kanban board with drag-and-drop
- Connect Kanban state to the `.automaker/features/` filesystem
- Implement "Run Feature" capability
- Integrate standard prompts library
</phase_3_kanban_and_interaction>

<phase_3_polish>
- Enhanced accessibility features
- Advanced theme customization
- Performance monitoring and analytics
- Documentation generation automation
- Integration with external development tools
- Advanced security auditing and sandboxing
</phase_3_polish>

<phase_4_polish>
- Advanced terminal integration
- Settings & Extensibility
- UI refinement
</phase_4_polish>
</implementation_roadmap>
</project_specification>
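Taken together, the Kanban columns in the spec above and the statuses used elsewhere in this repository (backlog, in_progress, waiting_approval, verified) suggest a small status type. The TypeScript sketch below is illustrative only; the type and constant names are assumptions, not the project's actual definitions:

```typescript
// Illustrative sketch: names are assumptions, but the four status values
// correspond to the Kanban columns listed in the spec above.
export type FeatureStatus =
  | "backlog"
  | "in_progress"
  | "waiting_approval"
  | "verified";

// Column order as it would appear on the board (Backlog -> Verified).
export const KANBAN_COLUMNS: FeatureStatus[] = [
  "backlog",
  "in_progress",
  "waiting_approval",
  "verified",
];
```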
@@ -1,9 +0,0 @@
[
  "Agent Runner",
  "Core",
  "Kanban",
  "Other",
  "Settings",
  "Uncategorized",
  "ka"
]
@@ -1,70 +0,0 @@
You are a very strong reasoner and planner. Use these critical instructions to structure your plans, thoughts, and responses.

Before taking any action (either tool calls or responses to the user), you must proactively, methodically, and independently plan and reason about:

1. Logical dependencies and constraints:

Analyze the intended action against the following factors. Resolve conflicts in order of importance:

1.1) Policy-based rules, mandatory prerequisites, and constraints.
1.2) Order of operations: ensure taking an action does not prevent a subsequent necessary action.
1.2.1) The user may request actions in a random order, but you may need to reorder operations to maximize successful completion of the task.
1.3) Other prerequisites (information and/or actions needed).
1.4) Explicit user constraints or preferences.

2. Risk assessment:

What are the consequences of taking the action? Will the new state cause any future issues?

2.1) For exploratory tasks (like searches), missing optional parameters is a LOW risk.
Prefer calling the tool with the available information over asking the user, unless your Rule 1 (Logical Dependencies) reasoning determines that optional information is required for a later step in your plan.

3. Abductive reasoning and hypothesis exploration:

At each step, identify the most logical and likely reason for any problem encountered.

3.1) Look beyond immediate or obvious causes. The most likely reason may not be the simplest and may require deeper inference.
3.2) Hypotheses may require additional research. Each hypothesis may take multiple steps to test.
3.3) Prioritize hypotheses based on likelihood, but do not discard less likely ones prematurely. A low-probability event may still be the root cause.

4. Outcome evaluation and adaptability:

Does the previous observation require any changes to your plan?

4.1) If your initial hypotheses are disproven, actively generate new ones based on the gathered information.

5. Information availability:

Incorporate all applicable and alternative sources of information, including:

5.1) Using available tools and their capabilities
5.2) All policies, rules, checklists, and constraints
5.3) Previous observations and conversation history
5.4) Information only available by asking the user

6. Precision and Grounding:

Ensure your reasoning is extremely precise and relevant to each exact ongoing situation.

6.1) Verify your claims by quoting the exact applicable information (including policies) when referring to them.

7. Completeness:

Ensure that all requirements, constraints, options, and preferences are exhaustively incorporated into your plan.

7.1) Resolve conflicts using the order of importance in #1.
7.2) Avoid premature conclusions: there may be multiple relevant options for a given situation.
7.2.1) To check whether an option is relevant, reason about all information sources from #5.
7.2.2) You may need to consult the user to even know whether something is applicable. Do not assume it is not applicable without checking.
7.3) Review applicable sources of information from #5 to confirm which are relevant to the current state.

8. Persistence and patience:

Do not give up unless all the reasoning above is exhausted.

8.1) Don't be dissuaded by time taken or user frustration.
8.2) This persistence must be intelligent: on transient errors (e.g. "please try again"), you must retry unless an explicit retry limit (e.g., max x tries) has been reached. If such a limit is hit, you must stop. On other errors, you must change your strategy or arguments, not repeat the same failed call.

9. Inhibit your response:

Only take an action after all the above reasoning is completed. Once you've taken an action, you cannot take it back.
@@ -1,14 +0,0 @@
{
  "category": "Core",
  "description": "do nothing, code nothing, print yolo",
  "steps": [],
  "status": "waiting_approval",
  "images": [],
  "imagePaths": [],
  "skipTests": true,
  "model": "opus",
  "thinkingLevel": "none",
  "id": "feature-1765414180387-4zcc7wpdv",
  "startedAt": "2025-12-11T00:49:41.713Z",
  "summary": "No code changes required. Feature requested 'do nothing, code nothing, print yolo' - completed as specified. YOLO!"
}
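For orientation, the fields in the feature.json above map naturally onto a TypeScript shape. The following is a descriptive sketch inferred from that single example file, not the project's canonical type definition; field optionality and the exact status union are assumptions:

```typescript
// Sketch inferred from the example feature.json above; not the project's
// canonical type. Optionality and the status union members are assumptions.
interface FeatureRecord {
  id: string;                      // e.g. "feature-1765414180387-4zcc7wpdv"
  category: string;                // e.g. "Core"
  description: string;
  steps: string[];
  status: "backlog" | "in_progress" | "waiting_approval" | "verified";
  images: string[];
  imagePaths: string[];
  skipTests: boolean;
  model: string;                   // e.g. "opus"
  thinkingLevel: string;           // e.g. "none"
  startedAt?: string;              // ISO timestamp, present once work has started
  summary?: string;                // filled in by the agent after completion
}
```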
Binary file not shown (before: 37 KiB).
Binary file not shown (before: 37 KiB).
@@ -1,172 +0,0 @@
# Agent Memory - Lessons Learned

This file documents issues encountered by previous agents and their solutions. Read this before starting work to avoid repeating mistakes.

## Testing Issues

### Issue: Mock project setup not navigating to board view

**Problem:** Setting `currentProject` in localStorage didn't automatically show the board view - the app stayed on the welcome view.
**Fix:** The `currentView` state is not persisted in localStorage. Instead of trying to set it, have tests click on the recent project from the welcome view to trigger `setCurrentProject()`, which handles the view transition properly.

```typescript
// Don't do this:
await setupMockProject(page); // Sets localStorage
await page.goto("/");
await waitForElement(page, "board-view"); // ❌ Fails - still on welcome view

// Do this instead:
await setupMockProject(page);
await page.goto("/");
await waitForElement(page, "welcome-view");
const recentProject = page.locator(
  '[data-testid="recent-project-test-project-1"]'
);
await recentProject.click(); // ✅ Triggers proper view transition
await waitForElement(page, "board-view");
```

### Issue: View output button test IDs are conditional

**Problem:** Tests failed looking for `view-output-inprogress-${featureId}` when the actual button had `view-output-${featureId}`.
**Fix:** The button test ID depends on whether the feature is actively running:

- `view-output-${featureId}` - shown when the feature is in `runningAutoTasks` (actively running)
- `view-output-inprogress-${featureId}` - shown when status is "in_progress" but NOT actively running

After dragging a feature to in_progress, wait for the `auto_mode_feature_start` event to fire before looking for the button:

```typescript
// Wait for feature to start running
const viewOutputButton = page
  .locator(
    `[data-testid="view-output-${featureId}"], [data-testid="view-output-inprogress-${featureId}"]`
  )
  .first();
await expect(viewOutputButton).toBeVisible({ timeout: 8000 });
```

### Issue: Elements not appearing due to async event timing

**Problem:** Tests checked for UI elements before async events (like `auto_mode_feature_start`) had fired and updated the UI.
**Fix:** Add appropriate timeouts when waiting for elements that depend on async events. The mock auto mode takes ~2.4 seconds to complete, so allow sufficient time:

```typescript
// Mock auto mode timing: ~2.4s + 1.5s delay = ~4s total
await waitForAgentOutputModalHidden(page, { timeout: 10000 });
```

### Issue: Slider interaction testing

**Problem:** Clicking on the slider track didn't reliably set specific values.
**Fix:** Use the slider's keyboard interaction or calculate the exact click position on the track. For the max value, click on the rightmost edge of the track.
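A keyboard-driven approach tends to be the most deterministic here. The sketch below uses Playwright's keyboard API against a Radix-style slider thumb; the test id and the assumption that the thumb responds to Home/End/Arrow keys are illustrative, not confirmed from this codebase:

```typescript
// Hedged sketch: "concurrency-slider" is a hypothetical test id, and the
// Home/End behaviour assumes a Radix-style slider thumb with keyboard support.
const sliderThumb = page.locator('[data-testid="concurrency-slider"] [role="slider"]');
await sliderThumb.focus();
await sliderThumb.press("End");        // jump to the maximum value
await sliderThumb.press("Home");       // jump back to the minimum value
await sliderThumb.press("ArrowRight"); // step up by one increment
```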
### Issue: Port binding blocked in sandbox mode

**Problem:** Playwright tests couldn't bind to a port in sandbox mode.
**Fix:** Tests don't need the sandbox disabled - the issue was the TEST_REUSE_SERVER environment variable. Make sure to start the dev server separately or let Playwright's webServer config handle it.
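As a reference point, Playwright's built-in webServer option can own the dev-server lifecycle, and an environment variable such as the TEST_REUSE_SERVER flag mentioned above can gate reuse. This is a sketch only; the command, URL, and env-var wiring are assumptions rather than this project's actual config:

```typescript
// playwright.config.ts (sketch) - command, URL, and env-var wiring are assumptions.
import { defineConfig } from "@playwright/test";

export default defineConfig({
  webServer: {
    command: "npm run dev",               // assumed dev-server command
    url: "http://localhost:3000",         // assumed port
    // Only reuse an already-running server when explicitly requested,
    // e.g. TEST_REUSE_SERVER=1 npm run test:e2e
    reuseExistingServer: process.env.TEST_REUSE_SERVER === "1",
    timeout: 120_000,
  },
});
```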
## Code Architecture

### Issue: Understanding store state persistence

**Problem:** Not all store state is persisted to localStorage.
**Fix:** Check the `partialize` function in `app-store.ts` to see which state is persisted:

```typescript
partialize: (state) => ({
  projects: state.projects,
  currentProject: state.currentProject,
  theme: state.theme,
  sidebarOpen: state.sidebarOpen,
  apiKeys: state.apiKeys,
  chatSessions: state.chatSessions,
  chatHistoryOpen: state.chatHistoryOpen,
  maxConcurrency: state.maxConcurrency, // Added for concurrency feature
});
```

Note: `currentView` is NOT persisted - it's managed through actions.

### Issue: Auto mode task lifecycle

**Problem:** Confusion about when features are considered "running" vs "in_progress".
**Fix:** Understand the task lifecycle (a sketch of the event handling follows the list):

1. Feature dragged to "in_progress" column → status becomes "in_progress"
2. `auto_mode_feature_start` event fires → feature added to `runningAutoTasks`
3. Agent works on feature → periodic events sent
4. `auto_mode_feature_complete` event fires → feature removed from `runningAutoTasks`
5. If `passes: true` → status becomes "verified", if `passes: false` → stays "in_progress"
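The lifecycle above can be summarized as a small reducer over the two auto-mode events. This is a hedged sketch, not the app's actual store code: the event names, `runningAutoTasks`, and the `passes` flag come from the notes above, while the state shape and helper names are assumptions:

```typescript
// Hedged sketch of the lifecycle, not the app's actual store code.
// Event names, `runningAutoTasks`, and `passes` come from the notes above;
// the state shape and helper names are assumed for illustration.
type AutoModeEvent =
  | { type: "auto_mode_feature_start"; featureId: string }
  | { type: "auto_mode_feature_complete"; featureId: string; passes: boolean };

interface BoardState {
  runningAutoTasks: Set<string>;
  statusById: Record<string, "backlog" | "in_progress" | "waiting_approval" | "verified">;
}

function applyAutoModeEvent(state: BoardState, event: AutoModeEvent): BoardState {
  const runningAutoTasks = new Set(state.runningAutoTasks);
  const statusById = { ...state.statusById };

  if (event.type === "auto_mode_feature_start") {
    runningAutoTasks.add(event.featureId);          // step 2
  } else {
    runningAutoTasks.delete(event.featureId);       // step 4
    statusById[event.featureId] = event.passes
      ? "verified"                                  // step 5: passes === true
      : "in_progress";                              // step 5: passes === false
  }
  return { runningAutoTasks, statusById };
}
```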
### Issue: waiting_approval features not draggable when skipTests=true

**Problem:** Features in `waiting_approval` status couldn't be dragged to the `verified` column, even though the code appeared to handle it.
**Fix:** The order of condition checks in `handleDragEnd` matters. The `skipTests` check was catching `waiting_approval` features before the `waiting_approval` status check could handle them. Move the `waiting_approval` status check **before** the `skipTests` check in `board-view.tsx`:

```typescript
// Correct order in handleDragEnd:
if (draggedFeature.status === "backlog") {
  // ...
} else if (draggedFeature.status === "waiting_approval") {
  // Handle waiting_approval BEFORE skipTests check
  // because waiting_approval features often have skipTests=true
} else if (draggedFeature.skipTests) {
  // Handle other skipTests features
}
```

## Best Practices Discovered

### Testing utilities are critical

Create comprehensive testing utilities in `tests/utils.ts` to avoid repeating selector logic (a sketch of one such helper follows this list):

- `waitForElement` - waits for elements to appear
- `waitForElementHidden` - waits for elements to disappear
- `setupMockProject` - sets up mock localStorage state
- `navigateToBoard` - handles navigation from welcome to board view
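A minimal version of the first two helpers might look like the sketch below, assuming the `data-testid` convention used throughout these notes; the default timeout is an assumption, not the project's actual value:

```typescript
// tests/utils.ts (sketch) - assumes elements carry data-testid attributes.
import { Page, expect } from "@playwright/test";

export async function waitForElement(page: Page, testId: string, timeout = 10_000) {
  // Resolves once the element is visible; throws if it never appears.
  await expect(page.locator(`[data-testid="${testId}"]`)).toBeVisible({ timeout });
}

export async function waitForElementHidden(page: Page, testId: string, timeout = 10_000) {
  // Resolves once the element is hidden or detached.
  await expect(page.locator(`[data-testid="${testId}"]`)).toBeHidden({ timeout });
}
```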
### Always add data-testid attributes

When implementing features, immediately add `data-testid` attributes to key UI elements. This makes tests more reliable and easier to write.

### Test timeouts should be generous but not excessive

- Default timeout: 30s (set in playwright.config.ts)
- Element waits: 5-15s for critical elements
- Auto mode completion: 10s (accounts for ~4s mock duration)
- Don't increase timeouts past 10s for individual operations

### Mock auto mode timing

The mock auto mode in `electron.ts` has predictable timing:

- Total duration: ~2.4 seconds (300+500+300+300+500+500ms)
- Plus a 1.5s delay before auto-closing modals
- Total: ~4 seconds from start to completion

### Issue: HotkeyButton conflicting with useKeyboardShortcuts

**Problem:** Adding `HotkeyButton` with a simple key (like "N") to buttons that already had keyboard shortcuts registered via `useKeyboardShortcuts` caused the hotkey to stop working. Both registered duplicate listeners, and the HotkeyButton's `stopPropagation()` call could interfere.
**Fix:** When a simple single-key hotkey is already handled by `useKeyboardShortcuts`, set `hotkeyActive={false}` on the `HotkeyButton` so it only displays the indicator badge without registering a duplicate listener:

```tsx
// In views that already use useKeyboardShortcuts for the "N" key:
<HotkeyButton
  onClick={() => setShowAddDialog(true)}
  hotkey={shortcuts.addFeature}
  hotkeyActive={false} // <-- Important! Prevents duplicate listener
>
  Add Feature
</HotkeyButton>

// HotkeyButton should only actively listen when it's the sole handler (e.g., Cmd+Enter in dialogs)
<HotkeyButton
  onClick={handleSubmit}
  hotkey={{ key: "Enter", cmdCtrl: true }}
  hotkeyActive={isDialogOpen} // Active when dialog is open
>
  Submit
</HotkeyButton>
```
74  .claude/commands/gh-issue.md  (Normal file)
@@ -0,0 +1,74 @@
# GitHub Issue Fix Command

Fetch a GitHub issue by number, verify it's a real issue, and fix it if valid.

## Usage

This command accepts a GitHub issue number as input (e.g., `123`).

## Instructions

1. **Get the issue number from the user**
   - The issue number should be provided as an argument to this command
   - If no number is provided, ask the user for it

2. **Fetch the GitHub issue**
   - Determine the current project path (check if there's a current project context)
   - Verify the project has a GitHub remote:
     ```bash
     git remote get-url origin
     ```
   - Fetch the issue details using GitHub CLI:
     ```bash
     gh issue view <ISSUE_NUMBER> --json number,title,state,author,createdAt,labels,url,body,assignees
     ```
   - If the command fails, report the error and stop

3. **Verify the issue is real and valid**
   - Check that the issue exists (not 404)
   - Check the issue state:
     - If **closed**: inform the user and ask if they still want to proceed
     - If **open**: proceed with validation
   - Review the issue content:
     - Read the title and body to understand what needs to be fixed
     - Check labels for context (bug, enhancement, etc.)
     - Note any assignees or linked PRs

4. **Validate the issue**
   - Determine if this is a legitimate issue that needs fixing:
     - Is the description clear and actionable?
     - Does it describe a real problem or feature request?
     - Are there any obvious signs it's spam or invalid?
   - If the issue seems invalid or unclear:
     - Report findings to the user
     - Ask if they want to proceed anyway
     - Stop if the user confirms it's not valid

5. **If the issue is valid, proceed to fix it**
   - Analyze what needs to be done based on the issue description
   - Check the current codebase state:
     - Run relevant tests to see current behavior
     - Check if the issue is already fixed
     - Look for related code that might need changes
   - Implement the fix:
     - Make necessary code changes
     - Update or add tests as needed
     - Ensure the fix addresses the issue description
   - Verify the fix:
     - Run tests to ensure nothing broke
     - If possible, manually verify the fix addresses the issue

6. **Report summary**
   - Issue number and title
   - Issue state (open/closed)
   - Whether the issue was validated as real
   - What was fixed (if anything)
   - Any tests that were updated or added
   - Next steps (if any)

## Error Handling

- If GitHub CLI (`gh`) is not installed or authenticated, report the error and stop
- If the project doesn't have a GitHub remote, report the error and stop
- If the issue number doesn't exist, report the error and stop
- If the issue is unclear or invalid, report findings and ask the user before proceeding
77  .claude/commands/release.md  (Normal file)
@@ -0,0 +1,77 @@
# Release Command

Bump the package.json version (major, minor, or patch) and build the Electron app with the new version.

## Usage

This command accepts a version bump type as input:

- `patch` - Bump patch version (0.1.0 -> 0.1.1)
- `minor` - Bump minor version (0.1.0 -> 0.2.0)
- `major` - Bump major version (0.1.0 -> 1.0.0)

## Instructions

1. **Get the bump type from the user**
   - The bump type should be provided as an argument (patch, minor, or major)
   - If no type is provided, ask the user which type they want

2. **Bump the version**
   - Run the version bump script:
     ```bash
     node apps/ui/scripts/bump-version.mjs <type>
     ```
   - This updates both `apps/ui/package.json` and `apps/server/package.json` with the new version (keeps them in sync)
   - Verify the version was updated correctly by checking the output

3. **Build the Electron app**
   - Run the electron build:
     ```bash
     npm run build:electron --workspace=apps/ui
     ```
   - The build process automatically:
     - Uses the version from `package.json` for artifact names (e.g., `Automaker-1.2.3-x64.zip`)
     - Injects the version into the app via Vite's `__APP_VERSION__` constant
     - Displays the version below the logo in the sidebar

4. **Commit the version bump**
   - Stage the updated package.json files:
     ```bash
     git add apps/ui/package.json apps/server/package.json
     ```
   - Commit with a release message:
     ```bash
     git commit -m "chore: release v<version>"
     ```

5. **Create and push the git tag**
   - Create an annotated tag for the release:
     ```bash
     git tag -a v<version> -m "Release v<version>"
     ```
   - Push the commit and tag to remote:
     ```bash
     git push && git push --tags
     ```

6. **Verify the release**
   - Check that the build completed successfully
   - Confirm the version appears correctly in the built artifacts
   - The version will be displayed in the app UI below the logo
   - Verify the tag is visible on the remote repository

## Version Centralization

The version is centralized and synchronized in both `apps/ui/package.json` and `apps/server/package.json`:

- **Electron builds**: Automatically read from `apps/ui/package.json` via electron-builder's `${version}` variable in `artifactName`
- **App display**: Injected at build time via Vite's `define` config as the `__APP_VERSION__` constant (defined in `apps/ui/vite.config.mts`; see the sketch after this section)
- **Server API**: Read from `apps/server/package.json` via the `apps/server/src/lib/version.ts` utility (used in health check endpoints)
- **Type safety**: Defined in `apps/ui/src/vite-env.d.ts` as `declare const __APP_VERSION__: string`

This ensures consistency across:

- Build artifact names (e.g., `Automaker-1.2.3-x64.zip`)
- App UI display (shown as `v1.2.3` below the logo in `apps/ui/src/components/layout/sidebar/components/automaker-logo.tsx`)
- Server health endpoints (`/` and `/detailed`)
- Package metadata (both UI and server packages stay in sync)
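As a reference, Vite's `define` mechanism mentioned above typically looks something like the sketch below. The actual contents of `apps/ui/vite.config.mts` are not shown in this diff, so treat the details (package.json resolution, omitted plugins) as assumptions:

```typescript
// vite.config.mts (sketch) - illustrates define-based version injection.
// The real config in apps/ui/vite.config.mts may differ; paths and the
// absence of plugins here are assumptions for illustration.
import { defineConfig } from "vite";
import { readFileSync } from "node:fs";

const pkg = JSON.parse(readFileSync(new URL("./package.json", import.meta.url), "utf-8"));

export default defineConfig({
  define: {
    // Available in app code as the global constant __APP_VERSION__
    __APP_VERSION__: JSON.stringify(pkg.version),
  },
});
```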
49  .claude/commands/validate-build.md  (Normal file)
@@ -0,0 +1,49 @@
# Project Build and Fix Command

Run all builds and intelligently fix any failures based on what changed.

## Instructions

1. **Run the build**

   ```bash
   npm run build
   ```

   This builds all packages and the UI application.

2. **If the build succeeds**, report success and stop.

3. **If the build fails**, analyze the failures:
   - Note which build step failed and the error messages
   - Check for TypeScript compilation errors, missing dependencies, or configuration issues
   - Run `git diff main` to see what code has changed

4. **Determine the nature of the failure**:
   - **If the failure is due to intentional changes** (new features, refactoring, dependency updates):
     - Fix any TypeScript type errors introduced by the changes
     - Update build configuration if needed (e.g., tsconfig.json, vite.config.mts)
     - Ensure all new dependencies are properly installed
     - Fix import paths or module resolution issues

   - **If the failure appears to be a regression** (broken imports, missing files, configuration errors):
     - Fix the source code to restore the build
     - Check for accidentally deleted files or broken references
     - Verify build configuration files are correct

5. **Common build issues to check**:
   - **TypeScript errors**: Fix type mismatches, missing types, or incorrect imports
   - **Missing dependencies**: Run `npm install` if packages are missing
   - **Import/export errors**: Fix incorrect import paths or missing exports
   - **Build configuration**: Check tsconfig.json, vite.config.mts, or other build configs
   - **Package build order**: Ensure `build:packages` completes before building apps

6. **How to decide if it's intentional vs regression**:
   - Look at the git diff and commit messages
   - If the change was deliberate and introduced new code that needs fixing → fix the new code
   - If the change broke existing functionality that should still build → fix the regression
   - When in doubt, ask the user

7. **After making fixes**, re-run the build to verify everything compiles successfully.

8. **Report summary** of what was fixed (TypeScript errors, configuration issues, missing dependencies, etc.).
36  .claude/commands/validate-tests.md  (Normal file)
@@ -0,0 +1,36 @@
# Project Test and Fix Command

Run all tests and intelligently fix any failures based on what changed.

## Instructions

1. **Run all tests**

   ```bash
   npm run test:all
   ```

2. **If all tests pass**, report success and stop.

3. **If any tests fail**, analyze the failures:
   - Note which tests failed and their error messages
   - Run `git diff main` to see what code has changed

4. **Determine the nature of the change**:
   - **If the logic change is intentional** (new feature, refactor, behavior change):
     - Update the failing tests to match the new expected behavior
     - The tests should reflect what the code NOW does correctly

   - **If the logic change appears to be a bug** (regression, unintended side effect):
     - Fix the source code to restore the expected behavior
     - Do NOT modify the tests - they are catching a real bug

5. **How to decide if it's a bug vs intentional change**:
   - Look at the git diff and commit messages
   - If the change was deliberate and the test expectations are now outdated → update tests
   - If the change broke existing functionality that should still work → fix the code
   - When in doubt, ask the user

6. **After making fixes**, re-run the tests to verify everything passes.

7. **Report summary** of what was fixed (tests updated vs code fixed).
@@ -1,24 +0,0 @@
{
  "sandbox": {
    "enabled": true,
    "autoAllowBashIfSandboxed": true
  },
  "permissions": {
    "defaultMode": "acceptEdits",
    "allow": [
      "Read(./**)",
      "Write(./**)",
      "Edit(./**)",
      "Glob(./**)",
      "Grep(./**)",
      "Bash(*)",
      "mcp__puppeteer__puppeteer_navigate",
      "mcp__puppeteer__puppeteer_screenshot",
      "mcp__puppeteer__puppeteer_click",
      "mcp__puppeteer__puppeteer_fill",
      "mcp__puppeteer__puppeteer_select",
      "mcp__puppeteer__puppeteer_hover",
      "mcp__puppeteer__puppeteer_evaluate"
    ]
  }
}
117  .github/ISSUE_TEMPLATE/bug_report.yml  (vendored, Normal file)
@@ -0,0 +1,117 @@
name: Bug Report
description: File a bug report to help us improve Automaker
title: '[Bug]: '
labels: ['bug']
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to report a bug! Please fill out the form below with as much detail as possible.

  - type: dropdown
    id: operating-system
    attributes:
      label: Operating System
      description: What operating system are you using?
      options:
        - macOS
        - Windows
        - Linux
        - Other
      default: 0
    validations:
      required: true

  - type: dropdown
    id: run-mode
    attributes:
      label: Run Mode
      description: How are you running Automaker?
      options:
        - Electron (Desktop App)
        - Web (Browser)
        - Docker
      default: 0
    validations:
      required: true

  - type: input
    id: app-version
    attributes:
      label: App Version
      description: What version of Automaker are you using? (e.g., 0.1.0)
      placeholder: '0.1.0'
    validations:
      required: true

  - type: textarea
    id: bug-description
    attributes:
      label: Bug Description
      description: A clear and concise description of what the bug is.
      placeholder: Describe the bug...
    validations:
      required: true

  - type: textarea
    id: steps-to-reproduce
    attributes:
      label: Steps to Reproduce
      description: Steps to reproduce the behavior
      placeholder: |
        1. Go to '...'
        2. Click on '...'
        3. Scroll down to '...'
        4. See error
    validations:
      required: true

  - type: textarea
    id: expected-behavior
    attributes:
      label: Expected Behavior
      description: A clear and concise description of what you expected to happen.
      placeholder: What should have happened?
    validations:
      required: true

  - type: textarea
    id: actual-behavior
    attributes:
      label: Actual Behavior
      description: A clear and concise description of what actually happened.
      placeholder: What actually happened?
    validations:
      required: true

  - type: textarea
    id: screenshots
    attributes:
      label: Screenshots
      description: If applicable, add screenshots to help explain your problem.
      placeholder: Drag and drop screenshots here or paste image URLs

  - type: textarea
    id: logs
    attributes:
      label: Relevant Logs
      description: If applicable, paste relevant logs or error messages.
      placeholder: Paste logs here...
      render: shell

  - type: textarea
    id: additional-context
    attributes:
      label: Additional Context
      description: Add any other context about the problem here.
      placeholder: Any additional information that might be helpful...

  - type: checkboxes
    id: terms
    attributes:
      label: Checklist
      options:
        - label: I have searched existing issues to ensure this bug hasn't been reported already
          required: true
        - label: I have provided all required information above
          required: true
71
.github/actions/setup-project/action.yml
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
name: 'Setup Project'
|
||||
description: 'Common setup steps for CI workflows - checkout, Node.js, dependencies, and native modules'
|
||||
|
||||
inputs:
|
||||
node-version:
|
||||
description: 'Node.js version to use'
|
||||
required: false
|
||||
default: '22'
|
||||
check-lockfile:
|
||||
description: 'Run lockfile lint check for SSH URLs'
|
||||
required: false
|
||||
default: 'false'
|
||||
rebuild-node-pty-path:
|
||||
description: 'Working directory for node-pty rebuild (empty = root)'
|
||||
required: false
|
||||
default: ''
|
||||
|
||||
runs:
|
||||
using: 'composite'
|
||||
steps:
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: ${{ inputs.node-version }}
|
||||
cache: 'npm'
|
||||
cache-dependency-path: package-lock.json
|
||||
|
||||
- name: Check for SSH URLs in lockfile
|
||||
if: inputs.check-lockfile == 'true'
|
||||
shell: bash
|
||||
run: npm run lint:lockfile
|
||||
|
||||
- name: Configure Git for HTTPS
|
||||
shell: bash
|
||||
# Convert SSH URLs to HTTPS for git dependencies (e.g., @electron/node-gyp)
|
||||
# This is needed because SSH authentication isn't available in CI
|
||||
run: git config --global url."https://github.com/".insteadOf "git@github.com:"
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
# Use npm install instead of npm ci to correctly resolve platform-specific
|
||||
# optional dependencies (e.g., @tailwindcss/oxide, lightningcss binaries)
|
||||
# Skip scripts to avoid electron-builder install-app-deps which uses too much memory
|
||||
run: npm install --ignore-scripts
|
||||
|
||||
- name: Install Linux native bindings
|
||||
shell: bash
|
||||
# Workaround for npm optional dependencies bug (npm/cli#4828)
|
||||
# Explicitly install Linux bindings needed for build tools
|
||||
run: |
|
||||
npm install --no-save --force --ignore-scripts \
|
||||
@rollup/rollup-linux-x64-gnu@4.53.3 \
|
||||
@tailwindcss/oxide-linux-x64-gnu@4.1.17
|
||||
|
||||
- name: Build shared packages
|
||||
shell: bash
|
||||
# Build shared packages (types, utils, platform, etc.) before apps can use them
|
||||
run: npm run build:packages
|
||||
|
||||
- name: Rebuild native modules (root)
|
||||
if: inputs.rebuild-node-pty-path == ''
|
||||
shell: bash
|
||||
# Rebuild node-pty and other native modules for Electron
|
||||
run: npm rebuild node-pty
|
||||
|
||||
- name: Rebuild native modules (workspace)
|
||||
if: inputs.rebuild-node-pty-path != ''
|
||||
shell: bash
|
||||
# Rebuild node-pty and other native modules needed for server
|
||||
run: npm rebuild node-pty
|
||||
working-directory: ${{ inputs.rebuild-node-pty-path }}
|
||||
355
.github/scripts/upload-to-r2.js
vendored
Normal file
@@ -0,0 +1,355 @@
|
||||
const { S3Client, PutObjectCommand, GetObjectCommand } = require('@aws-sdk/client-s3');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const https = require('https');
|
||||
const { pipeline } = require('stream/promises');
|
||||
|
||||
const s3Client = new S3Client({
|
||||
region: 'auto',
|
||||
endpoint: process.env.R2_ENDPOINT,
|
||||
credentials: {
|
||||
accessKeyId: process.env.R2_ACCESS_KEY_ID,
|
||||
secretAccessKey: process.env.R2_SECRET_ACCESS_KEY,
|
||||
},
|
||||
});
|
||||
|
||||
const BUCKET = process.env.R2_BUCKET_NAME;
|
||||
const PUBLIC_URL = process.env.R2_PUBLIC_URL;
|
||||
const VERSION = process.env.RELEASE_VERSION;
|
||||
const RELEASE_TAG = process.env.RELEASE_TAG || `v${VERSION}`;
|
||||
const GITHUB_REPO = process.env.GITHUB_REPOSITORY;
|
||||
|
||||
async function fetchExistingReleases() {
|
||||
try {
|
||||
const response = await s3Client.send(
|
||||
new GetObjectCommand({
|
||||
Bucket: BUCKET,
|
||||
Key: 'releases.json',
|
||||
})
|
||||
);
|
||||
const body = await response.Body.transformToString();
|
||||
return JSON.parse(body);
|
||||
} catch (error) {
|
||||
if (error.name === 'NoSuchKey' || error.$metadata?.httpStatusCode === 404) {
|
||||
console.log('No existing releases.json found, creating new one');
|
||||
return { latestVersion: null, releases: [] };
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function uploadFile(localPath, r2Key, contentType) {
|
||||
const fileBuffer = fs.readFileSync(localPath);
|
||||
const stats = fs.statSync(localPath);
|
||||
|
||||
await s3Client.send(
|
||||
new PutObjectCommand({
|
||||
Bucket: BUCKET,
|
||||
Key: r2Key,
|
||||
Body: fileBuffer,
|
||||
ContentType: contentType,
|
||||
})
|
||||
);
|
||||
|
||||
console.log(`Uploaded: ${r2Key} (${stats.size} bytes)`);
|
||||
return stats.size;
|
||||
}
|
||||
|
||||
function findArtifacts(dir, pattern) {
|
||||
if (!fs.existsSync(dir)) return [];
|
||||
const files = fs.readdirSync(dir);
|
||||
return files.filter((f) => pattern.test(f)).map((f) => path.join(dir, f));
|
||||
}
|
||||
|
||||
async function checkUrlAccessible(url, maxRetries = 10, initialDelay = 1000) {
|
||||
for (let attempt = 0; attempt < maxRetries; attempt++) {
|
||||
try {
|
||||
const result = await new Promise((resolve, reject) => {
|
||||
const request = https.get(url, { timeout: 10000 }, (response) => {
|
||||
const statusCode = response.statusCode;
|
||||
|
||||
// Follow redirects
|
||||
if (
|
||||
statusCode === 302 ||
|
||||
statusCode === 301 ||
|
||||
statusCode === 307 ||
|
||||
statusCode === 308
|
||||
) {
|
||||
const redirectUrl = response.headers.location;
|
||||
response.destroy();
|
||||
if (!redirectUrl) {
|
||||
resolve({
|
||||
accessible: false,
|
||||
statusCode,
|
||||
error: 'Redirect without location header',
|
||||
});
|
||||
return;
|
||||
}
|
||||
// Follow the redirect URL
|
||||
return https
|
||||
.get(redirectUrl, { timeout: 10000 }, (redirectResponse) => {
|
||||
const redirectStatus = redirectResponse.statusCode;
|
||||
const contentType = redirectResponse.headers['content-type'] || '';
|
||||
// Check if it's actually a file (zip/tar.gz) and not HTML
|
||||
const isFile =
|
||||
contentType.includes('application/zip') ||
|
||||
contentType.includes('application/gzip') ||
|
||||
contentType.includes('application/x-gzip') ||
|
||||
contentType.includes('application/x-tar') ||
|
||||
redirectUrl.includes('.zip') ||
|
||||
redirectUrl.includes('.tar.gz');
|
||||
const isGood = redirectStatus >= 200 && redirectStatus < 300 && isFile;
|
||||
redirectResponse.destroy();
|
||||
resolve({
|
||||
accessible: isGood,
|
||||
statusCode: redirectStatus,
|
||||
finalUrl: redirectUrl,
|
||||
contentType,
|
||||
});
|
||||
})
|
||||
.on('error', (error) => {
|
||||
resolve({
|
||||
accessible: false,
|
||||
statusCode,
|
||||
error: error.message,
|
||||
});
|
||||
})
|
||||
.on('timeout', function () {
|
||||
this.destroy();
|
||||
resolve({
|
||||
accessible: false,
|
||||
statusCode,
|
||||
error: 'Timeout following redirect',
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Check if status is good (200-299 range) and it's actually a file
|
||||
const contentType = response.headers['content-type'] || '';
|
||||
const isFile =
|
||||
contentType.includes('application/zip') ||
|
||||
contentType.includes('application/gzip') ||
|
||||
contentType.includes('application/x-gzip') ||
|
||||
contentType.includes('application/x-tar') ||
|
||||
url.includes('.zip') ||
|
||||
url.includes('.tar.gz');
|
||||
const isGood = statusCode >= 200 && statusCode < 300 && isFile;
|
||||
response.destroy();
|
||||
resolve({ accessible: isGood, statusCode, contentType });
|
||||
});
|
||||
|
||||
request.on('error', (error) => {
|
||||
resolve({
|
||||
accessible: false,
|
||||
statusCode: null,
|
||||
error: error.message,
|
||||
});
|
||||
});
|
||||
|
||||
request.on('timeout', () => {
|
||||
request.destroy();
|
||||
resolve({
|
||||
accessible: false,
|
||||
statusCode: null,
|
||||
error: 'Request timeout',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
if (result.accessible) {
|
||||
if (attempt > 0) {
|
||||
console.log(
|
||||
`✓ URL ${url} is now accessible after ${attempt} retries (status: ${result.statusCode})`
|
||||
);
|
||||
} else {
|
||||
console.log(`✓ URL ${url} is accessible (status: ${result.statusCode})`);
|
||||
}
|
||||
return result.finalUrl || url; // Return the final URL (after redirects) if available
|
||||
} else {
|
||||
const errorMsg = result.error ? ` - ${result.error}` : '';
|
||||
const statusMsg = result.statusCode ? ` (status: ${result.statusCode})` : '';
|
||||
const contentTypeMsg = result.contentType ? ` [content-type: ${result.contentType}]` : '';
|
||||
console.log(`✗ URL ${url} not accessible${statusMsg}${contentTypeMsg}${errorMsg}`);
|
||||
}
|
||||
} catch (error) {
|
||||
console.log(`✗ URL ${url} check failed: ${error.message}`);
|
||||
}
|
||||
|
||||
if (attempt < maxRetries - 1) {
|
||||
const delay = initialDelay * Math.pow(2, attempt);
|
||||
console.log(` Retrying in ${delay}ms... (attempt ${attempt + 1}/${maxRetries})`);
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(`URL ${url} is not accessible after ${maxRetries} attempts`);
|
||||
}
|
||||
|
||||
async function downloadFromGitHub(url, outputPath) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const request = https.get(url, { timeout: 30000 }, (response) => {
|
||||
const statusCode = response.statusCode;
|
||||
|
||||
// Follow redirects (all redirect types)
|
||||
if (statusCode === 301 || statusCode === 302 || statusCode === 307 || statusCode === 308) {
|
||||
const redirectUrl = response.headers.location;
|
||||
response.destroy();
|
||||
if (!redirectUrl) {
|
||||
reject(new Error(`Redirect without location header for ${url}`));
|
||||
return;
|
||||
}
|
||||
// Resolve relative redirects
|
||||
const finalRedirectUrl = redirectUrl.startsWith('http')
|
||||
? redirectUrl
|
||||
: new URL(redirectUrl, url).href;
|
||||
console.log(` Following redirect: ${finalRedirectUrl}`);
|
||||
return downloadFromGitHub(finalRedirectUrl, outputPath).then(resolve).catch(reject);
|
||||
}
|
||||
|
||||
if (statusCode !== 200) {
|
||||
response.destroy();
|
||||
reject(new Error(`Failed to download ${url}: ${statusCode} ${response.statusMessage}`));
|
||||
return;
|
||||
}
|
||||
|
||||
const fileStream = fs.createWriteStream(outputPath);
|
||||
response.pipe(fileStream);
|
||||
fileStream.on('finish', () => {
|
||||
fileStream.close();
|
||||
resolve();
|
||||
});
|
||||
fileStream.on('error', (error) => {
|
||||
response.destroy();
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
|
||||
request.on('error', reject);
|
||||
request.on('timeout', () => {
|
||||
request.destroy();
|
||||
reject(new Error(`Request timeout for ${url}`));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const artifactsDir = 'artifacts';
|
||||
const tempDir = path.join(artifactsDir, 'temp');
|
||||
|
||||
// Create temp directory for downloaded GitHub archives
|
||||
if (!fs.existsSync(tempDir)) {
|
||||
fs.mkdirSync(tempDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Download source archives from GitHub
|
||||
const githubZipUrl = `https://github.com/${GITHUB_REPO}/archive/refs/tags/${RELEASE_TAG}.zip`;
|
||||
const githubTarGzUrl = `https://github.com/${GITHUB_REPO}/archive/refs/tags/${RELEASE_TAG}.tar.gz`;
|
||||
|
||||
const sourceZipPath = path.join(tempDir, `automaker-${VERSION}.zip`);
|
||||
const sourceTarGzPath = path.join(tempDir, `automaker-${VERSION}.tar.gz`);
|
||||
|
||||
console.log(`Waiting for source archives to be available on GitHub...`);
|
||||
console.log(` ZIP: ${githubZipUrl}`);
|
||||
console.log(` TAR.GZ: ${githubTarGzUrl}`);
|
||||
|
||||
// Wait for archives to be accessible with exponential backoff
|
||||
// This returns the final URL after following redirects
|
||||
const finalZipUrl = await checkUrlAccessible(githubZipUrl);
|
||||
const finalTarGzUrl = await checkUrlAccessible(githubTarGzUrl);
|
||||
|
||||
console.log(`Downloading source archives from GitHub...`);
|
||||
await downloadFromGitHub(finalZipUrl, sourceZipPath);
|
||||
await downloadFromGitHub(finalTarGzUrl, sourceTarGzPath);
|
||||
|
||||
console.log(`Downloaded source archives successfully`);
|
||||
|
||||
// Find all artifacts
|
||||
const artifacts = {
|
||||
windows: findArtifacts(path.join(artifactsDir, 'windows-builds'), /\.exe$/),
|
||||
macos: findArtifacts(path.join(artifactsDir, 'macos-builds'), /-x64\.dmg$/),
|
||||
macosArm: findArtifacts(path.join(artifactsDir, 'macos-builds'), /-arm64\.dmg$/),
|
||||
linux: findArtifacts(path.join(artifactsDir, 'linux-builds'), /\.AppImage$/),
|
||||
sourceZip: [sourceZipPath],
|
||||
sourceTarGz: [sourceTarGzPath],
|
||||
};
|
||||
|
||||
console.log('Found artifacts:');
|
||||
for (const [platform, files] of Object.entries(artifacts)) {
|
||||
console.log(
|
||||
` ${platform}: ${files.length > 0 ? files.map((f) => path.basename(f)).join(', ') : 'none'}`
|
||||
);
|
||||
}
|
||||
|
||||
// Upload each artifact to R2
|
||||
const assets = {};
|
||||
const contentTypes = {
|
||||
windows: 'application/x-msdownload',
|
||||
macos: 'application/x-apple-diskimage',
|
||||
macosArm: 'application/x-apple-diskimage',
|
||||
linux: 'application/x-executable',
|
||||
sourceZip: 'application/zip',
|
||||
sourceTarGz: 'application/gzip',
|
||||
};
|
||||
|
||||
for (const [platform, files] of Object.entries(artifacts)) {
|
||||
if (files.length === 0) {
|
||||
console.warn(`Warning: No artifact found for ${platform}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Use the first matching file for each platform
|
||||
const localPath = files[0];
|
||||
const filename = path.basename(localPath);
|
||||
const r2Key = `releases/${VERSION}/${filename}`;
|
||||
const size = await uploadFile(localPath, r2Key, contentTypes[platform]);
|
||||
|
||||
assets[platform] = {
|
||||
url: `${PUBLIC_URL}/releases/${VERSION}/${filename}`,
|
||||
filename,
|
||||
size,
|
||||
arch:
|
||||
platform === 'macosArm'
|
||||
? 'arm64'
|
||||
: platform === 'sourceZip' || platform === 'sourceTarGz'
|
||||
? 'source'
|
||||
: 'x64',
|
||||
};
|
||||
}
|
||||
|
||||
// Fetch and update releases.json
|
||||
const releasesData = await fetchExistingReleases();
|
||||
|
||||
const newRelease = {
|
||||
version: VERSION,
|
||||
date: new Date().toISOString(),
|
||||
assets,
|
||||
githubReleaseUrl: `https://github.com/${GITHUB_REPO}/releases/tag/${RELEASE_TAG}`,
|
||||
};
|
||||
|
||||
// Remove existing entry for this version if re-running
|
||||
releasesData.releases = releasesData.releases.filter((r) => r.version !== VERSION);
|
||||
|
||||
// Prepend new release
|
||||
releasesData.releases.unshift(newRelease);
|
||||
releasesData.latestVersion = VERSION;
|
||||
|
||||
// Upload updated releases.json
|
||||
await s3Client.send(
|
||||
new PutObjectCommand({
|
||||
Bucket: BUCKET,
|
||||
Key: 'releases.json',
|
||||
Body: JSON.stringify(releasesData, null, 2),
|
||||
ContentType: 'application/json',
|
||||
CacheControl: 'public, max-age=60',
|
||||
})
|
||||
);
|
||||
|
||||
console.log('Successfully updated releases.json');
|
||||
console.log(`Latest version: ${VERSION}`);
|
||||
console.log(`Total releases: ${releasesData.releases.length}`);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error('Failed to upload to R2:', err);
|
||||
process.exit(1);
|
||||
});
|
||||
49
.github/workflows/claude.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
name: Claude Code
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_review_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
types: [opened, assigned]
|
||||
pull_request_review:
|
||||
types: [submitted]
|
||||
|
||||
jobs:
|
||||
claude:
|
||||
if: |
|
||||
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
|
||||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
issues: read
|
||||
id-token: write
|
||||
actions: read # Required for Claude to read CI results on PRs
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Run Claude Code
|
||||
id: claude
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
actions: read
|
||||
|
||||
# Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
|
||||
# prompt: 'Update the pull request description to include a summary of changes.'
|
||||
|
||||
# Optional: Add claude_args to customize behavior and configuration
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://code.claude.com/docs/en/cli-reference for available options
|
||||
# claude_args: '--allowed-tools Bash(gh pr:*)'
|
||||
77
.github/workflows/e2e-tests.yml
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
name: E2E Tests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- '*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
|
||||
jobs:
|
||||
e2e:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup project
|
||||
uses: ./.github/actions/setup-project
|
||||
with:
|
||||
check-lockfile: 'true'
|
||||
rebuild-node-pty-path: 'apps/server'
|
||||
|
||||
- name: Install Playwright browsers
|
||||
run: npx playwright install --with-deps chromium
|
||||
working-directory: apps/ui
|
||||
|
||||
- name: Build server
|
||||
run: npm run build --workspace=apps/server
|
||||
|
||||
- name: Start backend server
|
||||
run: npm run start --workspace=apps/server &
|
||||
env:
|
||||
PORT: 3008
|
||||
NODE_ENV: test
|
||||
|
||||
- name: Wait for backend server
|
||||
run: |
|
||||
echo "Waiting for backend server to be ready..."
|
||||
for i in {1..30}; do
|
||||
if curl -s http://localhost:3008/api/health > /dev/null 2>&1; then
|
||||
echo "Backend server is ready!"
|
||||
exit 0
|
||||
fi
|
||||
echo "Waiting... ($i/30)"
|
||||
sleep 1
|
||||
done
|
||||
echo "Backend server failed to start!"
|
||||
exit 1
|
||||
|
||||
- name: Run E2E tests
|
||||
# Playwright automatically starts the Vite frontend via webServer config
|
||||
# (see apps/ui/playwright.config.ts) - no need to start it manually
|
||||
run: npm run test --workspace=apps/ui
|
||||
env:
|
||||
CI: true
|
||||
VITE_SERVER_URL: http://localhost:3008
|
||||
VITE_SKIP_SETUP: 'true'
|
||||
|
||||
- name: Upload Playwright report
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: playwright-report
|
||||
path: apps/ui/playwright-report/
|
||||
retention-days: 7
|
||||
|
||||
- name: Upload test results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: test-results
|
||||
path: apps/ui/test-results/
|
||||
retention-days: 7
|
||||
31
.github/workflows/format-check.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Format Check
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- '*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
|
||||
jobs:
|
||||
format:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '22'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: package-lock.json
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm install --ignore-scripts
|
||||
|
||||
- name: Check formatting
|
||||
run: npm run format:check
|
||||
26
.github/workflows/pr-check.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: PR Build Check
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- '*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup project
|
||||
uses: ./.github/actions/setup-project
|
||||
with:
|
||||
check-lockfile: 'true'
|
||||
|
||||
- name: Run build:electron (dir only - faster CI)
|
||||
run: npm run build:electron:dir
|
||||
143
.github/workflows/release.yml
vendored
@@ -1,82 +1,111 @@
|
||||
name: Build and Release Electron App
|
||||
name: Release Build
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*.*.*' # Triggers on version tags like v1.0.0
|
||||
workflow_dispatch: # Allows manual triggering
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version to release (e.g., v1.0.0)'
|
||||
required: true
|
||||
default: 'v0.1.0'
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
build-and-release:
|
||||
build:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- os: macos-latest
|
||||
name: macOS
|
||||
- os: windows-latest
|
||||
name: Windows
|
||||
- os: ubuntu-latest
|
||||
name: Linux
|
||||
|
||||
os: [ubuntu-latest, macos-latest, windows-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
- name: Extract version from tag
|
||||
id: version
|
||||
shell: bash
|
||||
run: |
|
||||
# Remove 'v' prefix if present (e.g., "v1.2.3" -> "1.2.3")
|
||||
VERSION="${{ github.event.release.tag_name }}"
|
||||
VERSION="${VERSION#v}"
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "Extracted version: ${VERSION}"
|
||||
|
||||
- name: Update package.json version
|
||||
shell: bash
|
||||
run: |
|
||||
node apps/ui/scripts/update-version.mjs "${{ steps.version.outputs.version }}"
|
||||
|
||||
- name: Setup project
|
||||
uses: ./.github/actions/setup-project
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: app/package-lock.json
|
||||
check-lockfile: 'true'
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: ./app
|
||||
run: npm ci
|
||||
|
||||
- name: Build Electron App (macOS)
|
||||
- name: Build Electron app (macOS)
|
||||
if: matrix.os == 'macos-latest'
|
||||
working-directory: ./app
|
||||
shell: bash
|
||||
run: npm run build:electron:mac --workspace=apps/ui
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: npm run build:electron -- --mac --x64 --arm64
|
||||
CSC_IDENTITY_AUTO_DISCOVERY: false
|
||||
|
||||
- name: Build Electron App (Windows)
|
||||
- name: Build Electron app (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
working-directory: ./app
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: npm run build:electron -- --win --x64
|
||||
shell: bash
|
||||
run: npm run build:electron:win --workspace=apps/ui
|
||||
|
||||
- name: Build Electron App (Linux)
|
||||
- name: Build Electron app (Linux)
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
working-directory: ./app
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: npm run build:electron -- --linux --x64
|
||||
shell: bash
|
||||
run: npm run build:electron:linux --workspace=apps/ui
|
||||
|
||||
- name: Upload Release Assets
|
||||
uses: softprops/action-gh-release@v1
|
||||
- name: Upload macOS artifacts
|
||||
if: matrix.os == 'macos-latest'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: macos-builds
|
||||
path: apps/ui/release/*.{dmg,zip}
|
||||
retention-days: 30
|
||||
|
||||
- name: Upload Windows artifacts
|
||||
if: matrix.os == 'windows-latest'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: windows-builds
|
||||
path: apps/ui/release/*.exe
|
||||
retention-days: 30
|
||||
|
||||
- name: Upload Linux artifacts
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: linux-builds
|
||||
path: apps/ui/release/*.{AppImage,deb}
|
||||
retention-days: 30
|
||||
|
||||
upload:
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.release.draft == false
|
||||
|
||||
steps:
|
||||
- name: Download macOS artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: macos-builds
|
||||
path: artifacts/macos-builds
|
||||
|
||||
- name: Download Windows artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: windows-builds
|
||||
path: artifacts/windows-builds
|
||||
|
||||
- name: Download Linux artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: linux-builds
|
||||
path: artifacts/linux-builds
|
||||
|
||||
- name: Upload to GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
tag_name: ${{ github.event.inputs.version || github.ref_name }}
|
||||
files: |
|
||||
app/dist/*.exe
|
||||
app/dist/*.dmg
|
||||
app/dist/*.AppImage
|
||||
app/dist/*.zip
|
||||
app/dist/*.deb
|
||||
app/dist/*.rpm
|
||||
draft: false
|
||||
prerelease: false
|
||||
artifacts/macos-builds/*
|
||||
artifacts/windows-builds/*
|
||||
artifacts/linux-builds/*
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
30
.github/workflows/security-audit.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: Security Audit
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- '*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
schedule:
|
||||
# Run weekly on Mondays at 9 AM UTC
|
||||
- cron: '0 9 * * 1'
|
||||
|
||||
jobs:
|
||||
audit:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup project
|
||||
uses: ./.github/actions/setup-project
|
||||
with:
|
||||
check-lockfile: 'true'
|
||||
|
||||
- name: Run npm audit
|
||||
run: npm audit --audit-level=moderate
|
||||
continue-on-error: false
|
||||
44
.github/workflows/test.yml
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
name: Test Suite
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- '*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup project
|
||||
uses: ./.github/actions/setup-project
|
||||
with:
|
||||
check-lockfile: 'true'
|
||||
rebuild-node-pty-path: 'apps/server'
|
||||
|
||||
- name: Run package tests
|
||||
run: npm run test:packages
|
||||
env:
|
||||
NODE_ENV: test
|
||||
|
||||
- name: Run server tests with coverage
|
||||
run: npm run test:server:coverage
|
||||
env:
|
||||
NODE_ENV: test
|
||||
|
||||
# - name: Upload coverage reports
|
||||
# uses: codecov/codecov-action@v4
|
||||
# if: always()
|
||||
# with:
|
||||
# files: ./apps/server/coverage/coverage-final.json
|
||||
# flags: server
|
||||
# name: server-coverage
|
||||
# env:
|
||||
# CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
86
.gitignore
vendored
@@ -1,2 +1,86 @@
|
||||
#added by trueheads > will remove once supercombo adds multi-os support
|
||||
#added by trueheads > will remove once supercombo adds multi-os support
|
||||
launch.sh
|
||||
|
||||
# Dependencies
|
||||
node_modules/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
build/
|
||||
out/
|
||||
.next/
|
||||
.turbo/
|
||||
|
||||
# Automaker
|
||||
.automaker/images/
|
||||
.automaker/
|
||||
/.automaker/*
|
||||
/.automaker/
|
||||
|
||||
.worktrees/
|
||||
|
||||
/logs
|
||||
# Logs
|
||||
logs/
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
|
||||
# OS-specific files
|
||||
.DS_Store
|
||||
.DS_Store?
|
||||
._*
|
||||
Thumbs.db
|
||||
ehthumbs.db
|
||||
Desktop.ini
|
||||
|
||||
# IDE/Editor configs
|
||||
.vscode/
|
||||
.idea/
|
||||
*.sublime-workspace
|
||||
*.sublime-project
|
||||
|
||||
# Editor backup/temp files
|
||||
*~
|
||||
*.bak
|
||||
*.backup
|
||||
*.orig
|
||||
*.swp
|
||||
*.swo
|
||||
*.tmp
|
||||
*.temp
|
||||
|
||||
# Local settings (user-specific)
|
||||
*.local.json
|
||||
|
||||
# Application state/backup
|
||||
backup.json
|
||||
|
||||
# Test artifacts
|
||||
test-results/
|
||||
coverage/
|
||||
.nyc_output/
|
||||
*.lcov
|
||||
playwright-report/
|
||||
blob-report/
|
||||
|
||||
# Environment files (keep .example)
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
!.env.example
|
||||
!.env.local.example
|
||||
|
||||
# TypeScript
|
||||
*.tsbuildinfo
|
||||
|
||||
# Misc
|
||||
*.pem
|
||||
|
||||
docker-compose.override.yml
|
||||
.claude/docker-compose.override.yml
|
||||
|
||||
pnpm-lock.yaml
|
||||
yarn.lock
|
||||
1
.husky/pre-commit
Executable file
@@ -0,0 +1 @@
|
||||
npx lint-staged
|
||||
16
.npmrc
Normal file
@@ -0,0 +1,16 @@
|
||||
# Cross-platform compatibility for Tailwind CSS v4 and lightningcss
|
||||
# These packages use platform-specific optional dependencies that npm
|
||||
# automatically resolves based on your OS (macOS, Linux, Windows, WSL)
|
||||
#
|
||||
# IMPORTANT: When switching platforms or getting platform mismatch errors:
|
||||
# 1. Delete node_modules: rm -rf node_modules apps/*/node_modules
|
||||
# 2. Run: npm install
|
||||
#
|
||||
# In CI/CD: Use "npm install" instead of "npm ci" to allow npm to resolve
|
||||
# the correct platform-specific binaries at install time.
|
||||
|
||||
# Include bindings for all platforms in package-lock.json to support CI/CD
|
||||
# This ensures Linux, macOS, and Windows bindings are all present
|
||||
# NOTE: Only enable when regenerating package-lock.json, then comment out to keep installs fast
|
||||
# supportedArchitectures.os=linux,darwin,win32
|
||||
# supportedArchitectures.cpu=x64,arm64
|
||||
39
.prettierignore
Normal file
@@ -0,0 +1,39 @@
|
||||
# Dependencies
|
||||
node_modules/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
build/
|
||||
out/
|
||||
.next/
|
||||
.turbo/
|
||||
release/
|
||||
|
||||
# Automaker
|
||||
.automaker/
|
||||
|
||||
# Logs
|
||||
logs/
|
||||
*.log
|
||||
|
||||
# Lock files
|
||||
package-lock.json
|
||||
pnpm-lock.yaml
|
||||
|
||||
# Generated files
|
||||
*.min.js
|
||||
*.min.css
|
||||
|
||||
# Test artifacts
|
||||
test-results/
|
||||
coverage/
|
||||
playwright-report/
|
||||
blob-report/
|
||||
|
||||
# IDE/Editor
|
||||
.vscode/
|
||||
.idea/
|
||||
|
||||
# Electron
|
||||
dist-electron/
|
||||
server-bundle/
|
||||
10
.prettierrc
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"semi": true,
|
||||
"singleQuote": true,
|
||||
"tabWidth": 2,
|
||||
"trailingComma": "es5",
|
||||
"printWidth": 100,
|
||||
"bracketSpacing": true,
|
||||
"arrowParens": "always",
|
||||
"endOfLine": "lf"
|
||||
}
|
||||
172
CLAUDE.md
Normal file
@@ -0,0 +1,172 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Automaker is an autonomous AI development studio built as an npm workspace monorepo. It provides a Kanban-based workflow where AI agents (powered by Claude Agent SDK) implement features in isolated git worktrees.
|
||||
|
||||
## Common Commands
|
||||
|
||||
```bash
|
||||
# Development
|
||||
npm run dev # Interactive launcher (choose web or electron)
|
||||
npm run dev:web # Web browser mode (localhost:3007)
|
||||
npm run dev:electron # Desktop app mode
|
||||
npm run dev:electron:debug # Desktop with DevTools open
|
||||
|
||||
# Building
|
||||
npm run build # Build web application
|
||||
npm run build:packages # Build all shared packages (required before other builds)
|
||||
npm run build:electron # Build desktop app for current platform
|
||||
npm run build:server # Build server only
|
||||
|
||||
# Testing
|
||||
npm run test # E2E tests (Playwright, headless)
|
||||
npm run test:headed # E2E tests with browser visible
|
||||
npm run test:server # Server unit tests (Vitest)
|
||||
npm run test:packages # All shared package tests
|
||||
npm run test:all # All tests (packages + server)
|
||||
|
||||
# Single test file
|
||||
npm run test:server -- tests/unit/specific.test.ts
|
||||
|
||||
# Linting and formatting
|
||||
npm run lint # ESLint
|
||||
npm run format # Prettier write
|
||||
npm run format:check # Prettier check
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Monorepo Structure
|
||||
|
||||
```
|
||||
automaker/
|
||||
├── apps/
|
||||
│ ├── ui/ # React + Vite + Electron frontend (port 3007)
|
||||
│ └── server/ # Express + WebSocket backend (port 3008)
|
||||
└── libs/ # Shared packages (@automaker/*)
|
||||
├── types/ # Core TypeScript definitions (no dependencies)
|
||||
├── utils/ # Logging, errors, image processing, context loading
|
||||
├── prompts/ # AI prompt templates
|
||||
├── platform/ # Path management, security, process spawning
|
||||
├── model-resolver/ # Claude model alias resolution
|
||||
├── dependency-resolver/ # Feature dependency ordering
|
||||
└── git-utils/ # Git operations & worktree management
|
||||
```
|
||||
|
||||
### Package Dependency Chain
|
||||
|
||||
Packages can only depend on packages above them:
|
||||
|
||||
```
|
||||
@automaker/types (no dependencies)
|
||||
↓
|
||||
@automaker/utils, @automaker/prompts, @automaker/platform, @automaker/model-resolver, @automaker/dependency-resolver
|
||||
↓
|
||||
@automaker/git-utils
|
||||
↓
|
||||
@automaker/server, @automaker/ui
|
||||
```
|
||||
|
||||
### Key Technologies
|
||||
|
||||
- **Frontend**: React 19, Vite 7, Electron 39, TanStack Router, Zustand 5, Tailwind CSS 4
|
||||
- **Backend**: Express 5, WebSocket (ws), Claude Agent SDK, node-pty
|
||||
- **Testing**: Playwright (E2E), Vitest (unit)
|
||||
|
||||
### Server Architecture
|
||||
|
||||
The server (`apps/server/src/`) follows a modular pattern:
|
||||
|
||||
- `routes/` - Express route handlers organized by feature (agent, features, auto-mode, worktree, etc.)
|
||||
- `services/` - Business logic (AgentService, AutoModeService, FeatureLoader, TerminalService)
|
||||
- `providers/` - AI provider abstraction (currently Claude via Claude Agent SDK)
|
||||
- `lib/` - Utilities (events, auth, worktree metadata)
|
||||
|
||||
### Frontend Architecture
|
||||
|
||||
The UI (`apps/ui/src/`) uses:
|
||||
|
||||
- `routes/` - TanStack Router file-based routing
|
||||
- `components/views/` - Main view components (board, settings, terminal, etc.)
|
||||
- `store/` - Zustand stores with persistence (app-store.ts, setup-store.ts)
|
||||
- `hooks/` - Custom React hooks
|
||||
- `lib/` - Utilities and API client
|
||||
|
||||
## Data Storage
|
||||
|
||||
### Per-Project Data (`.automaker/`)
|
||||
|
||||
```
|
||||
.automaker/
|
||||
├── features/ # Feature JSON files and images
|
||||
│ └── {featureId}/
|
||||
│ ├── feature.json
|
||||
│ ├── agent-output.md
|
||||
│ └── images/
|
||||
├── context/ # Context files for AI agents (CLAUDE.md, etc.)
|
||||
├── settings.json # Project-specific settings
|
||||
├── spec.md # Project specification
|
||||
└── analysis.json # Project structure analysis
|
||||
```
|
||||
|
||||
### Global Data (`DATA_DIR`, default `./data`)
|
||||
|
||||
```
|
||||
data/
|
||||
├── settings.json # Global settings, profiles, shortcuts
|
||||
├── credentials.json # API keys
|
||||
├── sessions-metadata.json # Chat session metadata
|
||||
└── agent-sessions/ # Conversation histories
|
||||
```
|
||||
|
||||
## Import Conventions
|
||||
|
||||
Always import from shared packages, never from old paths:
|
||||
|
||||
```typescript
|
||||
// ✅ Correct
|
||||
import type { Feature, ExecuteOptions } from '@automaker/types';
|
||||
import { createLogger, classifyError } from '@automaker/utils';
|
||||
import { getEnhancementPrompt } from '@automaker/prompts';
|
||||
import { getFeatureDir, ensureAutomakerDir } from '@automaker/platform';
|
||||
import { resolveModelString } from '@automaker/model-resolver';
|
||||
import { resolveDependencies } from '@automaker/dependency-resolver';
|
||||
import { getGitRepositoryDiffs } from '@automaker/git-utils';
|
||||
|
||||
// ❌ Never import from old paths
|
||||
import { Feature } from '../services/feature-loader'; // Wrong
|
||||
import { createLogger } from '../lib/logger'; // Wrong
|
||||
```
|
||||
|
||||
## Key Patterns
|
||||
|
||||
### Event-Driven Architecture
|
||||
|
||||
All server operations emit events that stream to the frontend via WebSocket. Events are created using `createEventEmitter()` from `lib/events.ts`.
|
||||
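A minimal sketch of what emitting events from a server operation might look like. The exact `createEventEmitter()` signature in `lib/events.ts` and the event names below are assumptions for illustration, not the confirmed API.

```typescript
// Illustrative sketch -- the real createEventEmitter() API may differ.
import { createEventEmitter } from '../lib/events';

const events = createEventEmitter();

async function runFeature(featureId: string) {
  // Assumed event names; actual event types live in @automaker/types.
  events.emit('feature:started', { featureId });
  try {
    // ... agent work streams progress here ...
    events.emit('feature:completed', { featureId });
  } catch (error) {
    events.emit('feature:failed', { featureId, error: String(error) });
  }
}
```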
|
||||
### Git Worktree Isolation
|
||||
|
||||
Each feature executes in an isolated git worktree, created via `@automaker/git-utils`. This protects the main branch during AI agent execution.
|
||||
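As a rough sketch of the worktree-per-feature idea: the helper names below (`createWorktree`, `removeWorktree`) are hypothetical, since the only `@automaker/git-utils` export named in this file is `getGitRepositoryDiffs`.

```typescript
// Hypothetical helpers -- the actual @automaker/git-utils API may use different names.
import { createWorktree, removeWorktree } from '@automaker/git-utils';

async function runInIsolation(projectPath: string, featureId: string) {
  // Each feature gets its own worktree so agent edits never touch the main branch.
  const worktree = await createWorktree(projectPath, `feature/${featureId}`);
  try {
    // ... run the AI agent against worktree.path ...
  } finally {
    await removeWorktree(projectPath, worktree.path);
  }
}
```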
|
||||
### Context Files
|
||||
|
||||
Project-specific rules are stored in `.automaker/context/` and automatically loaded into agent prompts via `loadContextFiles()` from `@automaker/utils`.
|
||||
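A sketch of how loaded context might be folded into a prompt; the parameters and return shape of `loadContextFiles()` are assumed here for illustration.

```typescript
import { loadContextFiles } from '@automaker/utils';

// Assumed shape: loadContextFiles() reads .automaker/context/ and returns file contents.
async function buildAgentPrompt(projectPath: string, task: string): Promise<string> {
  const contextFiles = await loadContextFiles(projectPath);
  const rules = contextFiles.map((f) => `## ${f.name}\n${f.content}`).join('\n\n');
  return `${rules}\n\n# Task\n${task}`;
}
```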
|
||||
### Model Resolution
|
||||
|
||||
Use `resolveModelString()` from `@automaker/model-resolver` to convert model aliases:
|
||||
|
||||
- `haiku` → `claude-haiku-4-5`
|
||||
- `sonnet` → `claude-sonnet-4-20250514`
|
||||
- `opus` → `claude-opus-4-5-20251101`
|
||||
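For illustration, resolution might be used as follows; the return values mirror the alias table above, while the exact call signature is an assumption.

```typescript
import { resolveModelString } from '@automaker/model-resolver';

// Aliases resolve to full model IDs per the table above.
const model = resolveModelString('sonnet'); // e.g. 'claude-sonnet-4-20250514'

// Full model IDs are expected to pass through unchanged.
const explicit = resolveModelString('claude-opus-4-5-20251101');
```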
|
||||
## Environment Variables
|
||||
|
||||
- `ANTHROPIC_API_KEY` - Anthropic API key (or use Claude Code CLI auth)
|
||||
- `PORT` - Server port (default: 3008)
|
||||
- `DATA_DIR` - Data storage directory (default: ./data)
|
||||
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
||||
- `AUTOMAKER_MOCK_AGENT=true` - Enable mock agent mode for CI testing
|
||||
685
CONTRIBUTING.md
Normal file
@@ -0,0 +1,685 @@
|
||||
# Contributing to Automaker
|
||||
|
||||
Thank you for your interest in contributing to Automaker! We're excited to have you join our community of developers building the future of autonomous AI development.
|
||||
|
||||
Automaker is an autonomous AI development studio that provides a Kanban-based workflow where AI agents implement features in isolated git worktrees. Whether you're fixing bugs, adding features, improving documentation, or suggesting ideas, your contributions help make this project better for everyone.
|
||||
|
||||
This guide will help you get started with contributing to Automaker. Please take a moment to read through these guidelines to ensure a smooth contribution process.
|
||||
|
||||
## Contribution License Agreement
|
||||
|
||||
**Important:** By submitting, pushing, or contributing any code, documentation, pull requests, issues, or other materials to the Automaker project, you agree to assign all right, title, and interest in and to your contributions, including all copyrights, patents, and other intellectual property rights, to the Core Contributors of Automaker. This assignment is irrevocable and includes the right to use, modify, distribute, and monetize your contributions in any manner.
|
||||
|
||||
**You understand and agree that you will have no right to receive any royalties, compensation, or other financial benefits from any revenue, income, or commercial use generated from your contributed code or any derivative works thereof.** All contributions are made without expectation of payment or financial return.
|
||||
|
||||
For complete details on contribution terms and rights assignment, please review [Section 5 (CONTRIBUTIONS AND RIGHTS ASSIGNMENT) of the LICENSE](LICENSE#5-contributions-and-rights-assignment).
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Contributing to Automaker](#contributing-to-automaker)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Fork and Clone](#fork-and-clone)
|
||||
- [Development Setup](#development-setup)
|
||||
- [Project Structure](#project-structure)
|
||||
- [Pull Request Process](#pull-request-process)
|
||||
- [Branch Naming Convention](#branch-naming-convention)
|
||||
- [Commit Message Format](#commit-message-format)
|
||||
- [Submitting a Pull Request](#submitting-a-pull-request)
|
||||
- [1. Prepare Your Changes](#1-prepare-your-changes)
|
||||
- [2. Run Pre-submission Checks](#2-run-pre-submission-checks)
|
||||
- [3. Push Your Changes](#3-push-your-changes)
|
||||
- [4. Open a Pull Request](#4-open-a-pull-request)
|
||||
- [PR Requirements Checklist](#pr-requirements-checklist)
|
||||
- [Review Process](#review-process)
|
||||
- [What to Expect](#what-to-expect)
|
||||
- [Review Focus Areas](#review-focus-areas)
|
||||
- [Responding to Feedback](#responding-to-feedback)
|
||||
- [Approval Criteria](#approval-criteria)
|
||||
- [Getting Help](#getting-help)
|
||||
- [Code Style Guidelines](#code-style-guidelines)
|
||||
- [Testing Requirements](#testing-requirements)
|
||||
- [Running Tests](#running-tests)
|
||||
- [Test Frameworks](#test-frameworks)
|
||||
- [End-to-End Tests (Playwright)](#end-to-end-tests-playwright)
|
||||
- [Unit Tests (Vitest)](#unit-tests-vitest)
|
||||
- [Writing Tests](#writing-tests)
|
||||
- [When to Write Tests](#when-to-write-tests)
|
||||
- [CI/CD Pipeline](#cicd-pipeline)
|
||||
- [CI Checks](#ci-checks)
|
||||
- [CI Testing Environment](#ci-testing-environment)
|
||||
- [Viewing CI Results](#viewing-ci-results)
|
||||
- [Common CI Failures](#common-ci-failures)
|
||||
- [Coverage Requirements](#coverage-requirements)
|
||||
- [Issue Reporting](#issue-reporting)
|
||||
- [Bug Reports](#bug-reports)
|
||||
- [Before Reporting](#before-reporting)
|
||||
- [Bug Report Template](#bug-report-template)
|
||||
- [Feature Requests](#feature-requests)
|
||||
- [Before Requesting](#before-requesting)
|
||||
- [Feature Request Template](#feature-request-template)
|
||||
- [Security Issues](#security-issues)
|
||||
|
||||
---
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before contributing to Automaker, ensure you have the following installed on your system:
|
||||
|
||||
- **Node.js 18+** (tested with Node.js 22)
|
||||
- Download from [nodejs.org](https://nodejs.org/)
|
||||
- Verify installation: `node --version`
|
||||
- **npm** (comes with Node.js)
|
||||
- Verify installation: `npm --version`
|
||||
- **Git** for version control
|
||||
- Verify installation: `git --version`
|
||||
- **Claude Code CLI** or **Anthropic API Key** (for AI agent functionality)
|
||||
- Required to run the AI development features
|
||||
|
||||
**Optional but recommended:**
|
||||
|
||||
- A code editor with TypeScript support (VS Code recommended)
|
||||
- GitHub CLI (`gh`) for easier PR management
|
||||
|
||||
### Fork and Clone
|
||||
|
||||
1. **Fork the repository** on GitHub
|
||||
- Navigate to [https://github.com/AutoMaker-Org/automaker](https://github.com/AutoMaker-Org/automaker)
|
||||
- Click the "Fork" button in the top-right corner
|
||||
- This creates your own copy of the repository
|
||||
|
||||
2. **Clone your fork locally**
|
||||
|
||||
```bash
|
||||
git clone https://github.com/YOUR_USERNAME/automaker.git
|
||||
cd automaker
|
||||
```
|
||||
|
||||
3. **Add the upstream remote** to keep your fork in sync
|
||||
|
||||
```bash
|
||||
git remote add upstream https://github.com/AutoMaker-Org/automaker.git
|
||||
```
|
||||
|
||||
4. **Verify remotes**
|
||||
```bash
|
||||
git remote -v
|
||||
# Should show:
|
||||
# origin https://github.com/YOUR_USERNAME/automaker.git (fetch)
|
||||
# origin https://github.com/YOUR_USERNAME/automaker.git (push)
|
||||
# upstream https://github.com/AutoMaker-Org/automaker.git (fetch)
|
||||
# upstream https://github.com/AutoMaker-Org/automaker.git (push)
|
||||
```
|
||||
|
||||
### Development Setup
|
||||
|
||||
1. **Install dependencies**
|
||||
|
||||
```bash
|
||||
npm install
|
||||
```
|
||||
|
||||
2. **Build shared packages** (required before running the app)
|
||||
|
||||
```bash
|
||||
npm run build:packages
|
||||
```
|
||||
|
||||
3. **Start the development server**
|
||||
```bash
|
||||
npm run dev # Interactive launcher - choose mode
|
||||
npm run dev:web # Browser mode (web interface)
|
||||
npm run dev:electron # Desktop app mode
|
||||
```
|
||||
|
||||
**Common development commands:**
|
||||
|
||||
| Command | Description |
|
||||
| ------------------------ | -------------------------------- |
|
||||
| `npm run dev` | Interactive development launcher |
|
||||
| `npm run dev:web` | Start in browser mode |
|
||||
| `npm run dev:electron` | Start desktop app |
|
||||
| `npm run build` | Build all packages and apps |
|
||||
| `npm run build:packages` | Build shared packages only |
|
||||
| `npm run lint` | Run ESLint checks |
|
||||
| `npm run format` | Format code with Prettier |
|
||||
| `npm run format:check` | Check formatting without changes |
|
||||
| `npm run test` | Run E2E tests (Playwright) |
|
||||
| `npm run test:server` | Run server unit tests |
|
||||
| `npm run test:packages` | Run package tests |
|
||||
| `npm run test:all` | Run all tests |
|
||||
|
||||
### Project Structure
|
||||
|
||||
Automaker is organized as an npm workspace monorepo:
|
||||
|
||||
```
|
||||
automaker/
|
||||
├── apps/
|
||||
│ ├── ui/ # React + Vite + Electron frontend
|
||||
│ └── server/ # Express + WebSocket backend
|
||||
├── libs/
|
||||
│ ├── @automaker/types/ # Shared TypeScript types
|
||||
│ ├── @automaker/utils/ # Utility functions
|
||||
│ ├── @automaker/prompts/ # AI prompt templates
|
||||
│ ├── @automaker/platform/ # Platform abstractions
|
||||
│ ├── @automaker/model-resolver/ # AI model resolution
|
||||
│ ├── @automaker/dependency-resolver/ # Dependency management
|
||||
│ └── @automaker/git-utils/ # Git operations
|
||||
├── docs/ # Documentation
|
||||
└── package.json # Root package configuration
|
||||
```
|
||||
|
||||
**Key conventions:**
|
||||
|
||||
- Always import from `@automaker/*` shared packages, never use relative paths to `libs/`
|
||||
- Frontend code lives in `apps/ui/`
|
||||
- Backend code lives in `apps/server/`
|
||||
- Shared logic should be in the appropriate `libs/` package
|
||||
|
||||
---
|
||||
|
||||
## Pull Request Process
|
||||
|
||||
This section covers everything you need to know about contributing changes through pull requests, from creating your branch to getting your code merged.
|
||||
|
||||
### Branch Naming Convention
|
||||
|
||||
We use a consistent branch naming pattern to keep our repository organized:
|
||||
|
||||
```
|
||||
<type>/<description>
|
||||
```
|
||||
|
||||
**Branch types:**
|
||||
|
||||
| Type | Purpose | Example |
|
||||
| ---------- | ------------------------ | --------------------------------- |
|
||||
| `feature` | New functionality | `feature/add-user-authentication` |
|
||||
| `fix` | Bug fixes | `fix/resolve-memory-leak` |
|
||||
| `docs` | Documentation changes | `docs/update-contributing-guide` |
|
||||
| `refactor` | Code restructuring | `refactor/simplify-api-handlers` |
|
||||
| `test` | Adding or updating tests | `test/add-utils-unit-tests` |
|
||||
| `chore` | Maintenance tasks | `chore/update-dependencies` |
|
||||
|
||||
**Guidelines:**
|
||||
|
||||
- Use lowercase letters and hyphens (no underscores or spaces)
|
||||
- Keep descriptions short but descriptive
|
||||
- Include issue number when applicable: `feature/123-add-login`
|
||||
|
||||
```bash
|
||||
# Create and checkout a new feature branch
|
||||
git checkout -b feature/add-dark-mode
|
||||
|
||||
# Create a fix branch with issue reference
|
||||
git checkout -b fix/456-resolve-login-error
|
||||
```
|
||||
|
||||
### Commit Message Format
|
||||
|
||||
We follow the **Conventional Commits** style for clear, readable commit history:
|
||||
|
||||
```
|
||||
<type>: <description>
|
||||
|
||||
[optional body]
|
||||
```
|
||||
|
||||
**Commit types:**
|
||||
|
||||
| Type | Purpose |
|
||||
| ---------- | --------------------------- |
|
||||
| `feat` | New feature |
|
||||
| `fix` | Bug fix |
|
||||
| `docs` | Documentation only |
|
||||
| `style` | Formatting (no code change) |
|
||||
| `refactor` | Code restructuring |
|
||||
| `test` | Adding or updating tests |
|
||||
| `chore` | Maintenance tasks |
|
||||
|
||||
**Guidelines:**
|
||||
|
||||
- Use **imperative mood** ("Add feature" not "Added feature")
|
||||
- Keep first line under **72 characters**
|
||||
- Capitalize the first letter after the type prefix
|
||||
- No period at the end of the subject line
|
||||
- Add a blank line before the body for detailed explanations
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Simple commit
|
||||
git commit -m "feat: Add user authentication flow"
|
||||
|
||||
# Commit with body for more context
|
||||
git commit -m "fix: Resolve memory leak in WebSocket handler
|
||||
|
||||
The connection cleanup was not being called when clients
|
||||
disconnected unexpectedly. Added proper cleanup in the
|
||||
error handler to prevent memory accumulation."
|
||||
|
||||
# Documentation update
|
||||
git commit -m "docs: Update API documentation"
|
||||
|
||||
# Refactoring
|
||||
git commit -m "refactor: Simplify state management logic"
|
||||
```
|
||||
|
||||
### Submitting a Pull Request
|
||||
|
||||
Follow these steps to submit your contribution:
|
||||
|
||||
#### 1. Prepare Your Changes
|
||||
|
||||
Ensure you've synced with the latest upstream changes:
|
||||
|
||||
```bash
|
||||
# Fetch latest changes from upstream
|
||||
git fetch upstream
|
||||
|
||||
# Rebase your branch on main (if needed)
|
||||
git rebase upstream/main
|
||||
```
|
||||
|
||||
#### 2. Run Pre-submission Checks
|
||||
|
||||
Before opening your PR, verify everything passes locally:
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
npm run test:all
|
||||
|
||||
# Check formatting
|
||||
npm run format:check
|
||||
|
||||
# Run linter
|
||||
npm run lint
|
||||
|
||||
# Build to verify no compile errors
|
||||
npm run build
|
||||
```
|
||||
|
||||
#### 3. Push Your Changes
|
||||
|
||||
```bash
|
||||
# Push your branch to your fork
|
||||
git push origin feature/your-feature-name
|
||||
```
|
||||
|
||||
#### 4. Open a Pull Request
|
||||
|
||||
1. Go to your fork on GitHub
|
||||
2. Click "Compare & pull request" for your branch
|
||||
3. Ensure the base repository is `AutoMaker-Org/automaker` and base branch is `main`
|
||||
4. Fill out the PR template completely
|
||||
|
||||
#### PR Requirements Checklist
|
||||
|
||||
Your PR should include:
|
||||
|
||||
- [ ] **Clear title** describing the change (use conventional commit format)
|
||||
- [ ] **Description** explaining what changed and why
|
||||
- [ ] **Link to related issue** (if applicable): `Closes #123` or `Fixes #456`
|
||||
- [ ] **All CI checks passing** (format, lint, build, tests)
|
||||
- [ ] **No merge conflicts** with main branch
|
||||
- [ ] **Tests included** for new functionality
|
||||
- [ ] **Documentation updated** if adding/changing public APIs
|
||||
|
||||
**Example PR Description:**
|
||||
|
||||
```markdown
|
||||
## Summary
|
||||
|
||||
This PR adds dark mode support to the Automaker UI.
|
||||
|
||||
- Implements theme toggle in settings panel
|
||||
- Adds CSS custom properties for theme colors
|
||||
- Persists theme preference to localStorage
|
||||
|
||||
## Related Issue
|
||||
|
||||
Closes #123
|
||||
|
||||
## Testing
|
||||
|
||||
- [x] Tested toggle functionality in Chrome and Firefox
|
||||
- [x] Verified theme persists across page reloads
|
||||
- [x] Checked accessibility contrast ratios
|
||||
|
||||
## Screenshots
|
||||
|
||||
[Include before/after screenshots for UI changes]
|
||||
```
|
||||
|
||||
### Review Process
|
||||
|
||||
All contributions go through code review to maintain quality:
|
||||
|
||||
#### What to Expect
|
||||
|
||||
1. **CI Checks Run First** - Automated checks (format, lint, build, tests) must pass before review
|
||||
2. **Maintainer Review** - The project maintainers will review your PR and decide whether to merge it
|
||||
3. **Feedback & Discussion** - The reviewer may ask questions or request changes
|
||||
4. **Iteration** - Make requested changes and push updates to the same branch
|
||||
5. **Approval & Merge** - Once approved and checks pass, your PR will be merged
|
||||
|
||||
#### Review Focus Areas
|
||||
|
||||
The reviewer checks for:
|
||||
|
||||
- **Correctness** - Does the code work as intended?
|
||||
- **Clean Code** - Does it follow our [code style guidelines](#code-style-guidelines)?
|
||||
- **Test Coverage** - Are new features properly tested?
|
||||
- **Documentation** - Are public APIs documented?
|
||||
- **Breaking Changes** - Are any breaking changes discussed first?
|
||||
|
||||
#### Responding to Feedback
|
||||
|
||||
- Respond to **all** review comments, even if just to acknowledge
|
||||
- Ask questions if feedback is unclear
|
||||
- Push additional commits to address feedback (don't force-push during review)
|
||||
- Mark conversations as resolved once addressed
|
||||
|
||||
#### Approval Criteria
|
||||
|
||||
Your PR is ready to merge when:
|
||||
|
||||
- ✅ All CI checks pass
|
||||
- ✅ The maintainer has approved the changes
|
||||
- ✅ All review comments are addressed
|
||||
- ✅ No unresolved merge conflicts
|
||||
|
||||
#### Getting Help
|
||||
|
||||
If your PR seems stuck:
|
||||
|
||||
- Comment asking for a status update (mention @webdevcody if needed)
|
||||
- Reach out on [Discord](https://discord.gg/jjem7aEDKU)
|
||||
- Make sure all checks are passing and you've responded to all feedback
|
||||
|
||||
---
|
||||
|
||||
## Code Style Guidelines
|
||||
|
||||
Automaker uses automated tooling to enforce code style. Run `npm run format` to format code and `npm run lint` to check for issues. Pre-commit hooks automatically format staged files before committing.
|
||||
|
||||
---
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
Testing helps prevent regressions. Automaker uses **Playwright** for end-to-end testing and **Vitest** for unit tests.
|
||||
|
||||
### Running Tests
|
||||
|
||||
Use these commands to run tests locally:
|
||||
|
||||
| Command | Description |
|
||||
| ------------------------------ | ------------------------------------- |
|
||||
| `npm run test` | Run E2E tests (Playwright) |
|
||||
| `npm run test:server` | Run server unit tests (Vitest) |
|
||||
| `npm run test:packages` | Run shared package tests |
|
||||
| `npm run test:all` | Run all tests |
|
||||
| `npm run test:server:coverage` | Run server tests with coverage report |
|
||||
|
||||
**Before submitting a PR**, always run the full test suite:
|
||||
|
||||
```bash
|
||||
npm run test:all
|
||||
```
|
||||
|
||||
### Test Frameworks
|
||||
|
||||
#### End-to-End Tests (Playwright)
|
||||
|
||||
E2E tests verify the entire application works correctly from a user's perspective.
|
||||
|
||||
- **Framework:** [Playwright](https://playwright.dev/)
|
||||
- **Location:** `e2e/` directory
|
||||
- **Test ports:** UI on port 3007, Server on port 3008
|
||||
|
||||
**Running E2E tests:**
|
||||
|
||||
```bash
|
||||
# Run all E2E tests
|
||||
npm run test
|
||||
|
||||
# Run with headed browser (useful for debugging)
|
||||
npx playwright test --headed
|
||||
|
||||
# Run a specific test file
|
||||
npm test --workspace=@automaker/ui -- tests/example.spec.ts
|
||||
```
|
||||
|
||||
**E2E Test Guidelines:**
|
||||
|
||||
- Write tests from a user's perspective
|
||||
- Use descriptive test names that explain the scenario
|
||||
- Clean up test data after each test
|
||||
- Use appropriate timeouts for async operations
|
||||
- Prefer `locator` over direct selectors for resilience (see the sketch after this list)
|
||||
|
||||
#### Unit Tests (Vitest)
|
||||
|
||||
Unit tests verify individual functions and modules work correctly in isolation.
|
||||
|
||||
- **Framework:** [Vitest](https://vitest.dev/)
|
||||
- **Location:** In the `tests/` directory within each package (e.g., `apps/server/tests/`)
|
||||
|
||||
**Running unit tests:**
|
||||
|
||||
```bash
|
||||
# Run all server unit tests
|
||||
npm run test:server
|
||||
|
||||
# Run with coverage report
|
||||
npm run test:server:coverage
|
||||
|
||||
# Run package tests
|
||||
npm run test:packages
|
||||
|
||||
# Run in watch mode during development
|
||||
npx vitest --watch
|
||||
```
|
||||
|
||||
**Unit Test Guidelines:**
|
||||
|
||||
- Keep tests small and focused on one behavior
|
||||
- Use descriptive test names: `it('should return null when user is not found')`
|
||||
- Follow the AAA pattern: Arrange, Act, Assert (see the sketch after this list)
|
||||
- Mock external dependencies to isolate the unit under test
|
||||
- Aim for meaningful coverage, not just line coverage
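
A minimal Vitest sketch of the AAA pattern with a mocked dependency. The `findUserName` function and its repository interface are invented purely for illustration and do not exist in the Automaker codebase.

```typescript
import { describe, it, expect, vi } from 'vitest';

// Illustrative unit under test; not part of the Automaker codebase.
interface UserRepo {
  findById(id: string): Promise<{ name: string } | null>;
}

async function findUserName(repo: UserRepo, id: string): Promise<string | null> {
  const user = await repo.findById(id);
  return user ? user.name : null;
}

describe('findUserName', () => {
  it('should return null when user is not found', async () => {
    // Arrange: mock the external dependency to isolate the unit under test
    const repo: UserRepo = { findById: vi.fn().mockResolvedValue(null) };

    // Act
    const result = await findUserName(repo, 'missing-id');

    // Assert
    expect(result).toBeNull();
    expect(repo.findById).toHaveBeenCalledWith('missing-id');
  });
});
```
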
|
||||
|
||||
### Writing Tests
|
||||
|
||||
#### When to Write Tests
|
||||
|
||||
- **New features:** All new features should include tests
|
||||
- **Bug fixes:** Add a test that reproduces the bug before fixing
|
||||
- **Refactoring:** Ensure existing tests pass after refactoring
|
||||
- **Public APIs:** All public APIs must have test coverage
|
||||
|
||||
### CI/CD Pipeline
|
||||
|
||||
Automaker uses **GitHub Actions** for continuous integration. Every pull request triggers automated checks.
|
||||
|
||||
#### CI Checks
|
||||
|
||||
The following checks must pass before your PR can be merged:
|
||||
|
||||
| Check             | Description                                   |
| ----------------- | --------------------------------------------- |
| **Format**        | Verifies code is formatted with Prettier      |
| **Build**         | Ensures the project compiles without errors   |
| **Package Tests** | Runs tests for shared `@automaker/*` packages |
| **Server Tests**  | Runs server unit tests with coverage          |
|
||||
|
||||
#### CI Testing Environment
|
||||
|
||||
For CI environments, Automaker supports a mock agent mode:
|
||||
|
||||
```bash
|
||||
# Enable mock agent mode for CI testing
|
||||
AUTOMAKER_MOCK_AGENT=true npm run test
|
||||
```
|
||||
|
||||
This allows tests to run without requiring a real Claude API connection.
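
How the flag is consumed internally is not documented here; the following is only a hypothetical sketch of the general pattern, with every identifier (`Agent`, `MockAgent`, `ClaudeAgent`, `createAgent`) invented for illustration.

```typescript
// Hypothetical factory: branch on the CI flag so tests never hit the real API.
// None of these names are taken from the Automaker source.
interface Agent {
  run(prompt: string): Promise<string>;
}

class MockAgent implements Agent {
  async run(prompt: string): Promise<string> {
    return `[mock response for: ${prompt}]`; // deterministic output for CI
  }
}

class ClaudeAgent implements Agent {
  async run(prompt: string): Promise<string> {
    throw new Error('would call the Claude Agent SDK here');
  }
}

export function createAgent(): Agent {
  return process.env.AUTOMAKER_MOCK_AGENT === 'true' ? new MockAgent() : new ClaudeAgent();
}
```
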
|
||||
|
||||
#### Viewing CI Results
|
||||
|
||||
1. Go to your PR on GitHub
|
||||
2. Scroll to the "Checks" section at the bottom
|
||||
3. Click on any failed check to see detailed logs
|
||||
4. Fix issues locally and push updates
|
||||
|
||||
#### Common CI Failures
|
||||
|
||||
| Issue               | Solution                                      |
| ------------------- | --------------------------------------------- |
| Format check failed | Run `npm run format` locally                  |
| Build failed        | Run `npm run build` and fix TypeScript errors |
| Tests failed        | Run `npm run test:all` locally to reproduce   |
| Coverage decreased  | Add tests for new code paths                  |
|
||||
|
||||
### Coverage Requirements
|
||||
|
||||
While we don't enforce strict coverage percentages, we expect:
|
||||
|
||||
- **New features:** Should include comprehensive tests
|
||||
- **Bug fixes:** Should include a regression test
|
||||
- **Critical paths:** Must have test coverage (authentication, data persistence, etc.)
|
||||
|
||||
To view coverage reports locally:
|
||||
|
||||
```bash
|
||||
npm run test:server:coverage
|
||||
```
|
||||
|
||||
This generates an HTML report you can open in your browser to see which lines are covered.
|
||||
|
||||
---
|
||||
|
||||
## Issue Reporting
|
||||
|
||||
Found a bug or have an idea for a new feature? We'd love to hear from you! This section explains how to report issues effectively.
|
||||
|
||||
### Bug Reports
|
||||
|
||||
When reporting a bug, please provide as much information as possible to help us understand and reproduce the issue.
|
||||
|
||||
#### Before Reporting
|
||||
|
||||
1. **Search existing issues** - Check if the bug has already been reported
|
||||
2. **Try the latest version** - Make sure you're running the latest version of Automaker
|
||||
3. **Reproduce the issue** - Verify you can consistently reproduce the bug
|
||||
|
||||
#### Bug Report Template
|
||||
|
||||
When creating a bug report, include:
|
||||
|
||||
- **Title:** A clear, descriptive title summarizing the issue
|
||||
- **Environment:**
|
||||
- Operating System and version
|
||||
- Node.js version (`node --version`)
|
||||
- Automaker version or commit hash
|
||||
- **Steps to Reproduce:** Numbered list of steps to reproduce the bug
|
||||
- **Expected Behavior:** What you expected to happen
|
||||
- **Actual Behavior:** What actually happened
|
||||
- **Logs/Screenshots:** Any relevant error messages, console output, or screenshots
|
||||
|
||||
**Example Bug Report:**
|
||||
|
||||
```markdown
|
||||
## Bug: WebSocket connection drops after 5 minutes of inactivity
|
||||
|
||||
### Environment
|
||||
|
||||
- OS: Windows 11
|
||||
- Node.js: 22.11.0
|
||||
- Automaker: commit abc1234
|
||||
|
||||
### Steps to Reproduce
|
||||
|
||||
1. Start the application with `npm run dev:web`
|
||||
2. Open the Kanban board
|
||||
3. Leave the browser tab open for 5+ minutes without interaction
|
||||
4. Try to move a card
|
||||
|
||||
### Expected Behavior
|
||||
|
||||
The card should move to the new column.
|
||||
|
||||
### Actual Behavior
|
||||
|
||||
The UI shows "Connection lost" and the card doesn't move.
|
||||
|
||||
### Logs
|
||||
|
||||
[WebSocket] Connection closed: 1006
|
||||
```
|
||||
|
||||
### Feature Requests
|
||||
|
||||
We welcome ideas for improving Automaker! Here's how to submit a feature request:
|
||||
|
||||
#### Before Requesting
|
||||
|
||||
1. **Check existing issues** - Your idea may already be proposed or in development
|
||||
2. **Consider scope** - Think about whether the feature fits Automaker's mission as an autonomous AI development studio
|
||||
|
||||
#### Feature Request Template
|
||||
|
||||
A good feature request includes:
|
||||
|
||||
- **Title:** A brief, descriptive title
|
||||
- **Problem Statement:** What problem does this feature solve?
|
||||
- **Proposed Solution:** How do you envision this working?
|
||||
- **Alternatives Considered:** What other approaches did you consider?
|
||||
- **Additional Context:** Mockups, examples, or references that help explain your idea
|
||||
|
||||
**Example Feature Request:**
|
||||
|
||||
```markdown
|
||||
## Feature: Dark Mode Support
|
||||
|
||||
### Problem Statement
|
||||
|
||||
Working late at night, the bright UI causes eye strain and doesn't match
|
||||
my system's dark theme preference.
|
||||
|
||||
### Proposed Solution
|
||||
|
||||
Add a theme toggle in the settings panel that allows switching between
|
||||
light and dark modes. Ideally, it should also detect system preference.
|
||||
|
||||
### Alternatives Considered
|
||||
|
||||
- Browser extension to force dark mode (doesn't work well with custom styling)
|
||||
- Custom CSS override (breaks with updates)
|
||||
|
||||
### Additional Context
|
||||
|
||||
Similar to how VS Code handles themes - a dropdown in settings with
|
||||
immediate preview.
|
||||
```
|
||||
|
||||
### Security Issues
|
||||
|
||||
**Important:** If you discover a security vulnerability, please do NOT open a public issue. Instead:
|
||||
|
||||
1. Join our [Discord server](https://discord.gg/jjem7aEDKU) and send a direct message to the user `@webdevcody`
|
||||
2. Include detailed steps to reproduce
|
||||
3. Allow time for us to address the issue before public disclosure
|
||||
|
||||
We take security seriously and appreciate responsible disclosure.
|
||||
|
||||
---
|
||||
|
||||
For license and contribution terms, see the [LICENSE](LICENSE) file in the repository root and the [README.md](README.md#license) for more details.
|
||||
|
||||
---
|
||||
|
||||
Thank you for contributing to Automaker!
|
||||
@@ -19,30 +19,57 @@ While we have made efforts to review this codebase for security vulnerabilities
|
||||
## Recommendations
|
||||
|
||||
### 1. Review the Code First
|
||||
|
||||
Before running Automaker, we strongly recommend reviewing the source code yourself to understand what operations it performs and ensure you are comfortable with its behavior.
|
||||
|
||||
### 2. Use Sandboxing (Highly Recommended)
|
||||
|
||||
**We do not recommend running Automaker directly on your local computer** due to the risk of AI agents having access to your entire file system. Instead, consider:
|
||||
|
||||
- **Docker**: Run Automaker in a Docker container to isolate it from your host system
|
||||
- **Virtual Machine**: Use a VM (such as VirtualBox, VMware, or Parallels) to create an isolated environment
|
||||
- **Cloud Development Environment**: Use a cloud-based development environment that provides isolation
|
||||
|
||||
#### Running in Isolated Docker Container
|
||||
|
||||
For maximum security, run Automaker in an isolated Docker container that **cannot access your laptop's files**:
|
||||
|
||||
```bash
|
||||
# 1. Set your API key (bash/Linux/Mac - creates UTF-8 file)
|
||||
echo "ANTHROPIC_API_KEY=your-api-key-here" > .env
|
||||
|
||||
# On Windows PowerShell, use instead:
|
||||
Set-Content -Path .env -Value "ANTHROPIC_API_KEY=your-api-key-here" -Encoding UTF8
|
||||
|
||||
# 2. Build and run isolated container
|
||||
docker-compose up -d
|
||||
|
||||
# 3. Access the UI at http://localhost:3007
|
||||
# API at http://localhost:3008/api/health
|
||||
```
|
||||
|
||||
The container uses only Docker-managed volumes and has no access to your host filesystem. See [docker-isolation.md](docs/docker-isolation.md) for full documentation.
|
||||
|
||||
### 3. Limit Access
|
||||
|
||||
If you must run locally:
|
||||
|
||||
- Create a dedicated user account with limited permissions
|
||||
- Only grant access to specific project directories
|
||||
- Avoid running with administrator/root privileges
|
||||
- Keep sensitive files and credentials outside of project directories
|
||||
|
||||
### 4. Monitor Activity
|
||||
|
||||
- Review the agent's actions in the output logs
|
||||
- Pay attention to file modifications and command executions
|
||||
- Stop the agent immediately if you notice unexpected behavior
|
||||
|
||||
## No Warranty
|
||||
## No Warranty & Limitation of Liability
|
||||
|
||||
This software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.
|
||||
THE SOFTWARE UTILIZES ARTIFICIAL INTELLIGENCE TO GENERATE CODE, EXECUTE COMMANDS, AND INTERACT WITH YOUR FILE SYSTEM. YOU ACKNOWLEDGE THAT AI SYSTEMS CAN BE UNPREDICTABLE, MAY GENERATE INCORRECT, INSECURE, OR DESTRUCTIVE CODE, AND MAY TAKE ACTIONS THAT COULD DAMAGE YOUR SYSTEM, FILES, OR HARDWARE.
|
||||
|
||||
This software is provided "as is", without warranty of any kind, express or implied. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, including but not limited to hardware damage, data loss, financial loss, or business interruption, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.
|
||||
|
||||
## Acknowledgment
|
||||
|
||||
|
||||
154
Dockerfile
Normal file
@@ -0,0 +1,154 @@
|
||||
# Automaker Multi-Stage Dockerfile
|
||||
# Single Dockerfile for both server and UI builds
|
||||
# Usage:
|
||||
# docker build --target server -t automaker-server .
|
||||
# docker build --target ui -t automaker-ui .
|
||||
# Or use docker-compose which selects targets automatically
|
||||
|
||||
# =============================================================================
|
||||
# BASE STAGE - Common setup for all builds (DRY: defined once, used by all)
|
||||
# =============================================================================
|
||||
FROM node:22-alpine AS base
|
||||
|
||||
# Install build dependencies for native modules (node-pty)
|
||||
RUN apk add --no-cache python3 make g++
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy root package files
|
||||
COPY package*.json ./
|
||||
|
||||
# Copy all libs package.json files (centralized - add new libs here)
|
||||
COPY libs/types/package*.json ./libs/types/
|
||||
COPY libs/utils/package*.json ./libs/utils/
|
||||
COPY libs/prompts/package*.json ./libs/prompts/
|
||||
COPY libs/platform/package*.json ./libs/platform/
|
||||
COPY libs/model-resolver/package*.json ./libs/model-resolver/
|
||||
COPY libs/dependency-resolver/package*.json ./libs/dependency-resolver/
|
||||
COPY libs/git-utils/package*.json ./libs/git-utils/
|
||||
|
||||
# Copy scripts (needed by npm workspace)
|
||||
COPY scripts ./scripts
|
||||
|
||||
# =============================================================================
|
||||
# SERVER BUILD STAGE
|
||||
# =============================================================================
|
||||
FROM base AS server-builder
|
||||
|
||||
# Copy server-specific package.json
|
||||
COPY apps/server/package*.json ./apps/server/
|
||||
|
||||
# Install dependencies (--ignore-scripts to skip husky/prepare, then rebuild native modules)
|
||||
RUN npm ci --ignore-scripts && npm rebuild node-pty
|
||||
|
||||
# Copy all source files
|
||||
COPY libs ./libs
|
||||
COPY apps/server ./apps/server
|
||||
|
||||
# Build packages in dependency order, then build server
|
||||
RUN npm run build:packages && npm run build --workspace=apps/server
|
||||
|
||||
# =============================================================================
|
||||
# SERVER PRODUCTION STAGE
|
||||
# =============================================================================
|
||||
FROM node:22-alpine AS server
|
||||
|
||||
# Install git, curl, bash (for terminal), and GitHub CLI (pinned version, multi-arch)
|
||||
RUN apk add --no-cache git curl bash && \
|
||||
GH_VERSION="2.63.2" && \
|
||||
ARCH=$(uname -m) && \
|
||||
case "$ARCH" in \
|
||||
x86_64) GH_ARCH="amd64" ;; \
|
||||
aarch64|arm64) GH_ARCH="arm64" ;; \
|
||||
*) echo "Unsupported architecture: $ARCH" && exit 1 ;; \
|
||||
esac && \
|
||||
curl -L "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${GH_ARCH}.tar.gz" -o gh.tar.gz && \
|
||||
tar -xzf gh.tar.gz && \
|
||||
mv gh_${GH_VERSION}_linux_${GH_ARCH}/bin/gh /usr/local/bin/gh && \
|
||||
rm -rf gh.tar.gz gh_${GH_VERSION}_linux_${GH_ARCH}
|
||||
|
||||
# Install Claude CLI globally
|
||||
RUN npm install -g @anthropic-ai/claude-code
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create non-root user
|
||||
RUN addgroup -g 1001 -S automaker && \
|
||||
adduser -S automaker -u 1001
|
||||
|
||||
# Copy root package.json (needed for workspace resolution)
|
||||
COPY --from=server-builder /app/package*.json ./
|
||||
|
||||
# Copy built libs (workspace packages are symlinked in node_modules)
|
||||
COPY --from=server-builder /app/libs ./libs
|
||||
|
||||
# Copy built server
|
||||
COPY --from=server-builder /app/apps/server/dist ./apps/server/dist
|
||||
COPY --from=server-builder /app/apps/server/package*.json ./apps/server/
|
||||
|
||||
# Copy node_modules (includes symlinks to libs)
|
||||
COPY --from=server-builder /app/node_modules ./node_modules
|
||||
|
||||
# Create data and projects directories
|
||||
RUN mkdir -p /data /projects && chown automaker:automaker /data /projects
|
||||
|
||||
# Configure git for mounted volumes and authentication
|
||||
# Use --system so it's not overwritten by mounted user .gitconfig
|
||||
RUN git config --system --add safe.directory '*' && \
|
||||
# Use gh as credential helper (works with GH_TOKEN env var)
|
||||
git config --system credential.helper '!gh auth git-credential'
|
||||
|
||||
# Switch to non-root user
|
||||
USER automaker
|
||||
|
||||
# Environment variables
|
||||
ENV PORT=3008
|
||||
ENV DATA_DIR=/data
|
||||
|
||||
# Expose port
|
||||
EXPOSE 3008
|
||||
|
||||
# Health check (using curl since it's already installed, more reliable than busybox wget)
|
||||
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
||||
CMD curl -f http://localhost:3008/api/health || exit 1
|
||||
|
||||
# Start server
|
||||
CMD ["node", "apps/server/dist/index.js"]
|
||||
|
||||
# =============================================================================
|
||||
# UI BUILD STAGE
|
||||
# =============================================================================
|
||||
FROM base AS ui-builder
|
||||
|
||||
# Copy UI-specific package.json
|
||||
COPY apps/ui/package*.json ./apps/ui/
|
||||
|
||||
# Install dependencies (--ignore-scripts to skip husky and build:packages in prepare script)
|
||||
RUN npm ci --ignore-scripts
|
||||
|
||||
# Copy all source files
|
||||
COPY libs ./libs
|
||||
COPY apps/ui ./apps/ui
|
||||
|
||||
# Build packages in dependency order, then build UI
|
||||
# VITE_SERVER_URL tells the UI where to find the API server
|
||||
# Use ARG to allow overriding at build time: --build-arg VITE_SERVER_URL=http://api.example.com
|
||||
ARG VITE_SERVER_URL=http://localhost:3008
|
||||
ENV VITE_SKIP_ELECTRON=true
|
||||
ENV VITE_SERVER_URL=${VITE_SERVER_URL}
|
||||
RUN npm run build:packages && npm run build --workspace=apps/ui
|
||||
|
||||
# =============================================================================
|
||||
# UI PRODUCTION STAGE
|
||||
# =============================================================================
|
||||
FROM nginx:alpine AS ui
|
||||
|
||||
# Copy built files
|
||||
COPY --from=ui-builder /app/apps/ui/dist /usr/share/nginx/html
|
||||
|
||||
# Copy nginx config for SPA routing
|
||||
COPY apps/ui/nginx.conf /etc/nginx/conf.d/default.conf
|
||||
|
||||
EXPOSE 80
|
||||
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
154
LICENSE
@@ -1,21 +1,141 @@
|
||||
MIT License
|
||||
AUTOMAKER LICENSE AGREEMENT
|
||||
|
||||
Copyright (c) 2025 Cody Seibert
|
||||
This License Agreement ("Agreement") is entered into between you ("Licensee") and the copyright holders of Automaker ("Licensor"). By using, copying, modifying, downloading, cloning, or distributing the Software (as defined below), you agree to be bound by the terms of this Agreement.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
1. DEFINITIONS
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
"Software" means the Automaker software, including all source code, object code, documentation, and related materials.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
"Generated Files" means files created by the Software during normal operation to store internal state, configuration, or working data, including but not limited to app_spec.txt, feature.json, and similar files generated by the Software. Generated Files are not considered part of the Software for the purposes of this license and are not subject to the restrictions herein.
|
||||
|
||||
"Derivative Work" means any work that is based on, derived from, or incorporates the Software or any substantial portion of it, including but not limited to modifications, forks, adaptations, translations, or any altered version of the Software.
|
||||
|
||||
"Monetization" means any activity that generates revenue, income, or commercial benefit from the Software itself or any Derivative Work, including but not limited to:
|
||||
|
||||
- Reselling, redistributing, or sublicensing the Software, any Derivative Work, or any substantial portion thereof
|
||||
- Including the Software, any Derivative Work, or substantial portions thereof in a product or service that you sell or distribute
|
||||
- Offering the Software, any Derivative Work, or substantial portions thereof as a standalone product or service for sale
|
||||
- Hosting the Software or any Derivative Work as a service (whether free or paid) for use by others, including cloud hosting, Software-as-a-Service (SaaS), or any other form of hosted access for third parties
|
||||
- Extracting, reselling, redistributing, or sublicensing any prompts, context, or other instructional content bundled within the Software
|
||||
- Creating, distributing, or selling modified versions, forks, or Derivative Works of the Software
|
||||
|
||||
Monetization does NOT include:
|
||||
|
||||
- Using the Software internally within your organization, regardless of whether your organization is for-profit
|
||||
- Using the Software to build products or services that generate revenue, as long as you are not reselling or redistributing the Software itself
|
||||
- Using the Software to provide services for which fees are charged, as long as the Software itself is not being resold or redistributed
|
||||
- Hosting the Software anywhere for personal use by a single developer, as long as the Software is not made accessible to others
|
||||
|
||||
"Core Contributors" means the following individuals who are granted perpetual, royalty-free licenses:
|
||||
|
||||
- Cody Seibert (webdevcody)
|
||||
- SuperComboGamer (SCG)
|
||||
- Kacper Lachowicz (Shironex, Shirone)
|
||||
- Ben Scott (trueheads)
|
||||
|
||||
2. GRANT OF LICENSE
|
||||
|
||||
Subject to the terms and conditions of this Agreement, Licensor hereby grants to Licensee a non-exclusive, non-transferable license to use, copy, modify, and distribute the Software, provided that:
|
||||
|
||||
a) Licensee may freely clone, install, and use the Software locally or within an organization for the purpose of building, developing, and maintaining other products, software, or services. There are no restrictions on the products you build _using_ the Software.
|
||||
|
||||
b) Licensee may run the Software on personal or organizational infrastructure for internal use.
|
||||
|
||||
c) Core Contributors are each individually granted a perpetual, worldwide, royalty-free, non-exclusive license to use, copy, modify, distribute, and sublicense the Software for any purpose, including Monetization, without payment of any fees or royalties. Each Core Contributor may exercise these rights independently and does not require permission, consent, or approval from any other Core Contributor to Monetize the Software in any way they see fit.
|
||||
|
||||
d) Commercial licenses for the Software may be discussed and issued to external parties or companies seeking to use the Software for financial gain or Monetization purposes. Core Contributors already have full rights under section 2(c) and do not require commercial licenses. Any commercial license issued to external parties shall require a unanimous vote by all Core Contributors and shall be granted in writing and signed by all Core Contributors.
|
||||
|
||||
e) The list of individuals defined as "Core Contributors" in Section 1 shall be amended to reflect any revocation or reinstatement of status made under this section.
|
||||
|
||||
3. RESTRICTIONS
|
||||
|
||||
Licensee may NOT:
|
||||
|
||||
- Engage in any Monetization of the Software or any Derivative Work without explicit written permission from all Core Contributors
|
||||
- Resell, redistribute, or sublicense the Software, any Derivative Work, or any substantial portion thereof
|
||||
- Create, distribute, or sell modified versions, forks, or Derivative Works of the Software for any commercial purpose
|
||||
- Include the Software, any Derivative Work, or substantial portions thereof in a product or service that you sell or distribute
|
||||
- Offer the Software, any Derivative Work, or substantial portions thereof as a standalone product or service for sale
|
||||
- Extract, resell, redistribute, or sublicense any prompts, context, or other instructional content bundled within the Software
|
||||
- Host the Software or any Derivative Work as a service (whether free or paid) for use by others (except Core Contributors)
|
||||
- Remove or alter any copyright notices or license terms
|
||||
- Use the Software in any manner that violates applicable laws or regulations
|
||||
|
||||
Licensee MAY:
|
||||
|
||||
- Use the Software internally within their organization (commercial or non-profit)
|
||||
- Use the Software to build other commercial products (products that do NOT contain the Software or Derivative Works)
|
||||
- Modify the Software for internal use within their organization (commercial or non-profit)
|
||||
|
||||
4. CORE CONTRIBUTOR STATUS MANAGEMENT
|
||||
|
||||
a) Core Contributor status may be revoked indefinitely by the remaining Core Contributors if:
|
||||
|
||||
- A Core Contributor cannot be reached for a period of one (1) month through reasonable means of communication (including but not limited to email, Discord, GitHub, or other project communication channels)
|
||||
- AND the Core Contributor has not contributed to the project during that one-month period. For purposes of this section, "contributed" means at least one of the following activities:
|
||||
- Discussing the Software through project communication channels
|
||||
- Committing code changes to the project repository
|
||||
- Submitting bug fixes or patches
|
||||
- Participating in project-related discussions or decision-making
|
||||
|
||||
b) Revocation of Core Contributor status requires a unanimous vote by all other Core Contributors (excluding the Core Contributor whose status is being considered for revocation).
|
||||
|
||||
c) Upon revocation of Core Contributor status, the individual shall no longer be considered a Core Contributor and shall lose the rights granted under section 2(c) of this Agreement. However, any Contributions made prior to revocation shall remain subject to the terms of section 5 (CONTRIBUTIONS AND RIGHTS ASSIGNMENT).
|
||||
|
||||
d) A revoked Core Contributor may be reinstated to Core Contributor status with a unanimous vote by all current Core Contributors. Upon reinstatement, the individual shall regain all rights granted under section 2(c) of this Agreement.
|
||||
|
||||
5. CONTRIBUTIONS AND RIGHTS ASSIGNMENT
|
||||
|
||||
By submitting, pushing, or contributing any code, documentation, pull requests, issues, or other materials ("Contributions") to the Automaker project, you agree to the following terms without reservation:
|
||||
|
||||
a) **Full Ownership Transfer & Rights Grant:** You hereby assign to the Core Contributors all right, title, and interest in and to your Contributions, including all copyrights, patents, and other intellectual property rights. If such assignment is not effective under applicable law, you grant the Core Contributors an unrestricted, perpetual, worldwide, non-exclusive, royalty-free, fully paid-up, irrevocable, sublicensable, and transferable license to use, reproduce, modify, adapt, publish, translate, create derivative works from, distribute, perform, display, and otherwise exploit your Contributions in any manner they see fit, including for any commercial purpose or Monetization.
|
||||
|
||||
b) **No Take-Backs:** You understand and agree that this grant of rights is irrevocable ("no take-backs"). You cannot revoke, rescind, or terminate this grant of rights once your Contribution has been submitted.
|
||||
|
||||
c) **Waiver of Moral Rights:** You waive any "moral rights" or other rights with respect to attribution of authorship or integrity of materials regarding your Contributions that you may have under any applicable law.
|
||||
|
||||
d) **Right to Contribute:** You represent and warrant that you are the original author of the Contributions, or that you have sufficient rights to grant the rights conveyed by this section, and that your Contributions do not infringe upon the rights of any third party.
|
||||
|
||||
6. TERMINATION
|
||||
|
||||
This license will terminate automatically if Licensee breaches any term of this Agreement. Upon termination, Licensee must immediately cease all use of the Software and destroy all copies in their possession.
|
||||
|
||||
7. HIGH RISK DISCLAIMER AND LIMITATION OF LIABILITY
|
||||
|
||||
a) **AI RISKS:** THE SOFTWARE UTILIZES ARTIFICIAL INTELLIGENCE TO GENERATE CODE, EXECUTE COMMANDS, AND INTERACT WITH YOUR FILE SYSTEM. YOU ACKNOWLEDGE THAT AI SYSTEMS CAN BE UNPREDICTABLE, MAY GENERATE INCORRECT, INSECURE, OR DESTRUCTIVE CODE, AND MAY TAKE ACTIONS THAT COULD DAMAGE YOUR SYSTEM, FILES, OR HARDWARE.
|
||||
|
||||
b) **USE AT YOUR OWN RISK:** YOU AGREE THAT YOUR USE OF THE SOFTWARE IS SOLELY AT YOUR OWN RISK. THE CORE CONTRIBUTORS AND LICENSOR DO NOT GUARANTEE THAT THE SOFTWARE OR ANY CODE GENERATED BY IT WILL BE SAFE, BUG-FREE, OR FUNCTIONAL.
|
||||
|
||||
c) **NO WARRANTY:** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT.
|
||||
|
||||
d) **LIMITATION OF LIABILITY:** IN NO EVENT SHALL THE CORE CONTRIBUTORS, LICENSORS, OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE, INCLUDING BUT NOT LIMITED TO:
|
||||
|
||||
- DAMAGE TO HARDWARE OR COMPUTER SYSTEMS
|
||||
- DATA LOSS OR CORRUPTION
|
||||
- GENERATION OF BAD, VULNERABLE, OR MALICIOUS CODE
|
||||
- FINANCIAL LOSSES
|
||||
- BUSINESS INTERRUPTION
|
||||
|
||||
8. LICENSE AMENDMENTS
|
||||
|
||||
Any amendment, modification, or update to this License Agreement must be agreed upon unanimously by all Core Contributors. No changes to this Agreement shall be effective unless all Core Contributors have provided their written consent or approval through a unanimous vote.
|
||||
|
||||
9. CONTACT
|
||||
|
||||
For inquiries regarding this license or permissions for Monetization, please contact the Core Contributors through the official project channels:
|
||||
|
||||
- Agentic Jumpstart Discord: https://discord.gg/JUDWZDN3VT
|
||||
- Website: https://automaker.app
|
||||
- Email: automakerapp@gmail.com
|
||||
|
||||
Any permission for Monetization requires the unanimous written consent of all Core Contributors.
|
||||
|
||||
10. GOVERNING LAW
|
||||
|
||||
This Agreement shall be governed by and construed in accordance with the laws of the State of Tennessee, USA, without regard to conflict of law principles.
|
||||
|
||||
By using the Software, you acknowledge that you have read this Agreement, understand it, and agree to be bound by its terms and conditions.
|
||||
|
||||
---
|
||||
|
||||
Copyright (c) 2025 Automaker Core Contributors
|
||||
|
||||
724
README.md
@@ -1,6 +1,612 @@
|
||||
<p align="center">
|
||||
<img src="apps/ui/public/readme_logo.svg" alt="Automaker Logo" height="80" />
|
||||
</p>
|
||||
|
||||
> [!TIP]
|
||||
>
|
||||
> **Learn more about Agentic Coding!**
|
||||
>
|
||||
> Automaker itself was built by a group of engineers using AI and agentic coding techniques to build features faster than ever. By leveraging tools like Cursor IDE and Claude Code CLI, the team orchestrated AI agents to implement complex functionality in days instead of weeks.
|
||||
>
|
||||
> **Learn how:** Master these same techniques and workflows in the [Agentic Jumpstart course](https://agenticjumpstart.com/?utm=automaker-gh).
|
||||
|
||||
# Automaker
|
||||
|
||||
Automaker is an autonomous AI development studio that helps you build software faster using AI-powered agents. It provides a visual Kanban board interface to manage features, automatically assigns AI agents to implement them, and tracks progress through an intuitive workflow from backlog to verified completion.
|
||||
**Stop typing code. Start directing AI agents.**
|
||||
|
||||
<details open>
|
||||
<summary><h2>Table of Contents</h2></summary>
|
||||
|
||||
- [What Makes Automaker Different?](#what-makes-automaker-different)
|
||||
- [The Workflow](#the-workflow)
|
||||
- [Powered by Claude Agent SDK](#powered-by-claude-agent-sdk)
|
||||
- [Why This Matters](#why-this-matters)
|
||||
- [Security Disclaimer](#security-disclaimer)
|
||||
- [Community & Support](#community--support)
|
||||
- [Getting Started](#getting-started)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Quick Start](#quick-start)
|
||||
- [How to Run](#how-to-run)
|
||||
- [Development Mode](#development-mode)
|
||||
- [Building for Production](#building-for-production)
|
||||
- [Testing](#testing)
|
||||
- [Linting](#linting)
|
||||
- [Environment Configuration](#environment-configuration)
|
||||
- [Authentication Setup](#authentication-setup)
|
||||
- [Features](#features)
|
||||
- [Core Workflow](#core-workflow)
|
||||
- [AI & Planning](#ai--planning)
|
||||
- [Project Management](#project-management)
|
||||
- [Collaboration & Review](#collaboration--review)
|
||||
- [Developer Tools](#developer-tools)
|
||||
- [Advanced Features](#advanced-features)
|
||||
- [Tech Stack](#tech-stack)
|
||||
- [Frontend](#frontend)
|
||||
- [Backend](#backend)
|
||||
- [Testing & Quality](#testing--quality)
|
||||
- [Shared Libraries](#shared-libraries)
|
||||
- [Available Views](#available-views)
|
||||
- [Architecture](#architecture)
|
||||
- [Monorepo Structure](#monorepo-structure)
|
||||
- [How It Works](#how-it-works)
|
||||
- [Key Architectural Patterns](#key-architectural-patterns)
|
||||
- [Security & Isolation](#security--isolation)
|
||||
- [Data Storage](#data-storage)
|
||||
- [Learn More](#learn-more)
|
||||
- [License](#license)
|
||||
|
||||
</details>
|
||||
|
||||
Automaker is an autonomous AI development studio that transforms how you build software. Instead of manually writing every line of code, you describe features on a Kanban board and watch as AI agents powered by Claude Agent SDK automatically implement them. Built with React, Vite, Electron, and Express, Automaker provides a complete workflow for managing AI agents through a desktop application (or web browser), with features like real-time streaming, git worktree isolation, plan approval, and multi-agent task execution.
|
||||
|
||||

|
||||
|
||||
## What Makes Automaker Different?
|
||||
|
||||
Traditional development tools help you write code. Automaker helps you **orchestrate AI agents** to build entire features autonomously. Think of it as having a team of AI developers working for you—you define what needs to be built, and Automaker handles the implementation.
|
||||
|
||||
### The Workflow
|
||||
|
||||
1. **Add Features** - Describe features you want built (with text, images, or screenshots)
|
||||
2. **Move to "In Progress"** - Automaker automatically assigns an AI agent to implement the feature
|
||||
3. **Watch It Build** - See real-time progress as the agent writes code, runs tests, and makes changes
|
||||
4. **Review & Verify** - Review the changes, run tests, and approve when ready
|
||||
5. **Ship Faster** - Build entire applications in days, not weeks
|
||||
|
||||
### Powered by Claude Agent SDK
|
||||
|
||||
Automaker leverages the [Claude Agent SDK](https://www.npmjs.com/package/@anthropic-ai/claude-agent-sdk) to give AI agents full access to your codebase. Agents can read files, write code, execute commands, run tests, and make git commits—all while working in isolated git worktrees to keep your main branch safe. The SDK provides autonomous AI agents that can use tools, make decisions, and complete complex multi-step tasks without constant human intervention.
|
||||
|
||||
### Why This Matters
|
||||
|
||||
The future of software development is **agentic coding**—where developers become architects directing AI agents rather than manual coders. Automaker puts this future in your hands today, letting you experience what it's like to build software 10x faster with AI agents handling the implementation while you focus on architecture and business logic.
|
||||
|
||||
## Community & Support
|
||||
|
||||
Join the **Agentic Jumpstart** Discord to connect with other builders exploring **agentic coding** and autonomous development workflows.
|
||||
|
||||
In the Discord, you can:
|
||||
|
||||
- 💬 Discuss agentic coding patterns and best practices
|
||||
- 🧠 Share ideas for AI-driven development workflows
|
||||
- 🛠️ Get help setting up or extending Automaker
|
||||
- 🚀 Show off projects built with AI agents
|
||||
- 🤝 Collaborate with other developers and contributors
|
||||
|
||||
👉 **Join the Discord:** [Agentic Jumpstart Discord](https://discord.gg/jjem7aEDKU)
|
||||
|
||||
---
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- **Node.js 18+** (tested with Node.js 22)
|
||||
- **npm** (comes with Node.js)
|
||||
- **Authentication** (choose one):
|
||||
- **[Claude Code CLI](https://code.claude.com/docs/en/overview)** (recommended) - Install and authenticate, credentials used automatically
|
||||
- **Anthropic API Key** - Direct API key for Claude Agent SDK ([get one here](https://console.anthropic.com/))
|
||||
|
||||
### Quick Start
|
||||
|
||||
```bash
|
||||
# 1. Clone the repository
|
||||
git clone https://github.com/AutoMaker-Org/automaker.git
|
||||
cd automaker
|
||||
|
||||
# 2. Install dependencies
|
||||
npm install
|
||||
|
||||
# 3. Build shared packages (optional: npm install / npm run dev now does this automatically)
|
||||
npm run build:packages
|
||||
|
||||
# 4. Set up authentication (skip if using Claude Code CLI)
|
||||
# If using Claude Code CLI: credentials are detected automatically
|
||||
# If using API key directly, choose one method:
|
||||
|
||||
# Option A: Environment variable
|
||||
export ANTHROPIC_API_KEY="sk-ant-..."
|
||||
|
||||
# Option B: Create .env file in project root
|
||||
echo "ANTHROPIC_API_KEY=sk-ant-..." > .env
|
||||
|
||||
# 5. Start Automaker (interactive launcher)
|
||||
npm run dev
|
||||
# Choose between:
|
||||
# 1. Web Application (browser at localhost:3007)
|
||||
# 2. Desktop Application (Electron - recommended)
|
||||
```
|
||||
|
||||
**Note:** The `npm run dev` command will:
|
||||
|
||||
- Check for dependencies and install if needed
|
||||
- Install Playwright browsers for E2E tests
|
||||
- Kill any processes on ports 3007/3008
|
||||
- Present an interactive menu to choose your run mode
|
||||
|
||||
## How to Run
|
||||
|
||||
### Development Mode
|
||||
|
||||
Start Automaker in development mode:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
This will prompt you to choose your run mode, or you can specify a mode directly:
|
||||
|
||||
#### Electron Desktop App (Recommended)
|
||||
|
||||
```bash
|
||||
# Standard development mode
|
||||
npm run dev:electron
|
||||
|
||||
# With DevTools open automatically
|
||||
npm run dev:electron:debug
|
||||
|
||||
# For WSL (Windows Subsystem for Linux)
|
||||
npm run dev:electron:wsl
|
||||
|
||||
# For WSL with GPU acceleration
|
||||
npm run dev:electron:wsl:gpu
|
||||
```
|
||||
|
||||
#### Web Browser Mode
|
||||
|
||||
```bash
|
||||
# Run in web browser (http://localhost:3007)
|
||||
npm run dev:web
|
||||
```
|
||||
|
||||
### Building for Production
|
||||
|
||||
#### Web Application
|
||||
|
||||
```bash
|
||||
# Build for web deployment (uses Vite)
|
||||
npm run build
|
||||
|
||||
# Run production build
|
||||
npm run start
|
||||
```
|
||||
|
||||
#### Desktop Application
|
||||
|
||||
```bash
|
||||
# Build for current platform (macOS/Windows/Linux)
|
||||
npm run build:electron
|
||||
|
||||
# Platform-specific builds
|
||||
npm run build:electron:mac # macOS (DMG + ZIP, x64 + arm64)
|
||||
npm run build:electron:win # Windows (NSIS installer, x64)
|
||||
npm run build:electron:linux # Linux (AppImage + DEB, x64)
|
||||
|
||||
# Output directory: apps/ui/release/
|
||||
```
|
||||
|
||||
#### Docker Deployment
|
||||
|
||||
Docker provides the most secure way to run Automaker by isolating it from your host filesystem.
|
||||
|
||||
```bash
|
||||
# Build and run with Docker Compose
|
||||
docker-compose up -d
|
||||
|
||||
# Access UI at http://localhost:3007
|
||||
# API at http://localhost:3008
|
||||
|
||||
# View logs
|
||||
docker-compose logs -f
|
||||
|
||||
# Stop containers
|
||||
docker-compose down
|
||||
```
|
||||
|
||||
##### Configuration
|
||||
|
||||
Create a `.env` file in the project root if using API key authentication:
|
||||
|
||||
```bash
|
||||
# Optional: Anthropic API key (not needed if using Claude CLI authentication)
|
||||
ANTHROPIC_API_KEY=sk-ant-...
|
||||
```
|
||||
|
||||
**Note:** Most users authenticate via Claude CLI instead of API keys. See [Claude CLI Authentication](#claude-cli-authentication-optional) below.
|
||||
|
||||
##### Working with Projects (Host Directory Access)
|
||||
|
||||
By default, the container is isolated from your host filesystem. To work on projects from your host machine, create a `docker-compose.override.yml` file (gitignored):
|
||||
|
||||
```yaml
|
||||
services:
|
||||
server:
|
||||
volumes:
|
||||
# Mount your project directories
|
||||
- /path/to/your/project:/projects/your-project
|
||||
```
|
||||
|
||||
##### Claude CLI Authentication (Optional)
|
||||
|
||||
To use Claude Code CLI authentication instead of an API key, mount your Claude CLI config directory:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
server:
|
||||
volumes:
|
||||
# Linux/macOS
|
||||
- ~/.claude:/home/automaker/.claude
|
||||
# Windows
|
||||
- C:/Users/YourName/.claude:/home/automaker/.claude
|
||||
```
|
||||
|
||||
**Note:** The Claude CLI config must be writable (do not use `:ro` flag) as the CLI writes debug files.
|
||||
|
||||
##### GitHub CLI Authentication (For Git Push/PR Operations)
|
||||
|
||||
To enable git push and GitHub CLI operations inside the container:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
server:
|
||||
volumes:
|
||||
# Mount GitHub CLI config
|
||||
# Linux/macOS
|
||||
- ~/.config/gh:/home/automaker/.config/gh
|
||||
# Windows
|
||||
- 'C:/Users/YourName/AppData/Roaming/GitHub CLI:/home/automaker/.config/gh'
|
||||
|
||||
# Mount git config for user identity (name, email)
|
||||
- ~/.gitconfig:/home/automaker/.gitconfig:ro
|
||||
environment:
|
||||
# GitHub token (required on Windows where tokens are in Credential Manager)
|
||||
# Get your token with: gh auth token
|
||||
- GH_TOKEN=${GH_TOKEN}
|
||||
```
|
||||
|
||||
Then add `GH_TOKEN` to your `.env` file:
|
||||
|
||||
```bash
|
||||
GH_TOKEN=gho_your_github_token_here
|
||||
```
|
||||
|
||||
##### Complete docker-compose.override.yml Example
|
||||
|
||||
```yaml
|
||||
services:
|
||||
server:
|
||||
volumes:
|
||||
# Your projects
|
||||
- /path/to/project1:/projects/project1
|
||||
- /path/to/project2:/projects/project2
|
||||
|
||||
# Authentication configs
|
||||
- ~/.claude:/home/automaker/.claude
|
||||
- ~/.config/gh:/home/automaker/.config/gh
|
||||
- ~/.gitconfig:/home/automaker/.gitconfig:ro
|
||||
environment:
|
||||
- GH_TOKEN=${GH_TOKEN}
|
||||
```
|
||||
|
||||
##### Architecture Support
|
||||
|
||||
The Docker image supports both AMD64 and ARM64 architectures. The GitHub CLI and Claude CLI are automatically downloaded for the correct architecture during build.
|
||||
|
||||
### Testing
|
||||
|
||||
#### End-to-End Tests (Playwright)
|
||||
|
||||
```bash
|
||||
npm run test # Headless E2E tests
|
||||
npm run test:headed # Browser visible E2E tests
|
||||
```
|
||||
|
||||
#### Unit Tests (Vitest)
|
||||
|
||||
```bash
|
||||
npm run test:server # Server unit tests
|
||||
npm run test:server:coverage # Server tests with coverage
|
||||
npm run test:packages # All shared package tests
|
||||
npm run test:all # Packages + server tests
|
||||
```
|
||||
|
||||
#### Test Configuration
|
||||
|
||||
- E2E tests run on ports 3007 (UI) and 3008 (server)
|
||||
- Automatically starts test servers before running
|
||||
- Uses Chromium browser via Playwright
|
||||
- Mock agent mode available in CI with `AUTOMAKER_MOCK_AGENT=true`
|
||||
|
||||
### Linting
|
||||
|
||||
```bash
|
||||
# Run ESLint
|
||||
npm run lint
|
||||
```
|
||||
|
||||
### Environment Configuration
|
||||
|
||||
#### Authentication (if not using Claude Code CLI)
|
||||
|
||||
- `ANTHROPIC_API_KEY` - Your Anthropic API key for Claude Agent SDK (not needed if using Claude Code CLI)
|
||||
|
||||
#### Optional - Server
|
||||
|
||||
- `PORT` - Server port (default: 3008)
|
||||
- `DATA_DIR` - Data storage directory (default: ./data)
|
||||
- `ENABLE_REQUEST_LOGGING` - HTTP request logging (default: true)
|
||||
|
||||
#### Optional - Security
|
||||
|
||||
- `AUTOMAKER_API_KEY` - Optional API authentication for the server
|
||||
- `ALLOWED_ROOT_DIRECTORY` - Restrict file operations to specific directory
|
||||
- `CORS_ORIGIN` - CORS policy (default: \*)
|
||||
|
||||
#### Optional - Development
|
||||
|
||||
- `VITE_SKIP_ELECTRON` - Skip Electron in dev mode
|
||||
- `OPEN_DEVTOOLS` - Auto-open DevTools in Electron
|
||||
|
||||
### Authentication Setup
|
||||
|
||||
#### Option 1: Claude Code CLI (Recommended)
|
||||
|
||||
Install and authenticate the Claude Code CLI following the [official quickstart guide](https://code.claude.com/docs/en/quickstart).
|
||||
|
||||
Once authenticated, Automaker will automatically detect and use your CLI credentials. No additional configuration needed!
|
||||
|
||||
#### Option 2: Direct API Key
|
||||
|
||||
If you prefer not to use the CLI, you can provide an Anthropic API key directly using one of these methods:
|
||||
|
||||
##### 2a. Shell Configuration
|
||||
|
||||
Add to your `~/.bashrc` or `~/.zshrc`:
|
||||
|
||||
```bash
|
||||
export ANTHROPIC_API_KEY="sk-ant-..."
|
||||
```
|
||||
|
||||
Then restart your terminal or run `source ~/.bashrc` (or `source ~/.zshrc`).
|
||||
|
||||
##### 2b. .env File
|
||||
|
||||
Create a `.env` file in the project root (gitignored):
|
||||
|
||||
```bash
|
||||
ANTHROPIC_API_KEY=sk-ant-...
|
||||
PORT=3008
|
||||
DATA_DIR=./data
|
||||
```
|
||||
|
||||
##### 2c. In-App Storage
|
||||
|
||||
The application can store your API key securely in the settings UI. The key is persisted in the `DATA_DIR` directory.
|
||||
|
||||
## Features
|
||||
|
||||
### Core Workflow
|
||||
|
||||
- 📋 **Kanban Board** - Visual drag-and-drop board to manage features through backlog, in progress, waiting approval, and verified stages
|
||||
- 🤖 **AI Agent Integration** - Automatic AI agent assignment to implement features when moved to "In Progress"
|
||||
- 🔀 **Git Worktree Isolation** - Each feature executes in isolated git worktrees to protect your main branch
|
||||
- 📡 **Real-time Streaming** - Watch AI agents work in real-time with live tool usage, progress updates, and task completion
|
||||
- 🔄 **Follow-up Instructions** - Send additional instructions to running agents without stopping them
|
||||
|
||||
### AI & Planning
|
||||
|
||||
- 🧠 **Multi-Model Support** - Choose from Claude Opus, Sonnet, and Haiku per feature
|
||||
- 💭 **Extended Thinking** - Enable thinking modes (none, medium, deep, ultra) for complex problem-solving
|
||||
- 📝 **Planning Modes** - Four planning levels: skip (direct implementation), lite (quick plan), spec (task breakdown), full (phased execution)
|
||||
- ✅ **Plan Approval** - Review and approve AI-generated plans before implementation begins
|
||||
- 📊 **Multi-Agent Task Execution** - Spec mode spawns dedicated agents per task for focused implementation
|
||||
|
||||
### Project Management
|
||||
|
||||
- 🔍 **Project Analysis** - AI-powered codebase analysis to understand your project structure
|
||||
- 💡 **Feature Suggestions** - AI-generated feature suggestions based on project analysis
|
||||
- 📁 **Context Management** - Add markdown, images, and documentation files that agents automatically reference
|
||||
- 🔗 **Dependency Blocking** - Features can depend on other features, enforcing execution order
|
||||
- 🌳 **Graph View** - Visualize feature dependencies with interactive graph visualization
|
||||
- 📋 **GitHub Integration** - Import issues, validate feasibility, and convert to tasks automatically
|
||||
|
||||
### Collaboration & Review
|
||||
|
||||
- 🧪 **Verification Workflow** - Features move to "Waiting Approval" for review and testing
|
||||
- 💬 **Agent Chat** - Interactive chat sessions with AI agents for exploratory work
|
||||
- 👤 **AI Profiles** - Create custom agent configurations with different prompts, models, and settings
|
||||
- 📜 **Session History** - Persistent chat sessions across restarts with full conversation history
|
||||
- 🔍 **Git Diff Viewer** - Review changes made by agents before approving
|
||||
|
||||
### Developer Tools
|
||||
|
||||
- 🖥️ **Integrated Terminal** - Full terminal access with tabs, splits, and persistent sessions
|
||||
- 🖼️ **Image Support** - Attach screenshots and diagrams to feature descriptions for visual context
|
||||
- ⚡ **Concurrent Execution** - Configure how many features can run simultaneously (default: 3)
|
||||
- ⌨️ **Keyboard Shortcuts** - Fully customizable shortcuts for navigation and actions
|
||||
- 🎨 **Theme System** - 25+ themes including Dark, Light, Dracula, Nord, Catppuccin, and more
|
||||
- 🖥️ **Cross-Platform** - Desktop app for macOS (x64, arm64), Windows (x64), and Linux (x64)
|
||||
- 🌐 **Web Mode** - Run in browser or as Electron desktop app
|
||||
|
||||
### Advanced Features
|
||||
|
||||
- 🔐 **Docker Isolation** - Security-focused Docker deployment with no host filesystem access
|
||||
- 🎯 **Worktree Management** - Create, switch, commit, and create PRs from worktrees
|
||||
- 📊 **Usage Tracking** - Monitor Claude API usage with detailed metrics
|
||||
- 🔊 **Audio Notifications** - Optional completion sounds (mutable in settings)
|
||||
- 💾 **Auto-save** - All work automatically persisted to `.automaker/` directory
|
||||
|
||||
## Tech Stack
|
||||
|
||||
### Frontend
|
||||
|
||||
- **React 19** - UI framework
|
||||
- **Vite 7** - Build tool and development server
|
||||
- **Electron 39** - Desktop application framework
|
||||
- **TypeScript 5.9** - Type safety
|
||||
- **TanStack Router** - File-based routing
|
||||
- **Zustand 5** - State management with persistence
|
||||
- **Tailwind CSS 4** - Utility-first styling with 25+ themes
|
||||
- **Radix UI** - Accessible component primitives
|
||||
- **dnd-kit** - Drag and drop for Kanban board
|
||||
- **@xyflow/react** - Graph visualization for dependencies
|
||||
- **xterm.js** - Integrated terminal emulator
|
||||
- **CodeMirror 6** - Code editor for XML/syntax highlighting
|
||||
- **Lucide Icons** - Icon library
|
||||
|
||||
### Backend
|
||||
|
||||
- **Node.js** - JavaScript runtime with ES modules
|
||||
- **Express 5** - HTTP server framework
|
||||
- **TypeScript 5.9** - Type safety
|
||||
- **Claude Agent SDK** - AI agent integration (@anthropic-ai/claude-agent-sdk)
|
||||
- **WebSocket (ws)** - Real-time event streaming
|
||||
- **node-pty** - PTY terminal sessions
|
||||
|
||||
### Testing & Quality
|
||||
|
||||
- **Playwright** - End-to-end testing
|
||||
- **Vitest** - Unit testing framework
|
||||
- **ESLint 9** - Code linting
|
||||
- **Prettier 3** - Code formatting
|
||||
- **Husky** - Git hooks for pre-commit formatting
|
||||
|
||||
### Shared Libraries
|
||||
|
||||
- **@automaker/types** - Shared TypeScript definitions
|
||||
- **@automaker/utils** - Logging, error handling, image processing
|
||||
- **@automaker/prompts** - AI prompt templates
|
||||
- **@automaker/platform** - Path management and security
|
||||
- **@automaker/model-resolver** - Claude model alias resolution
|
||||
- **@automaker/dependency-resolver** - Feature dependency ordering (see the sketch after this list)
|
||||
- **@automaker/git-utils** - Git operations and worktree management
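
To make the dependency-ordering idea concrete, here is a small topological-sort sketch under an assumed feature shape. The real `@automaker/dependency-resolver` API and types may look quite different.

```typescript
// Assumed shape for illustration; the real package's types may differ.
interface Feature {
  id: string;
  dependsOn: string[]; // ids of features that must complete first
}

// Returns feature ids in an order that respects dependencies (Kahn's algorithm).
export function orderFeatures(features: Feature[]): string[] {
  const indegree = new Map<string, number>(features.map((f) => [f.id, 0]));
  const dependents = new Map<string, string[]>();

  for (const f of features) {
    for (const dep of f.dependsOn) {
      indegree.set(f.id, (indegree.get(f.id) ?? 0) + 1);
      dependents.set(dep, [...(dependents.get(dep) ?? []), f.id]);
    }
  }

  const queue = features.filter((f) => (indegree.get(f.id) ?? 0) === 0).map((f) => f.id);
  const order: string[] = [];

  while (queue.length > 0) {
    const id = queue.shift()!;
    order.push(id);
    for (const next of dependents.get(id) ?? []) {
      const remaining = (indegree.get(next) ?? 0) - 1;
      indegree.set(next, remaining);
      if (remaining === 0) queue.push(next);
    }
  }

  if (order.length !== features.length) {
    throw new Error('Circular dependency detected between features');
  }
  return order;
}
```
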
|
||||
|
||||
## Available Views
|
||||
|
||||
Automaker provides several specialized views accessible via the sidebar or keyboard shortcuts:
|
||||
|
||||
| View               | Shortcut | Description                                                                                        |
| ------------------ | -------- | ------------------------------------------------------------------------------------------------ |
| **Board**          | `K`      | Kanban board for managing feature workflow (Backlog → In Progress → Waiting Approval → Verified)  |
| **Agent**          | `A`      | Interactive chat sessions with AI agents for exploratory work and questions                        |
| **Spec**           | `D`      | Project specification editor with AI-powered generation and feature suggestions                    |
| **Context**        | `C`      | Manage context files (markdown, images) that AI agents automatically reference                     |
| **Profiles**       | `M`      | Create and manage AI agent profiles with custom prompts and configurations                         |
| **Settings**       | `S`      | Configure themes, shortcuts, defaults, authentication, and more                                    |
| **Terminal**       | `T`      | Integrated terminal with tabs, splits, and persistent sessions                                     |
| **GitHub Issues**  | -        | Import and validate GitHub issues, convert to tasks                                                |
| **Running Agents** | -        | View all active agents across projects with status and progress                                    |
|
||||
|
||||
### Keyboard Navigation
|
||||
|
||||
All shortcuts are customizable in Settings. Default shortcuts:
|
||||
|
||||
- **Navigation:** `K` (Board), `A` (Agent), `D` (Spec), `C` (Context), `S` (Settings), `M` (Profiles), `T` (Terminal)
|
||||
- **UI:** `` ` `` (Toggle sidebar)
|
||||
- **Actions:** `N` (New item in current view), `G` (Start next features), `O` (Open project), `P` (Project picker)
|
||||
- **Projects:** `Q`/`E` (Cycle previous/next project)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Monorepo Structure
|
||||
|
||||
Automaker is built as an npm workspace monorepo with two main applications and seven shared packages:
|
||||
|
||||
```text
|
||||
automaker/
|
||||
├── apps/
|
||||
│ ├── ui/ # React + Vite + Electron frontend
|
||||
│ └── server/ # Express + WebSocket backend
|
||||
└── libs/ # Shared packages
|
||||
├── types/ # Core TypeScript definitions
|
||||
├── utils/ # Logging, errors, utilities
|
||||
├── prompts/ # AI prompt templates
|
||||
├── platform/ # Path management, security
|
||||
├── model-resolver/ # Claude model aliasing
|
||||
├── dependency-resolver/ # Feature dependency ordering
|
||||
└── git-utils/ # Git operations & worktree management
|
||||
```
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Feature Definition** - Users create feature cards on the Kanban board with descriptions, images, and configuration
|
||||
2. **Git Worktree Creation** - When a feature starts, a git worktree is created for isolated development
|
||||
3. **Agent Execution** - Claude Agent SDK executes in the worktree with full file system and command access
|
||||
4. **Real-time Streaming** - Agent output streams via WebSocket to the frontend for live monitoring (see the sketch after this list)
|
||||
5. **Plan Approval** (optional) - For spec/full planning modes, agents generate plans that require user approval
|
||||
6. **Multi-Agent Tasks** (spec mode) - Each task in the spec gets a dedicated agent for focused implementation
|
||||
7. **Verification** - Features move to "Waiting Approval" where changes can be reviewed via git diff
|
||||
8. **Integration** - After approval, changes can be committed and PRs created from the worktree
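
To illustrate step 4, here is a minimal browser-side sketch of subscribing to such a stream. The WebSocket path and the message field names are assumptions made for the example; consult the server code for the actual protocol.

```typescript
// Illustrative client: the URL path and message shape are assumed, not documented here.
type AgentEvent = { type: string; featureId?: string; text?: string };

const socket = new WebSocket('ws://localhost:3008'); // server runs on port 3008

socket.addEventListener('message', (event) => {
  const payload = JSON.parse(event.data as string) as AgentEvent;
  if (payload.type === 'agent-output') {
    console.log(`[${payload.featureId}]`, payload.text); // append to the live view in the UI
  }
});

socket.addEventListener('close', () => {
  console.warn('Agent event stream closed; the UI would attempt to reconnect here.');
});
```
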
|
||||
|
||||
### Key Architectural Patterns

- **Event-Driven Architecture** - All server operations emit events that stream to the frontend
- **Provider Pattern** - Extensible AI provider system (currently Claude, designed for future providers)
- **Service-Oriented Backend** - Modular services for agent management, features, terminals, settings
- **State Management** - Zustand with persistence for frontend state across restarts (a minimal store sketch follows this list)
- **File-Based Storage** - No database; features stored as JSON files in `.automaker/` directory

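For the state-management point, a minimal sketch of a persisted Zustand store looks like the following. This is illustrative only; the actual store shapes in `apps/ui` are not documented here, so the fields are made up:

```js
// Hedged sketch: a Zustand store with persistence, so UI state survives restarts.
// The real stores in apps/ui are more involved; field names here are invented.
import { create } from "zustand";
import { persist } from "zustand/middleware";

export const useBoardStore = create(
  persist(
    (set) => ({
      selectedProject: null,
      sidebarOpen: true,
      setSelectedProject: (projectPath) => set({ selectedProject: projectPath }),
      toggleSidebar: () => set((state) => ({ sidebarOpen: !state.sidebarOpen })),
    }),
    { name: "automaker-board-store" } // localStorage key
  )
);
```
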
### Security & Isolation

- **Git Worktrees** - Each feature executes in an isolated git worktree, protecting your main branch
- **Path Sandboxing** - Optional `ALLOWED_ROOT_DIRECTORY` restricts file access (see the sketch after this list)
- **Docker Isolation** - Recommended deployment uses Docker with no host filesystem access
- **Plan Approval** - Optional plan review before implementation prevents unwanted changes

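To illustrate the path-sandboxing idea, here is a minimal sketch assuming `ALLOWED_ROOT_DIRECTORY` holds an absolute path. This is not the code from `libs/platform`, just the general technique:

```js
// Hedged sketch: reject file access outside ALLOWED_ROOT_DIRECTORY when it is set.
const path = require("path");

function assertInsideAllowedRoot(targetPath) {
  const allowedRoot = process.env.ALLOWED_ROOT_DIRECTORY;
  if (!allowedRoot) return; // sandboxing is optional

  const resolvedRoot = path.resolve(allowedRoot);
  const resolvedTarget = path.resolve(targetPath);
  const relative = path.relative(resolvedRoot, resolvedTarget);

  // A path escapes the root if the relative path walks upward or is absolute.
  if (relative.startsWith("..") || path.isAbsolute(relative)) {
    throw new Error(`Access outside ALLOWED_ROOT_DIRECTORY is not permitted: ${targetPath}`);
  }
}
```
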
### Data Storage

Automaker uses a file-based storage system (no database required):

#### Per-Project Data

Stored in `{projectPath}/.automaker/`:

```text
.automaker/
├── features/                    # Feature JSON files and images
│   └── {featureId}/
│       ├── feature.json         # Feature metadata
│       ├── agent-output.md      # AI agent output log
│       └── images/              # Attached images
├── context/                     # Context files for AI agents
├── settings.json                # Project-specific settings
├── spec.md                      # Project specification
├── analysis.json                # Project structure analysis
└── feature-suggestions.json     # AI-generated suggestions
```

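A minimal sketch of reading this layout with Node's `fs` module follows. Only the directory structure shown above is assumed; the fields inside `feature.json` are not documented here, so the example simply parses whatever it finds:

```js
// Hedged sketch: list features stored under {projectPath}/.automaker/features/.
// Only the directory layout above is assumed; feature.json fields are not.
const fs = require("fs/promises");
const path = require("path");

async function listFeatures(projectPath) {
  const featuresDir = path.join(projectPath, ".automaker", "features");
  const entries = await fs.readdir(featuresDir, { withFileTypes: true });

  const features = [];
  for (const entry of entries) {
    if (!entry.isDirectory()) continue;
    const featureFile = path.join(featuresDir, entry.name, "feature.json");
    const raw = await fs.readFile(featureFile, "utf-8");
    features.push({ id: entry.name, ...JSON.parse(raw) });
  }
  return features;
}
```
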
#### Global Data

Stored in `DATA_DIR` (default `./data`):

```text
data/
├── settings.json              # Global settings, profiles, shortcuts
├── credentials.json           # API keys (encrypted)
├── sessions-metadata.json     # Chat session metadata
└── agent-sessions/            # Conversation histories
    └── {sessionId}.json
```

---

@@ -14,95 +620,45 @@ Automaker is an autonomous AI development studio that helps you build software f

>
> **We do not recommend running Automaker directly on your local computer** due to the risk of AI agents having access to your entire file system. Please sandbox this application using Docker or a virtual machine.
>
> **[Read the full disclaimer](./DISCLAIMER.md)**

---

## Getting Started

**Step 1:** Clone this repository:

```bash
git clone git@github.com:AutoMaker-Org/automaker.git
cd automaker
```

**Step 2:** Install dependencies:

```bash
npm install
```

**Step 3:** Run the Claude Code setup token command:

```bash
claude setup-token
```

> **⚠️ Warning:** This command will print your token to your terminal. Be careful if you're streaming or sharing your screen, as the token will be visible to anyone watching.

**Step 4:** Export the Claude Code OAuth token in your shell:

```bash
export CLAUDE_CODE_OAUTH_TOKEN="your-token-here"
```

**Step 5:** Start the development server:

```bash
npm run dev:electron
```

This will start both the Next.js development server and the Electron application.

**Step 6:** MOST IMPORTANT: Run the following after everything is set up:

```bash
echo "W"
echo "W"
echo "W"
echo "W"
echo "W"
echo "W"
echo "W"
echo "W"
```

## Features

- 📋 **Kanban Board** - Visual drag-and-drop board to manage features through backlog, in progress, waiting approval, and verified stages
- 🤖 **AI Agent Integration** - Automatic AI agent assignment to implement features when moved to "In Progress"
- 🧠 **Multi-Model Support** - Choose from multiple AI models including Claude Opus, Sonnet, and more
- 💭 **Extended Thinking** - Enable extended thinking modes for complex problem-solving
- 📡 **Real-time Agent Output** - View live agent output, logs, and file diffs as features are being implemented
- 🔍 **Project Analysis** - AI-powered project structure analysis to understand your codebase
- 📁 **Context Management** - Add context files to help AI agents understand your project better
- 💡 **Feature Suggestions** - AI-generated feature suggestions based on your project
- 🖼️ **Image Support** - Attach images and screenshots to feature descriptions
- ⚡ **Concurrent Processing** - Configure concurrency to process multiple features simultaneously
- 🧪 **Test Integration** - Automatic test running and verification for implemented features
- 🔀 **Git Integration** - View git diffs and track changes made by AI agents
- 👤 **AI Profiles** - Create and manage different AI agent profiles for various tasks
- 💬 **Chat History** - Keep track of conversations and interactions with AI agents
- ⌨️ **Keyboard Shortcuts** - Efficient navigation and actions via keyboard shortcuts
- 🎨 **Dark/Light Theme** - Beautiful UI with theme support
- 🖥️ **Cross-Platform** - Desktop application built with Electron for Windows, macOS, and Linux

## Tech Stack

- [Next.js](https://nextjs.org) - React framework
- [Electron](https://www.electronjs.org/) - Desktop application framework
- [Tailwind CSS](https://tailwindcss.com/) - Styling
- [Zustand](https://zustand-demo.pmnd.rs/) - State management
- [dnd-kit](https://dndkit.com/) - Drag and drop functionality

## Learn More

To learn more about Next.js, take a look at the following resources:

### Documentation

- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
- [Contributing Guide](./CONTRIBUTING.md) - How to contribute to Automaker
- [Project Documentation](./docs/) - Architecture guides, patterns, and developer docs
- [Docker Isolation Guide](./docs/docker-isolation.md) - Security-focused Docker deployment
- [Shared Packages Guide](./docs/llm-shared-packages.md) - Using monorepo packages

### Community

Join the **Agentic Jumpstart** Discord to connect with other builders exploring **agentic coding**:

👉 [Agentic Jumpstart Discord](https://discord.gg/jjem7aEDKU)

## License

This project is licensed under the **Automaker License Agreement**. See [LICENSE](LICENSE) for the full text.

**Summary of Terms:**

- **Allowed:**
  - **Build Anything:** You can clone and use Automaker locally or in your organization to build ANY product (commercial or free).
  - **Internal Use:** You can use it internally within your company (commercial or non-profit) without restriction.
  - **Modify:** You can modify the code for internal use within your organization (commercial or non-profit).

- **Restricted (The "No Monetization of the Tool" Rule):**
  - **No Resale:** You cannot resell Automaker itself.
  - **No SaaS:** You cannot host Automaker as a service for others.
  - **No Monetizing Mods:** You cannot distribute modified versions of Automaker for money.

- **Liability:**
  - **Use at Own Risk:** This tool uses AI. We are **NOT** responsible if it breaks your computer, deletes your files, or generates bad code. You assume all risk.

- **Contributing:**
  - By contributing to this repository, you grant the Core Contributors full, irrevocable rights to your code (copyright assignment).

**Core Contributors** (Cody Seibert (webdevcody), SuperComboGamer (SCG), Kacper Lachowicz (Shironex, Shirone), and Ben Scott (trueheads)) are granted perpetual, royalty-free licenses for any use, including monetization.

108	app/README.md (deleted; its content duplicated the root README sections shown above)
@@ -1,5 +0,0 @@
module.exports = {
  rules: {
    "@typescript-eslint/no-require-imports": "off",
  },
};
@@ -1,684 +0,0 @@
|
||||
const { query, AbortError } = require("@anthropic-ai/claude-agent-sdk");
|
||||
const path = require("path");
|
||||
const fs = require("fs/promises");
|
||||
|
||||
/**
|
||||
* Agent Service - Runs Claude agents in the Electron main process
|
||||
* This service survives Next.js restarts and maintains conversation state
|
||||
*/
|
||||
class AgentService {
|
||||
constructor() {
|
||||
this.sessions = new Map(); // sessionId -> { messages, isRunning, abortController }
|
||||
this.stateDir = null; // Will be set when app is ready
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the service with app data directory
|
||||
*/
|
||||
async initialize(appDataPath) {
|
||||
this.stateDir = path.join(appDataPath, "agent-sessions");
|
||||
this.metadataFile = path.join(appDataPath, "sessions-metadata.json");
|
||||
await fs.mkdir(this.stateDir, { recursive: true });
|
||||
console.log("[AgentService] Initialized with state dir:", this.stateDir);
|
||||
}
|
||||
|
||||
/**
|
||||
* Start or resume a conversation
|
||||
*/
|
||||
async startConversation({ sessionId, workingDirectory }) {
|
||||
console.log("[AgentService] Starting conversation:", sessionId);
|
||||
|
||||
// Initialize session if it doesn't exist
|
||||
if (!this.sessions.has(sessionId)) {
|
||||
const messages = await this.loadSession(sessionId);
|
||||
|
||||
this.sessions.set(sessionId, {
|
||||
messages,
|
||||
isRunning: false,
|
||||
abortController: null,
|
||||
workingDirectory: workingDirectory || process.cwd(),
|
||||
});
|
||||
}
|
||||
|
||||
const session = this.sessions.get(sessionId);
|
||||
return {
|
||||
success: true,
|
||||
messages: session.messages,
|
||||
sessionId,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a message to the agent and stream responses
|
||||
*/
|
||||
async sendMessage({
|
||||
sessionId,
|
||||
message,
|
||||
workingDirectory,
|
||||
imagePaths,
|
||||
sendToRenderer,
|
||||
}) {
|
||||
const session = this.sessions.get(sessionId);
|
||||
if (!session) {
|
||||
throw new Error(`Session ${sessionId} not found`);
|
||||
}
|
||||
|
||||
if (session.isRunning) {
|
||||
throw new Error("Agent is already processing a message");
|
||||
}
|
||||
|
||||
// Read images from temp files and convert to base64 for storage
|
||||
const images = [];
|
||||
if (imagePaths && imagePaths.length > 0) {
|
||||
const fs = require("fs/promises");
|
||||
const path = require("path");
|
||||
|
||||
for (const imagePath of imagePaths) {
|
||||
try {
|
||||
const imageBuffer = await fs.readFile(imagePath);
|
||||
const base64Data = imageBuffer.toString("base64");
|
||||
|
||||
// Determine media type from file extension
|
||||
const ext = path.extname(imagePath).toLowerCase();
|
||||
const mimeTypeMap = {
|
||||
".jpg": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".png": "image/png",
|
||||
".gif": "image/gif",
|
||||
".webp": "image/webp",
|
||||
};
|
||||
const mediaType = mimeTypeMap[ext] || "image/png";
|
||||
|
||||
images.push({
|
||||
data: base64Data,
|
||||
mimeType: mediaType,
|
||||
filename: path.basename(imagePath),
|
||||
});
|
||||
|
||||
console.log(
|
||||
`[AgentService] Loaded image from ${imagePath} for storage`
|
||||
);
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[AgentService] Failed to load image from ${imagePath}:`,
|
||||
error
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add user message to conversation with base64 images
|
||||
const userMessage = {
|
||||
id: this.generateId(),
|
||||
role: "user",
|
||||
content: message,
|
||||
images: images.length > 0 ? images : undefined,
|
||||
timestamp: new Date().toISOString(),
|
||||
};
|
||||
|
||||
session.messages.push(userMessage);
|
||||
session.isRunning = true;
|
||||
session.abortController = new AbortController();
|
||||
|
||||
// Send initial user message to renderer
|
||||
sendToRenderer({
|
||||
type: "message",
|
||||
message: userMessage,
|
||||
});
|
||||
|
||||
// Save state with base64 images
|
||||
await this.saveSession(sessionId, session.messages);
|
||||
|
||||
try {
|
||||
// Configure Claude Agent SDK options
|
||||
const options = {
|
||||
// model: "claude-sonnet-4-20250514",
|
||||
model: "claude-opus-4-5-20251101",
|
||||
systemPrompt: this.getSystemPrompt(),
|
||||
maxTurns: 20,
|
||||
cwd: workingDirectory || session.workingDirectory,
|
||||
allowedTools: [
|
||||
"Read",
|
||||
"Write",
|
||||
"Edit",
|
||||
"Glob",
|
||||
"Grep",
|
||||
"Bash",
|
||||
"WebSearch",
|
||||
"WebFetch",
|
||||
],
|
||||
permissionMode: "acceptEdits",
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
abortController: session.abortController,
|
||||
};
|
||||
|
||||
// Build prompt content with text and images
|
||||
let promptContent = message;
|
||||
|
||||
// If there are images, create a content array
|
||||
if (imagePaths && imagePaths.length > 0) {
|
||||
const contentBlocks = [];
|
||||
|
||||
// Add text block
|
||||
if (message && message.trim()) {
|
||||
contentBlocks.push({
|
||||
type: "text",
|
||||
text: message,
|
||||
});
|
||||
}
|
||||
|
||||
// Add image blocks
|
||||
const fs = require("fs");
|
||||
for (const imagePath of imagePaths) {
|
||||
try {
|
||||
const imageBuffer = fs.readFileSync(imagePath);
|
||||
const base64Data = imageBuffer.toString("base64");
|
||||
const ext = path.extname(imagePath).toLowerCase();
|
||||
const mimeTypeMap = {
|
||||
".jpg": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".png": "image/png",
|
||||
".gif": "image/gif",
|
||||
".webp": "image/webp",
|
||||
};
|
||||
const mediaType = mimeTypeMap[ext] || "image/png";
|
||||
|
||||
contentBlocks.push({
|
||||
type: "image",
|
||||
source: {
|
||||
type: "base64",
|
||||
media_type: mediaType,
|
||||
data: base64Data,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[AgentService] Failed to load image ${imagePath}:`,
|
||||
error
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Use content blocks if we have images
|
||||
if (
|
||||
contentBlocks.length > 1 ||
|
||||
(contentBlocks.length === 1 && contentBlocks[0].type === "image")
|
||||
) {
|
||||
promptContent = contentBlocks;
|
||||
}
|
||||
}
|
||||
|
||||
// Build payload for the SDK
|
||||
const promptPayload = Array.isArray(promptContent)
|
||||
? (async function* () {
|
||||
yield {
|
||||
type: "user",
|
||||
session_id: "",
|
||||
message: {
|
||||
role: "user",
|
||||
content: promptContent,
|
||||
},
|
||||
parent_tool_use_id: null,
|
||||
};
|
||||
})()
|
||||
: promptContent;
|
||||
|
||||
// Send the query via the SDK (conversation state handled by the SDK)
|
||||
const stream = query({ prompt: promptPayload, options });
|
||||
|
||||
let currentAssistantMessage = null;
|
||||
let responseText = "";
|
||||
const toolUses = [];
|
||||
|
||||
// Stream responses from the SDK
|
||||
for await (const msg of stream) {
|
||||
if (msg.type === "assistant") {
|
||||
if (msg.message.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === "text") {
|
||||
responseText += block.text;
|
||||
|
||||
// Create or update assistant message
|
||||
if (!currentAssistantMessage) {
|
||||
currentAssistantMessage = {
|
||||
id: this.generateId(),
|
||||
role: "assistant",
|
||||
content: responseText,
|
||||
timestamp: new Date().toISOString(),
|
||||
};
|
||||
session.messages.push(currentAssistantMessage);
|
||||
} else {
|
||||
currentAssistantMessage.content = responseText;
|
||||
}
|
||||
|
||||
// Stream to renderer
|
||||
sendToRenderer({
|
||||
type: "stream",
|
||||
messageId: currentAssistantMessage.id,
|
||||
content: responseText,
|
||||
isComplete: false,
|
||||
});
|
||||
} else if (block.type === "tool_use") {
|
||||
const toolUse = {
|
||||
name: block.name,
|
||||
input: block.input,
|
||||
};
|
||||
toolUses.push(toolUse);
|
||||
|
||||
// Send tool use notification
|
||||
sendToRenderer({
|
||||
type: "tool_use",
|
||||
tool: toolUse,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (msg.type === "result") {
|
||||
if (msg.subtype === "success" && msg.result) {
|
||||
// Use the final result
|
||||
if (currentAssistantMessage) {
|
||||
currentAssistantMessage.content = msg.result;
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
|
||||
// Send completion
|
||||
sendToRenderer({
|
||||
type: "complete",
|
||||
messageId: currentAssistantMessage?.id,
|
||||
content: responseText,
|
||||
toolUses,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Save final state
|
||||
await this.saveSession(sessionId, session.messages);
|
||||
|
||||
session.isRunning = false;
|
||||
session.abortController = null;
|
||||
|
||||
return {
|
||||
success: true,
|
||||
message: currentAssistantMessage,
|
||||
};
|
||||
} catch (error) {
|
||||
if (error instanceof AbortError || error?.name === "AbortError") {
|
||||
console.log("[AgentService] Query aborted");
|
||||
session.isRunning = false;
|
||||
session.abortController = null;
|
||||
return { success: false, aborted: true };
|
||||
}
|
||||
|
||||
console.error("[AgentService] Error:", error);
|
||||
|
||||
session.isRunning = false;
|
||||
session.abortController = null;
|
||||
|
||||
// Add error message
|
||||
const errorMessage = {
|
||||
id: this.generateId(),
|
||||
role: "assistant",
|
||||
content: `Error: ${error.message}`,
|
||||
timestamp: new Date().toISOString(),
|
||||
isError: true,
|
||||
};
|
||||
|
||||
session.messages.push(errorMessage);
|
||||
await this.saveSession(sessionId, session.messages);
|
||||
|
||||
sendToRenderer({
|
||||
type: "error",
|
||||
error: error.message,
|
||||
message: errorMessage,
|
||||
});
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get conversation history
|
||||
*/
|
||||
getHistory(sessionId) {
|
||||
const session = this.sessions.get(sessionId);
|
||||
if (!session) {
|
||||
return { success: false, error: "Session not found" };
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
messages: session.messages,
|
||||
isRunning: session.isRunning,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop current agent execution
|
||||
*/
|
||||
async stopExecution(sessionId) {
|
||||
const session = this.sessions.get(sessionId);
|
||||
if (!session) {
|
||||
return { success: false, error: "Session not found" };
|
||||
}
|
||||
|
||||
if (session.abortController) {
|
||||
session.abortController.abort();
|
||||
session.isRunning = false;
|
||||
session.abortController = null;
|
||||
}
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear conversation history
|
||||
*/
|
||||
async clearSession(sessionId) {
|
||||
const session = this.sessions.get(sessionId);
|
||||
if (session) {
|
||||
session.messages = [];
|
||||
session.isRunning = false;
|
||||
await this.saveSession(sessionId, []);
|
||||
}
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Load session from disk
|
||||
*/
|
||||
async loadSession(sessionId) {
|
||||
if (!this.stateDir) return [];
|
||||
|
||||
const sessionFile = path.join(this.stateDir, `${sessionId}.json`);
|
||||
|
||||
try {
|
||||
const data = await fs.readFile(sessionFile, "utf-8");
|
||||
const parsed = JSON.parse(data);
|
||||
console.log(
|
||||
`[AgentService] Loaded ${parsed.length} messages for ${sessionId}`
|
||||
);
|
||||
return parsed;
|
||||
} catch (error) {
|
||||
// Session doesn't exist yet
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save session to disk
|
||||
*/
|
||||
async saveSession(sessionId, messages) {
|
||||
if (!this.stateDir) return;
|
||||
|
||||
const sessionFile = path.join(this.stateDir, `${sessionId}.json`);
|
||||
|
||||
try {
|
||||
await fs.writeFile(
|
||||
sessionFile,
|
||||
JSON.stringify(messages, null, 2),
|
||||
"utf-8"
|
||||
);
|
||||
console.log(
|
||||
`[AgentService] Saved ${messages.length} messages for ${sessionId}`
|
||||
);
|
||||
|
||||
// Update timestamp
|
||||
await this.updateSessionTimestamp(sessionId);
|
||||
} catch (error) {
|
||||
console.error("[AgentService] Failed to save session:", error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get system prompt
|
||||
*/
|
||||
getSystemPrompt() {
|
||||
return `You are an AI assistant helping users build software. You are part of the Automaker application,
|
||||
which is designed to help developers plan, design, and implement software projects autonomously.
|
||||
|
||||
**Feature Storage:**
|
||||
Features are stored in .automaker/features/{id}/feature.json - each feature has its own folder.
|
||||
Use the UpdateFeatureStatus tool to manage features, not direct file edits.
|
||||
|
||||
Your role is to:
|
||||
- Help users define their project requirements and specifications
|
||||
- Ask clarifying questions to better understand their needs
|
||||
- Suggest technical approaches and architectures
|
||||
- Guide them through the development process
|
||||
- Be conversational and helpful
|
||||
- Write, edit, and modify code files as requested
|
||||
- Execute commands and tests
|
||||
- Search and analyze the codebase
|
||||
|
||||
When discussing projects, help users think through:
|
||||
- Core functionality and features
|
||||
- Technical stack choices
|
||||
- Data models and architecture
|
||||
- User experience considerations
|
||||
- Testing strategies
|
||||
|
||||
You have full access to the codebase and can:
|
||||
- Read files to understand existing code
|
||||
- Write new files
|
||||
- Edit existing files
|
||||
- Run bash commands
|
||||
- Search for code patterns
|
||||
- Execute tests and builds
|
||||
|
||||
IMPORTANT: When making file changes, be aware that the Next.js development server may restart.
|
||||
This is normal and expected. Your conversation state is preserved across these restarts.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate unique ID
|
||||
*/
|
||||
generateId() {
|
||||
return `msg_${Date.now()}_${Math.random().toString(36).substring(2, 11)}`;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Session Management
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Load all session metadata
|
||||
*/
|
||||
async loadMetadata() {
|
||||
if (!this.metadataFile) return {};
|
||||
|
||||
try {
|
||||
const data = await fs.readFile(this.metadataFile, "utf-8");
|
||||
return JSON.parse(data);
|
||||
} catch (error) {
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save session metadata
|
||||
*/
|
||||
async saveMetadata(metadata) {
|
||||
if (!this.metadataFile) return;
|
||||
|
||||
try {
|
||||
await fs.writeFile(
|
||||
this.metadataFile,
|
||||
JSON.stringify(metadata, null, 2),
|
||||
"utf-8"
|
||||
);
|
||||
} catch (error) {
|
||||
console.error("[AgentService] Failed to save metadata:", error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List all sessions
|
||||
*/
|
||||
async listSessions({ includeArchived = false } = {}) {
|
||||
const metadata = await this.loadMetadata();
|
||||
const sessions = [];
|
||||
|
||||
for (const [sessionId, meta] of Object.entries(metadata)) {
|
||||
if (!includeArchived && meta.isArchived) continue;
|
||||
|
||||
const messages = await this.loadSession(sessionId);
|
||||
const lastMessage = messages[messages.length - 1];
|
||||
|
||||
sessions.push({
|
||||
id: sessionId,
|
||||
name: meta.name || sessionId,
|
||||
projectPath: meta.projectPath || "",
|
||||
createdAt: meta.createdAt,
|
||||
updatedAt: meta.updatedAt,
|
||||
messageCount: messages.length,
|
||||
isArchived: meta.isArchived || false,
|
||||
tags: meta.tags || [],
|
||||
preview: lastMessage?.content.substring(0, 100) || "",
|
||||
});
|
||||
}
|
||||
|
||||
// Sort by most recently updated
|
||||
sessions.sort((a, b) => new Date(b.updatedAt) - new Date(a.updatedAt));
|
||||
|
||||
return sessions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new session
|
||||
*/
|
||||
async createSession({ name, projectPath, workingDirectory }) {
|
||||
const sessionId = `session_${Date.now()}_${Math.random()
|
||||
.toString(36)
|
||||
.substring(2, 11)}`;
|
||||
|
||||
const metadata = await this.loadMetadata();
|
||||
metadata[sessionId] = {
|
||||
name,
|
||||
projectPath,
|
||||
createdAt: new Date().toISOString(),
|
||||
updatedAt: new Date().toISOString(),
|
||||
isArchived: false,
|
||||
tags: [],
|
||||
};
|
||||
|
||||
await this.saveMetadata(metadata);
|
||||
|
||||
this.sessions.set(sessionId, {
|
||||
messages: [],
|
||||
isRunning: false,
|
||||
abortController: null,
|
||||
workingDirectory: workingDirectory || projectPath,
|
||||
});
|
||||
|
||||
await this.saveSession(sessionId, []);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
sessionId,
|
||||
session: metadata[sessionId],
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Update session metadata
|
||||
*/
|
||||
async updateSession({ sessionId, name, tags }) {
|
||||
const metadata = await this.loadMetadata();
|
||||
|
||||
if (!metadata[sessionId]) {
|
||||
return { success: false, error: "Session not found" };
|
||||
}
|
||||
|
||||
if (name !== undefined) metadata[sessionId].name = name;
|
||||
if (tags !== undefined) metadata[sessionId].tags = tags;
|
||||
metadata[sessionId].updatedAt = new Date().toISOString();
|
||||
|
||||
await this.saveMetadata(metadata);
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Archive a session
|
||||
*/
|
||||
async archiveSession(sessionId) {
|
||||
const metadata = await this.loadMetadata();
|
||||
|
||||
if (!metadata[sessionId]) {
|
||||
return { success: false, error: "Session not found" };
|
||||
}
|
||||
|
||||
metadata[sessionId].isArchived = true;
|
||||
metadata[sessionId].updatedAt = new Date().toISOString();
|
||||
|
||||
await this.saveMetadata(metadata);
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Unarchive a session
|
||||
*/
|
||||
async unarchiveSession(sessionId) {
|
||||
const metadata = await this.loadMetadata();
|
||||
|
||||
if (!metadata[sessionId]) {
|
||||
return { success: false, error: "Session not found" };
|
||||
}
|
||||
|
||||
metadata[sessionId].isArchived = false;
|
||||
metadata[sessionId].updatedAt = new Date().toISOString();
|
||||
|
||||
await this.saveMetadata(metadata);
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a session permanently
|
||||
*/
|
||||
async deleteSession(sessionId) {
|
||||
const metadata = await this.loadMetadata();
|
||||
|
||||
if (!metadata[sessionId]) {
|
||||
return { success: false, error: "Session not found" };
|
||||
}
|
||||
|
||||
// Remove from metadata
|
||||
delete metadata[sessionId];
|
||||
await this.saveMetadata(metadata);
|
||||
|
||||
// Remove from memory
|
||||
this.sessions.delete(sessionId);
|
||||
|
||||
// Delete session file
|
||||
const sessionFile = path.join(this.stateDir, `${sessionId}.json`);
|
||||
try {
|
||||
await fs.unlink(sessionFile);
|
||||
} catch (error) {
|
||||
console.warn("[AgentService] Failed to delete session file:", error);
|
||||
}
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Update session metadata when messages change
|
||||
*/
|
||||
async updateSessionTimestamp(sessionId) {
|
||||
const metadata = await this.loadMetadata();
|
||||
|
||||
if (metadata[sessionId]) {
|
||||
metadata[sessionId].updatedAt = new Date().toISOString();
|
||||
await this.saveMetadata(metadata);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
module.exports = new AgentService();
|
||||
1778	app/electron/main.js (diff suppressed because it is too large)
@@ -1,388 +0,0 @@
|
||||
const { contextBridge, ipcRenderer } = require("electron");
|
||||
|
||||
// Expose protected methods that allow the renderer process to use
|
||||
// the ipcRenderer without exposing the entire object
|
||||
contextBridge.exposeInMainWorld("electronAPI", {
|
||||
// IPC test
|
||||
ping: () => ipcRenderer.invoke("ping"),
|
||||
|
||||
// Dialog APIs
|
||||
openDirectory: () => ipcRenderer.invoke("dialog:openDirectory"),
|
||||
openFile: (options) => ipcRenderer.invoke("dialog:openFile", options),
|
||||
|
||||
// File system APIs
|
||||
readFile: (filePath) => ipcRenderer.invoke("fs:readFile", filePath),
|
||||
writeFile: (filePath, content) =>
|
||||
ipcRenderer.invoke("fs:writeFile", filePath, content),
|
||||
mkdir: (dirPath) => ipcRenderer.invoke("fs:mkdir", dirPath),
|
||||
readdir: (dirPath) => ipcRenderer.invoke("fs:readdir", dirPath),
|
||||
exists: (filePath) => ipcRenderer.invoke("fs:exists", filePath),
|
||||
stat: (filePath) => ipcRenderer.invoke("fs:stat", filePath),
|
||||
deleteFile: (filePath) => ipcRenderer.invoke("fs:deleteFile", filePath),
|
||||
trashItem: (filePath) => ipcRenderer.invoke("fs:trashItem", filePath),
|
||||
|
||||
// App APIs
|
||||
getPath: (name) => ipcRenderer.invoke("app:getPath", name),
|
||||
saveImageToTemp: (data, filename, mimeType, projectPath) =>
|
||||
ipcRenderer.invoke("app:saveImageToTemp", {
|
||||
data,
|
||||
filename,
|
||||
mimeType,
|
||||
projectPath,
|
||||
}),
|
||||
|
||||
// Agent APIs
|
||||
agent: {
|
||||
// Start or resume a conversation
|
||||
start: (sessionId, workingDirectory) =>
|
||||
ipcRenderer.invoke("agent:start", { sessionId, workingDirectory }),
|
||||
|
||||
// Send a message to the agent
|
||||
send: (sessionId, message, workingDirectory, imagePaths) =>
|
||||
ipcRenderer.invoke("agent:send", {
|
||||
sessionId,
|
||||
message,
|
||||
workingDirectory,
|
||||
imagePaths,
|
||||
}),
|
||||
|
||||
// Get conversation history
|
||||
getHistory: (sessionId) =>
|
||||
ipcRenderer.invoke("agent:getHistory", { sessionId }),
|
||||
|
||||
// Stop current execution
|
||||
stop: (sessionId) => ipcRenderer.invoke("agent:stop", { sessionId }),
|
||||
|
||||
// Clear conversation
|
||||
clear: (sessionId) => ipcRenderer.invoke("agent:clear", { sessionId }),
|
||||
|
||||
// Subscribe to streaming events
|
||||
onStream: (callback) => {
|
||||
const subscription = (_, data) => callback(data);
|
||||
ipcRenderer.on("agent:stream", subscription);
|
||||
// Return unsubscribe function
|
||||
return () => ipcRenderer.removeListener("agent:stream", subscription);
|
||||
},
|
||||
},
|
||||
|
||||
// Session Management APIs
|
||||
sessions: {
|
||||
// List all sessions
|
||||
list: (includeArchived) =>
|
||||
ipcRenderer.invoke("sessions:list", { includeArchived }),
|
||||
|
||||
// Create a new session
|
||||
create: (name, projectPath, workingDirectory) =>
|
||||
ipcRenderer.invoke("sessions:create", {
|
||||
name,
|
||||
projectPath,
|
||||
workingDirectory,
|
||||
}),
|
||||
|
||||
// Update session metadata
|
||||
update: (sessionId, name, tags) =>
|
||||
ipcRenderer.invoke("sessions:update", { sessionId, name, tags }),
|
||||
|
||||
// Archive a session
|
||||
archive: (sessionId) =>
|
||||
ipcRenderer.invoke("sessions:archive", { sessionId }),
|
||||
|
||||
// Unarchive a session
|
||||
unarchive: (sessionId) =>
|
||||
ipcRenderer.invoke("sessions:unarchive", { sessionId }),
|
||||
|
||||
// Delete a session permanently
|
||||
delete: (sessionId) => ipcRenderer.invoke("sessions:delete", { sessionId }),
|
||||
},
|
||||
|
||||
// Auto Mode API
|
||||
autoMode: {
|
||||
// Start auto mode
|
||||
start: (projectPath, maxConcurrency) =>
|
||||
ipcRenderer.invoke("auto-mode:start", { projectPath, maxConcurrency }),
|
||||
|
||||
// Stop auto mode
|
||||
stop: () => ipcRenderer.invoke("auto-mode:stop"),
|
||||
|
||||
// Get auto mode status
|
||||
status: () => ipcRenderer.invoke("auto-mode:status"),
|
||||
|
||||
// Run a specific feature
|
||||
runFeature: (projectPath, featureId, useWorktrees) =>
|
||||
ipcRenderer.invoke("auto-mode:run-feature", {
|
||||
projectPath,
|
||||
featureId,
|
||||
useWorktrees,
|
||||
}),
|
||||
|
||||
// Verify a specific feature by running its tests
|
||||
verifyFeature: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("auto-mode:verify-feature", {
|
||||
projectPath,
|
||||
featureId,
|
||||
}),
|
||||
|
||||
// Resume a specific feature with previous context
|
||||
resumeFeature: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("auto-mode:resume-feature", {
|
||||
projectPath,
|
||||
featureId,
|
||||
}),
|
||||
|
||||
// Check if context file exists for a feature
|
||||
contextExists: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("auto-mode:context-exists", {
|
||||
projectPath,
|
||||
featureId,
|
||||
}),
|
||||
|
||||
// Analyze a new project - kicks off an agent to analyze codebase
|
||||
analyzeProject: (projectPath) =>
|
||||
ipcRenderer.invoke("auto-mode:analyze-project", { projectPath }),
|
||||
|
||||
// Stop a specific feature
|
||||
stopFeature: (featureId) =>
|
||||
ipcRenderer.invoke("auto-mode:stop-feature", { featureId }),
|
||||
|
||||
// Follow-up on a feature with additional prompt
|
||||
followUpFeature: (projectPath, featureId, prompt, imagePaths) =>
|
||||
ipcRenderer.invoke("auto-mode:follow-up-feature", {
|
||||
projectPath,
|
||||
featureId,
|
||||
prompt,
|
||||
imagePaths,
|
||||
}),
|
||||
|
||||
// Commit changes for a feature
|
||||
commitFeature: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("auto-mode:commit-feature", {
|
||||
projectPath,
|
||||
featureId,
|
||||
}),
|
||||
|
||||
// Listen for auto mode events
|
||||
onEvent: (callback) => {
|
||||
const subscription = (_, data) => callback(data);
|
||||
ipcRenderer.on("auto-mode:event", subscription);
|
||||
|
||||
// Return unsubscribe function
|
||||
return () => {
|
||||
ipcRenderer.removeListener("auto-mode:event", subscription);
|
||||
};
|
||||
},
|
||||
},
|
||||
|
||||
// Claude CLI Detection API
|
||||
checkClaudeCli: () => ipcRenderer.invoke("claude:check-cli"),
|
||||
|
||||
// Codex CLI Detection API
|
||||
checkCodexCli: () => ipcRenderer.invoke("codex:check-cli"),
|
||||
|
||||
// Model Management APIs
|
||||
model: {
|
||||
// Get all available models from all providers
|
||||
getAvailable: () => ipcRenderer.invoke("model:get-available"),
|
||||
|
||||
// Check all provider installation status
|
||||
checkProviders: () => ipcRenderer.invoke("model:check-providers"),
|
||||
},
|
||||
|
||||
// OpenAI API
|
||||
testOpenAIConnection: (apiKey) =>
|
||||
ipcRenderer.invoke("openai:test-connection", { apiKey }),
|
||||
|
||||
// Worktree Management APIs
|
||||
worktree: {
|
||||
// Revert feature changes by removing the worktree
|
||||
revertFeature: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("worktree:revert-feature", { projectPath, featureId }),
|
||||
|
||||
// Merge feature worktree changes back to main branch
|
||||
mergeFeature: (projectPath, featureId, options) =>
|
||||
ipcRenderer.invoke("worktree:merge-feature", {
|
||||
projectPath,
|
||||
featureId,
|
||||
options,
|
||||
}),
|
||||
|
||||
// Get worktree info for a feature
|
||||
getInfo: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("worktree:get-info", { projectPath, featureId }),
|
||||
|
||||
// Get worktree status (changed files, commits)
|
||||
getStatus: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("worktree:get-status", { projectPath, featureId }),
|
||||
|
||||
// List all feature worktrees
|
||||
list: (projectPath) => ipcRenderer.invoke("worktree:list", { projectPath }),
|
||||
|
||||
// Get file diffs for a feature worktree
|
||||
getDiffs: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("worktree:get-diffs", { projectPath, featureId }),
|
||||
|
||||
// Get diff for a specific file in a worktree
|
||||
getFileDiff: (projectPath, featureId, filePath) =>
|
||||
ipcRenderer.invoke("worktree:get-file-diff", {
|
||||
projectPath,
|
||||
featureId,
|
||||
filePath,
|
||||
}),
|
||||
},
|
||||
|
||||
// Git Operations APIs (for non-worktree operations)
|
||||
git: {
|
||||
// Get file diffs for the main project
|
||||
getDiffs: (projectPath) =>
|
||||
ipcRenderer.invoke("git:get-diffs", { projectPath }),
|
||||
|
||||
// Get diff for a specific file in the main project
|
||||
getFileDiff: (projectPath, filePath) =>
|
||||
ipcRenderer.invoke("git:get-file-diff", { projectPath, filePath }),
|
||||
},
|
||||
|
||||
// Feature Suggestions API
|
||||
suggestions: {
|
||||
// Generate feature suggestions
|
||||
generate: (projectPath) =>
|
||||
ipcRenderer.invoke("suggestions:generate", { projectPath }),
|
||||
|
||||
// Stop generating suggestions
|
||||
stop: () => ipcRenderer.invoke("suggestions:stop"),
|
||||
|
||||
// Get suggestions status
|
||||
status: () => ipcRenderer.invoke("suggestions:status"),
|
||||
|
||||
// Listen for suggestions events
|
||||
onEvent: (callback) => {
|
||||
const subscription = (_, data) => callback(data);
|
||||
ipcRenderer.on("suggestions:event", subscription);
|
||||
|
||||
// Return unsubscribe function
|
||||
return () => {
|
||||
ipcRenderer.removeListener("suggestions:event", subscription);
|
||||
};
|
||||
},
|
||||
},
|
||||
|
||||
// Spec Regeneration API
|
||||
specRegeneration: {
|
||||
// Create initial app spec for a new project
|
||||
create: (projectPath, projectOverview, generateFeatures = true) =>
|
||||
ipcRenderer.invoke("spec-regeneration:create", {
|
||||
projectPath,
|
||||
projectOverview,
|
||||
generateFeatures,
|
||||
}),
|
||||
|
||||
// Regenerate the app spec
|
||||
generate: (projectPath, projectDefinition) =>
|
||||
ipcRenderer.invoke("spec-regeneration:generate", {
|
||||
projectPath,
|
||||
projectDefinition,
|
||||
}),
|
||||
|
||||
// Stop regenerating spec
|
||||
stop: () => ipcRenderer.invoke("spec-regeneration:stop"),
|
||||
|
||||
// Get regeneration status
|
||||
status: () => ipcRenderer.invoke("spec-regeneration:status"),
|
||||
|
||||
// Listen for regeneration events
|
||||
onEvent: (callback) => {
|
||||
const subscription = (_, data) => callback(data);
|
||||
ipcRenderer.on("spec-regeneration:event", subscription);
|
||||
|
||||
// Return unsubscribe function
|
||||
return () => {
|
||||
ipcRenderer.removeListener("spec-regeneration:event", subscription);
|
||||
};
|
||||
},
|
||||
},
|
||||
|
||||
// Setup & CLI Management API
|
||||
setup: {
|
||||
// Get comprehensive Claude CLI status
|
||||
getClaudeStatus: () => ipcRenderer.invoke("setup:claude-status"),
|
||||
|
||||
// Get comprehensive Codex CLI status
|
||||
getCodexStatus: () => ipcRenderer.invoke("setup:codex-status"),
|
||||
|
||||
// Install Claude CLI
|
||||
installClaude: () => ipcRenderer.invoke("setup:install-claude"),
|
||||
|
||||
// Install Codex CLI
|
||||
installCodex: () => ipcRenderer.invoke("setup:install-codex"),
|
||||
|
||||
// Authenticate Claude CLI
|
||||
authClaude: () => ipcRenderer.invoke("setup:auth-claude"),
|
||||
|
||||
// Authenticate Codex CLI with optional API key
|
||||
authCodex: (apiKey) => ipcRenderer.invoke("setup:auth-codex", { apiKey }),
|
||||
|
||||
// Store API key securely
|
||||
storeApiKey: (provider, apiKey) =>
|
||||
ipcRenderer.invoke("setup:store-api-key", { provider, apiKey }),
|
||||
|
||||
// Get stored API keys status
|
||||
getApiKeys: () => ipcRenderer.invoke("setup:get-api-keys"),
|
||||
|
||||
// Configure Codex MCP server for a project
|
||||
configureCodexMcp: (projectPath) =>
|
||||
ipcRenderer.invoke("setup:configure-codex-mcp", { projectPath }),
|
||||
|
||||
// Get platform information
|
||||
getPlatform: () => ipcRenderer.invoke("setup:get-platform"),
|
||||
|
||||
// Listen for installation progress
|
||||
onInstallProgress: (callback) => {
|
||||
const subscription = (_, data) => callback(data);
|
||||
ipcRenderer.on("setup:install-progress", subscription);
|
||||
return () => {
|
||||
ipcRenderer.removeListener("setup:install-progress", subscription);
|
||||
};
|
||||
},
|
||||
|
||||
// Listen for auth progress
|
||||
onAuthProgress: (callback) => {
|
||||
const subscription = (_, data) => callback(data);
|
||||
ipcRenderer.on("setup:auth-progress", subscription);
|
||||
return () => {
|
||||
ipcRenderer.removeListener("setup:auth-progress", subscription);
|
||||
};
|
||||
},
|
||||
},
|
||||
|
||||
// Features API
|
||||
features: {
|
||||
// Get all features for a project
|
||||
getAll: (projectPath) =>
|
||||
ipcRenderer.invoke("features:getAll", { projectPath }),
|
||||
|
||||
// Get a single feature by ID
|
||||
get: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("features:get", { projectPath, featureId }),
|
||||
|
||||
// Create a new feature
|
||||
create: (projectPath, feature) =>
|
||||
ipcRenderer.invoke("features:create", { projectPath, feature }),
|
||||
|
||||
// Update a feature (partial updates supported)
|
||||
update: (projectPath, featureId, updates) =>
|
||||
ipcRenderer.invoke("features:update", {
|
||||
projectPath,
|
||||
featureId,
|
||||
updates,
|
||||
}),
|
||||
|
||||
// Delete a feature and its folder
|
||||
delete: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("features:delete", { projectPath, featureId }),
|
||||
|
||||
// Get agent output for a feature
|
||||
getAgentOutput: (projectPath, featureId) =>
|
||||
ipcRenderer.invoke("features:getAgentOutput", { projectPath, featureId }),
|
||||
},
|
||||
});
|
||||
|
||||
// Also expose a flag to detect if we're in Electron
|
||||
contextBridge.exposeInMainWorld("isElectron", true);
|
||||
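For context, a hedged sketch of how a renderer script might call the preload bridge exposed above (illustrative usage only; the return shapes of the IPC handlers live in main.js, whose diff is suppressed, so they are assumed here):

```js
// Hedged sketch: renderer-side usage of the preload bridge above.
// window.electronAPI is only available when the preload script has run.
async function showBoard(projectPath) {
  if (!window.isElectron) return;

  // Return shape is assumed; the handler is defined in app/electron/main.js.
  const features = await window.electronAPI.features.getAll(projectPath);
  console.log("features result:", features);

  // Subscribe to agent output and keep the unsubscribe function for cleanup.
  const unsubscribe = window.electronAPI.agent.onStream((event) => {
    console.log("agent event:", event.type);
  });
  return unsubscribe;
}
```
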
@@ -1,494 +0,0 @@
|
||||
const { execSync, spawn } = require("child_process");
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
const os = require("os");
|
||||
|
||||
/**
|
||||
* Claude CLI Detector
|
||||
*
|
||||
* Authentication options:
|
||||
* 1. OAuth Token (Subscription): User runs `claude setup-token` and provides the token to the app
|
||||
* 2. API Key (Pay-per-use): User provides their Anthropic API key directly
|
||||
*/
|
||||
class ClaudeCliDetector {
|
||||
/**
|
||||
* Check if Claude Code CLI is installed and accessible
|
||||
* @returns {Object} { installed: boolean, path: string|null, version: string|null, method: 'cli'|'none' }
|
||||
*/
|
||||
/**
|
||||
* Try to get updated PATH from shell config files
|
||||
* This helps detect CLI installations that modify shell config but haven't updated the current process PATH
|
||||
*/
|
||||
static getUpdatedPathFromShellConfig() {
|
||||
const homeDir = os.homedir();
|
||||
const shell = process.env.SHELL || "/bin/bash";
|
||||
const shellName = path.basename(shell);
|
||||
|
||||
// Common shell config files
|
||||
const configFiles = [];
|
||||
if (shellName.includes("zsh")) {
|
||||
configFiles.push(path.join(homeDir, ".zshrc"));
|
||||
configFiles.push(path.join(homeDir, ".zshenv"));
|
||||
configFiles.push(path.join(homeDir, ".zprofile"));
|
||||
} else if (shellName.includes("bash")) {
|
||||
configFiles.push(path.join(homeDir, ".bashrc"));
|
||||
configFiles.push(path.join(homeDir, ".bash_profile"));
|
||||
configFiles.push(path.join(homeDir, ".profile"));
|
||||
}
|
||||
|
||||
// Also check common locations
|
||||
const commonPaths = [
|
||||
path.join(homeDir, ".local", "bin"),
|
||||
path.join(homeDir, ".cargo", "bin"),
|
||||
"/usr/local/bin",
|
||||
"/opt/homebrew/bin",
|
||||
path.join(homeDir, "bin"),
|
||||
];
|
||||
|
||||
// Try to extract PATH additions from config files
|
||||
for (const configFile of configFiles) {
|
||||
if (fs.existsSync(configFile)) {
|
||||
try {
|
||||
const content = fs.readFileSync(configFile, "utf-8");
|
||||
// Look for PATH exports that might include claude installation paths
|
||||
const pathMatches = content.match(
|
||||
/export\s+PATH=["']?([^"'\n]+)["']?/g
|
||||
);
|
||||
if (pathMatches) {
|
||||
for (const match of pathMatches) {
|
||||
const pathValue = match
|
||||
.replace(/export\s+PATH=["']?/, "")
|
||||
.replace(/["']?$/, "");
|
||||
const paths = pathValue
|
||||
.split(":")
|
||||
.filter((p) => p && !p.includes("$"));
|
||||
commonPaths.push(...paths);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Ignore errors reading config files
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return [...new Set(commonPaths)]; // Remove duplicates
|
||||
}
|
||||
|
||||
static detectClaudeInstallation() {
|
||||
console.log("[ClaudeCliDetector] Detecting Claude installation...");
|
||||
|
||||
try {
|
||||
// Method 1: Check if 'claude' command is in PATH (Unix)
|
||||
if (process.platform !== "win32") {
|
||||
try {
|
||||
const claudePath = execSync("which claude 2>/dev/null", {
|
||||
encoding: "utf-8",
|
||||
}).trim();
|
||||
if (claudePath) {
|
||||
const version = this.getClaudeVersion(claudePath);
|
||||
console.log(
|
||||
"[ClaudeCliDetector] Found claude at:",
|
||||
claudePath,
|
||||
"version:",
|
||||
version
|
||||
);
|
||||
return {
|
||||
installed: true,
|
||||
path: claudePath,
|
||||
version: version,
|
||||
method: "cli",
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// CLI not in PATH, continue checking other locations
|
||||
}
|
||||
}
|
||||
|
||||
// Method 2: Check Windows path
|
||||
if (process.platform === "win32") {
|
||||
try {
|
||||
const claudePath = execSync("where claude 2>nul", {
|
||||
encoding: "utf-8",
|
||||
})
|
||||
.trim()
|
||||
.split("\n")[0];
|
||||
if (claudePath) {
|
||||
const version = this.getClaudeVersion(claudePath);
|
||||
console.log(
|
||||
"[ClaudeCliDetector] Found claude at:",
|
||||
claudePath,
|
||||
"version:",
|
||||
version
|
||||
);
|
||||
return {
|
||||
installed: true,
|
||||
path: claudePath,
|
||||
version: version,
|
||||
method: "cli",
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Not found on Windows
|
||||
}
|
||||
}
|
||||
|
||||
// Method 3: Check for local installation
|
||||
const localClaudePath = path.join(
|
||||
os.homedir(),
|
||||
".claude",
|
||||
"local",
|
||||
"claude"
|
||||
);
|
||||
if (fs.existsSync(localClaudePath)) {
|
||||
const version = this.getClaudeVersion(localClaudePath);
|
||||
console.log(
|
||||
"[ClaudeCliDetector] Found local claude at:",
|
||||
localClaudePath,
|
||||
"version:",
|
||||
version
|
||||
);
|
||||
return {
|
||||
installed: true,
|
||||
path: localClaudePath,
|
||||
version: version,
|
||||
method: "cli-local",
|
||||
};
|
||||
}
|
||||
|
||||
// Method 4: Check common installation locations (including those from shell config)
|
||||
const commonPaths = this.getUpdatedPathFromShellConfig();
|
||||
const binaryNames = ["claude", "claude-code"];
|
||||
|
||||
for (const basePath of commonPaths) {
|
||||
for (const binaryName of binaryNames) {
|
||||
const claudePath = path.join(basePath, binaryName);
|
||||
if (fs.existsSync(claudePath)) {
|
||||
try {
|
||||
const version = this.getClaudeVersion(claudePath);
|
||||
console.log(
|
||||
"[ClaudeCliDetector] Found claude at:",
|
||||
claudePath,
|
||||
"version:",
|
||||
version
|
||||
);
|
||||
return {
|
||||
installed: true,
|
||||
path: claudePath,
|
||||
version: version,
|
||||
method: "cli",
|
||||
};
|
||||
} catch (error) {
|
||||
// File exists but can't get version, might not be executable
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Method 5: Try to source shell config and check PATH again (for Unix)
|
||||
if (process.platform !== "win32") {
|
||||
try {
|
||||
const shell = process.env.SHELL || "/bin/bash";
|
||||
const shellName = path.basename(shell);
|
||||
const homeDir = os.homedir();
|
||||
|
||||
let sourceCmd = "";
|
||||
if (shellName.includes("zsh")) {
|
||||
sourceCmd = `source ${homeDir}/.zshrc 2>/dev/null && which claude`;
|
||||
} else if (shellName.includes("bash")) {
|
||||
sourceCmd = `source ${homeDir}/.bashrc 2>/dev/null && which claude`;
|
||||
}
|
||||
|
||||
if (sourceCmd) {
|
||||
const claudePath = execSync(`bash -c "${sourceCmd}"`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 2000,
|
||||
}).trim();
|
||||
if (claudePath && claudePath.startsWith("/")) {
|
||||
const version = this.getClaudeVersion(claudePath);
|
||||
console.log(
|
||||
"[ClaudeCliDetector] Found claude via shell config at:",
|
||||
claudePath,
|
||||
"version:",
|
||||
version
|
||||
);
|
||||
return {
|
||||
installed: true,
|
||||
path: claudePath,
|
||||
version: version,
|
||||
method: "cli",
|
||||
};
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Failed to source shell config or find claude
|
||||
}
|
||||
}
|
||||
|
||||
console.log("[ClaudeCliDetector] Claude CLI not found");
|
||||
return {
|
||||
installed: false,
|
||||
path: null,
|
||||
version: null,
|
||||
method: "none",
|
||||
};
|
||||
} catch (error) {
|
||||
console.error(
|
||||
"[ClaudeCliDetector] Error detecting Claude installation:",
|
||||
error
|
||||
);
|
||||
return {
|
||||
installed: false,
|
||||
path: null,
|
||||
version: null,
|
||||
method: "none",
|
||||
error: error.message,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Claude CLI version
|
||||
* @param {string} claudePath Path to claude executable
|
||||
* @returns {string|null} Version string or null
|
||||
*/
|
||||
static getClaudeVersion(claudePath) {
|
||||
try {
|
||||
const version = execSync(`"${claudePath}" --version 2>/dev/null`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
}).trim();
|
||||
return version || null;
|
||||
} catch (error) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get authentication status
|
||||
* Checks for:
|
||||
* 1. OAuth token stored in app's credentials (from `claude setup-token`)
|
||||
* 2. API key stored in app's credentials
|
||||
* 3. API key in environment variable
|
||||
*
|
||||
* @param {string} appCredentialsPath Path to app's credentials.json
|
||||
* @returns {Object} Authentication status
|
||||
*/
|
||||
static getAuthStatus(appCredentialsPath) {
|
||||
console.log("[ClaudeCliDetector] Checking auth status...");
|
||||
|
||||
const envApiKey = process.env.ANTHROPIC_API_KEY;
|
||||
console.log("[ClaudeCliDetector] Env ANTHROPIC_API_KEY:", !!envApiKey);
|
||||
|
||||
// Check app's stored credentials
|
||||
let storedOAuthToken = null;
|
||||
let storedApiKey = null;
|
||||
|
||||
if (appCredentialsPath && fs.existsSync(appCredentialsPath)) {
|
||||
try {
|
||||
const content = fs.readFileSync(appCredentialsPath, "utf-8");
|
||||
const credentials = JSON.parse(content);
|
||||
storedOAuthToken = credentials.anthropic_oauth_token || null;
|
||||
storedApiKey =
|
||||
credentials.anthropic || credentials.anthropic_api_key || null;
|
||||
console.log("[ClaudeCliDetector] App credentials:", {
|
||||
hasOAuthToken: !!storedOAuthToken,
|
||||
hasApiKey: !!storedApiKey,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(
|
||||
"[ClaudeCliDetector] Error reading app credentials:",
|
||||
error
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Determine authentication method
|
||||
// Priority: Stored OAuth Token > Stored API Key > Env API Key
|
||||
let authenticated = false;
|
||||
let method = "none";
|
||||
|
||||
if (storedOAuthToken) {
|
||||
authenticated = true;
|
||||
method = "oauth_token";
|
||||
console.log(
|
||||
"[ClaudeCliDetector] Using stored OAuth token (subscription)"
|
||||
);
|
||||
} else if (storedApiKey) {
|
||||
authenticated = true;
|
||||
method = "api_key";
|
||||
console.log("[ClaudeCliDetector] Using stored API key");
|
||||
} else if (envApiKey) {
|
||||
authenticated = true;
|
||||
method = "api_key_env";
|
||||
console.log("[ClaudeCliDetector] Using environment API key");
|
||||
} else {
|
||||
console.log("[ClaudeCliDetector] No authentication found");
|
||||
}
|
||||
|
||||
const result = {
|
||||
authenticated,
|
||||
method,
|
||||
hasStoredOAuthToken: !!storedOAuthToken,
|
||||
hasStoredApiKey: !!storedApiKey,
|
||||
hasEnvApiKey: !!envApiKey,
|
||||
};
|
||||
|
||||
console.log("[ClaudeCliDetector] Auth status result:", result);
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Get installation info (installation status only, no auth)
|
||||
* @returns {Object} Installation info with status property
|
||||
*/
|
||||
static getInstallationInfo() {
|
||||
const installation = this.detectClaudeInstallation();
|
||||
return {
|
||||
status: installation.installed ? "installed" : "not_installed",
|
||||
installed: installation.installed,
|
||||
path: installation.path,
|
||||
version: installation.version,
|
||||
method: installation.method,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get full status including installation and auth
|
||||
* @param {string} appCredentialsPath Path to app's credentials.json
|
||||
* @returns {Object} Full status
|
||||
*/
|
||||
static getFullStatus(appCredentialsPath) {
|
||||
const installation = this.detectClaudeInstallation();
|
||||
const auth = this.getAuthStatus(appCredentialsPath);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
status: installation.installed ? "installed" : "not_installed",
|
||||
installed: installation.installed,
|
||||
path: installation.path,
|
||||
version: installation.version,
|
||||
method: installation.method,
|
||||
auth,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get installation commands for different platforms
|
||||
* @returns {Object} Installation commands
|
||||
*/
|
||||
static getInstallCommands() {
|
||||
return {
|
||||
macos: "curl -fsSL https://claude.ai/install.sh | bash",
|
||||
windows: "irm https://claude.ai/install.ps1 | iex",
|
||||
linux: "curl -fsSL https://claude.ai/install.sh | bash",
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Install Claude CLI using the official script
|
||||
* @param {Function} onProgress Callback for progress updates
|
||||
* @returns {Promise<Object>} Installation result
|
||||
*/
|
||||
static async installCli(onProgress) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const platform = process.platform;
|
||||
let command, args;
|
||||
|
||||
if (platform === "win32") {
|
||||
command = "powershell";
|
||||
args = ["-Command", "irm https://claude.ai/install.ps1 | iex"];
|
||||
} else {
|
||||
command = "bash";
|
||||
args = ["-c", "curl -fsSL https://claude.ai/install.sh | bash"];
|
||||
}
|
||||
|
||||
console.log("[ClaudeCliDetector] Installing Claude CLI...");
|
||||
|
||||
const proc = spawn(command, args, {
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
shell: false,
|
||||
});
|
||||
|
||||
let output = "";
|
||||
let errorOutput = "";
|
||||
|
||||
proc.stdout.on("data", (data) => {
|
||||
const text = data.toString();
|
||||
output += text;
|
||||
if (onProgress) {
|
||||
onProgress({ type: "stdout", data: text });
|
||||
}
|
||||
});
|
||||
|
||||
proc.stderr.on("data", (data) => {
|
||||
const text = data.toString();
|
||||
errorOutput += text;
|
||||
if (onProgress) {
|
||||
onProgress({ type: "stderr", data: text });
|
||||
}
|
||||
});
|
||||
|
||||
proc.on("close", (code) => {
|
||||
if (code === 0) {
|
||||
console.log(
|
||||
"[ClaudeCliDetector] Installation completed successfully"
|
||||
);
|
||||
resolve({
|
||||
success: true,
|
||||
output,
|
||||
message: "Claude CLI installed successfully",
|
||||
});
|
||||
} else {
|
||||
console.error(
|
||||
"[ClaudeCliDetector] Installation failed with code:",
|
||||
code
|
||||
);
|
||||
reject({
|
||||
success: false,
|
||||
error: errorOutput || `Installation failed with code ${code}`,
|
||||
output,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
proc.on("error", (error) => {
|
||||
console.error("[ClaudeCliDetector] Installation error:", error);
|
||||
reject({
|
||||
success: false,
|
||||
error: error.message,
|
||||
output,
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get instructions for setup-token command
|
||||
* @returns {Object} Setup token instructions
|
||||
*/
|
||||
static getSetupTokenInstructions() {
|
||||
const detection = this.detectClaudeInstallation();
|
||||
|
||||
if (!detection.installed) {
|
||||
return {
|
||||
success: false,
|
||||
error: "Claude CLI is not installed. Please install it first.",
|
||||
installCommands: this.getInstallCommands(),
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
command: "claude setup-token",
|
||||
instructions: [
|
||||
"1. Open your terminal",
|
||||
"2. Run: claude setup-token",
|
||||
"3. Follow the prompts to authenticate",
|
||||
"4. Copy the token that is displayed",
|
||||
"5. Paste the token in the field below",
|
||||
],
|
||||
note: "This token is from your Claude subscription and allows you to use Claude without API charges.",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = ClaudeCliDetector;
|
||||
@@ -1,675 +0,0 @@
|
||||
const { execSync, spawn } = require('child_process');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const os = require('os');
|
||||
|
||||
/**
|
||||
* Codex CLI Detector - Checks if OpenAI Codex CLI is installed
|
||||
*
|
||||
* Codex CLI is OpenAI's agent CLI tool that allows users to use
|
||||
* GPT-5.1 Codex models (gpt-5.1-codex-max, gpt-5.1-codex, etc.)
|
||||
* for code generation and agentic tasks.
|
||||
*/
|
||||
class CodexCliDetector {
|
||||
/**
|
||||
* Get the path to Codex config directory
|
||||
* @returns {string} Path to .codex directory
|
||||
*/
|
||||
static getConfigDir() {
|
||||
return path.join(os.homedir(), '.codex');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the path to Codex auth file
|
||||
* @returns {string} Path to auth.json
|
||||
*/
|
||||
static getAuthPath() {
|
||||
return path.join(this.getConfigDir(), 'auth.json');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check Codex authentication status
|
||||
* @returns {Object} Authentication status
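   *   Shape, as returned below: { authenticated, method: 'cli_verified' | 'cli_tokens' |
   *   'auth_file' | 'env_var' | 'none', hasAuthFile, hasEnvKey, authPath }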
|
||||
*/
|
||||
static checkAuth() {
|
||||
console.log('[CodexCliDetector] Checking auth status...');
|
||||
try {
|
||||
const authPath = this.getAuthPath();
|
||||
const envApiKey = process.env.OPENAI_API_KEY;
|
||||
console.log('[CodexCliDetector] Auth path:', authPath);
|
||||
console.log('[CodexCliDetector] Has env API key:', !!envApiKey);
|
||||
|
||||
// First, try to verify authentication using codex CLI command if available
|
||||
try {
|
||||
const detection = this.detectCodexInstallation();
|
||||
if (detection.installed) {
|
||||
try {
|
||||
// Use 'codex login status' to verify authentication
|
||||
const statusOutput = execSync(`"${detection.path || 'codex'}" login status 2>/dev/null`, {
|
||||
encoding: 'utf-8',
|
||||
timeout: 5000
|
||||
});
|
||||
|
||||
// If command succeeds and shows logged in status
|
||||
if (statusOutput && (statusOutput.includes('Logged in') || statusOutput.includes('Authenticated'))) {
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: 'cli_verified',
|
||||
hasAuthFile: fs.existsSync(authPath),
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (cli_verified):', result);
|
||||
return result;
|
||||
}
|
||||
} catch (statusError) {
|
||||
// status command failed, continue with file-based check
|
||||
}
|
||||
}
|
||||
} catch (verifyError) {
|
||||
// CLI verification failed, continue with file-based check
|
||||
}
|
||||
|
||||
// Check if auth file exists
|
||||
if (fs.existsSync(authPath)) {
|
||||
console.log('[CodexCliDetector] Auth file exists, reading content...');
|
||||
let auth = null;
|
||||
try {
|
||||
const content = fs.readFileSync(authPath, 'utf-8');
|
||||
auth = JSON.parse(content);
|
||||
console.log('[CodexCliDetector] Auth file content keys:', Object.keys(auth));
|
||||
console.log('[CodexCliDetector] Auth file has token object:', !!auth.token);
|
||||
if (auth.token) {
|
||||
console.log('[CodexCliDetector] Token object keys:', Object.keys(auth.token));
|
||||
}
|
||||
|
||||
// Check for token object structure (from codex auth login)
|
||||
// Structure: { token: { Id_token, access_token, refresh_token }, last_refresh: ... }
|
||||
if (auth.token && typeof auth.token === 'object') {
|
||||
const token = auth.token;
|
||||
if (token.Id_token || token.access_token || token.refresh_token || token.id_token) {
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: 'cli_tokens', // Distinguish token-based auth from API key auth
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (cli_tokens):', result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
// Check for tokens at root level (alternative structure)
|
||||
if (auth.access_token || auth.refresh_token || auth.Id_token || auth.id_token) {
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: 'cli_tokens', // These are tokens, not API keys
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (cli_tokens - root level):', result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// Check for various possible API key fields that codex might use
|
||||
// Note: access_token is NOT an API key, it's a token, so we check for it above
|
||||
if (auth.api_key || auth.openai_api_key || auth.apiKey) {
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: 'auth_file',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (auth_file - API key):', result);
|
||||
return result;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[CodexCliDetector] Error reading/parsing auth file:', error.message);
|
||||
// If we can't parse the file, we can't determine auth status
|
||||
return {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
hasAuthFile: false,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
}
|
||||
|
||||
// Also check if the file has any meaningful content (non-empty object)
|
||||
// This is a fallback - but we should still try to detect if it's tokens
|
||||
if (!auth) {
|
||||
// File exists but couldn't be parsed
|
||||
return {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
}
|
||||
|
||||
const keys = Object.keys(auth);
|
||||
console.log('[CodexCliDetector] File has content, keys:', keys);
|
||||
if (keys.length > 0) {
|
||||
// Check again for tokens in case we missed them (maybe nested differently)
|
||||
const hasTokens = keys.some(key =>
|
||||
key.toLowerCase().includes('token') ||
|
||||
key.toLowerCase().includes('refresh') ||
|
||||
(auth[key] && typeof auth[key] === 'object' && (
|
||||
auth[key].access_token || auth[key].refresh_token || auth[key].Id_token || auth[key].id_token
|
||||
))
|
||||
);
|
||||
|
||||
if (hasTokens) {
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: 'cli_tokens',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (cli_tokens - fallback detection):', result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// File exists and has content, likely authenticated
|
||||
// Try to verify by checking if codex command works
|
||||
try {
|
||||
const detection = this.detectCodexInstallation();
|
||||
if (detection.installed) {
|
||||
// Try to verify auth by running a simple command
|
||||
try {
|
||||
execSync(`"${detection.path || 'codex'}" --version 2>/dev/null`, {
|
||||
encoding: 'utf-8',
|
||||
timeout: 3000
|
||||
});
|
||||
// If command succeeds, assume authenticated
|
||||
// But check if it's likely tokens vs API key based on file structure
|
||||
const likelyTokens = keys.some(key => key.toLowerCase().includes('token') || key.toLowerCase().includes('refresh'));
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: likelyTokens ? 'cli_tokens' : 'auth_file',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (verified via CLI, method:', result.method, '):', result);
|
||||
return result;
|
||||
} catch (cmdError) {
|
||||
// Command failed, but file exists - might still be authenticated
|
||||
// Check if it's likely tokens
|
||||
const likelyTokens = keys.some(key => key.toLowerCase().includes('token') || key.toLowerCase().includes('refresh'));
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: likelyTokens ? 'cli_tokens' : 'auth_file',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (file exists, method:', result.method, '):', result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
} catch (verifyError) {
|
||||
// Verification failed, but file exists with content
|
||||
// Check if it's likely tokens
|
||||
const likelyTokens = keys.some(key => key.toLowerCase().includes('token') || key.toLowerCase().includes('refresh'));
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: likelyTokens ? 'cli_tokens' : 'auth_file',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (fallback, method:', result.method, '):', result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check environment variable
|
||||
if (envApiKey) {
|
||||
const result = {
|
||||
authenticated: true,
|
||||
method: 'env_var',
|
||||
hasAuthFile: false,
|
||||
hasEnvKey: true,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (env_var):', result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// If auth file exists but we didn't find standard keys,
|
||||
// check if codex CLI is installed and try to verify auth
|
||||
if (fs.existsSync(authPath)) {
|
||||
try {
|
||||
const detection = this.detectCodexInstallation();
|
||||
if (detection.installed) {
|
||||
// Auth file exists and CLI is installed - likely authenticated
|
||||
// The file existing is a good indicator that login was successful
|
||||
return {
|
||||
authenticated: true,
|
||||
method: 'auth_file',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
}
|
||||
} catch (verifyError) {
|
||||
// Verification attempt failed, but file exists
|
||||
// Assume authenticated if file exists
|
||||
return {
|
||||
authenticated: true,
|
||||
method: 'auth_file',
|
||||
hasAuthFile: true,
|
||||
hasEnvKey: !!envApiKey,
|
||||
authPath
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
const result = {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
hasAuthFile: false,
|
||||
hasEnvKey: false,
|
||||
authPath
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (not authenticated):', result);
|
||||
return result;
|
||||
} catch (error) {
|
||||
console.error('[CodexCliDetector] Error checking auth:', error);
|
||||
const result = {
|
||||
authenticated: false,
|
||||
method: 'none',
|
||||
error: error.message
|
||||
};
|
||||
console.log('[CodexCliDetector] Auth result (error):', result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Check if Codex CLI is installed and accessible
|
||||
* @returns {Object} { installed: boolean, path: string|null, version: string|null, method: 'cli'|'npm'|'brew'|'none' }
|
||||
*/
|
||||
static detectCodexInstallation() {
|
||||
try {
|
||||
// Method 1: Check if 'codex' command is in PATH
|
||||
try {
|
||||
const codexPath = execSync('which codex 2>/dev/null', { encoding: 'utf-8' }).trim();
|
||||
if (codexPath) {
|
||||
const version = this.getCodexVersion(codexPath);
|
||||
return {
|
||||
installed: true,
|
||||
path: codexPath,
|
||||
version: version,
|
||||
method: 'cli'
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// CLI not in PATH, continue checking other methods
|
||||
}
|
||||
|
||||
// Method 2: Check for npm global installation
|
||||
try {
|
||||
const npmListOutput = execSync('npm list -g @openai/codex --depth=0 2>/dev/null', { encoding: 'utf-8' });
|
||||
if (npmListOutput && npmListOutput.includes('@openai/codex')) {
|
||||
          // Resolve the global npm bin directory ('npm bin -g' was removed in npm 9, so derive it from the global prefix)
          const npmPrefix = execSync('npm prefix -g', { encoding: 'utf-8' }).trim();
          const npmBinPath = process.platform === 'win32' ? npmPrefix : path.join(npmPrefix, 'bin');
          const codexPath = path.join(npmBinPath, 'codex');
|
||||
const version = this.getCodexVersion(codexPath);
|
||||
return {
|
||||
installed: true,
|
||||
path: codexPath,
|
||||
version: version,
|
||||
method: 'npm'
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// npm global not found
|
||||
}
|
||||
|
||||
// Method 3: Check for Homebrew installation on macOS
|
||||
if (process.platform === 'darwin') {
|
||||
try {
|
||||
const brewList = execSync('brew list --formula 2>/dev/null', { encoding: 'utf-8' });
|
||||
if (brewList.includes('codex')) {
|
||||
const brewPrefixOutput = execSync('brew --prefix codex 2>/dev/null', { encoding: 'utf-8' }).trim();
|
||||
const codexPath = path.join(brewPrefixOutput, 'bin', 'codex');
|
||||
const version = this.getCodexVersion(codexPath);
|
||||
return {
|
||||
installed: true,
|
||||
path: codexPath,
|
||||
version: version,
|
||||
method: 'brew'
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Homebrew not found or codex not installed via brew
|
||||
}
|
||||
}
|
||||
|
||||
// Method 4: Check Windows path
|
||||
if (process.platform === 'win32') {
|
||||
try {
|
||||
const codexPath = execSync('where codex 2>nul', { encoding: 'utf-8' }).trim().split('\n')[0];
|
||||
if (codexPath) {
|
||||
const version = this.getCodexVersion(codexPath);
|
||||
return {
|
||||
installed: true,
|
||||
path: codexPath,
|
||||
version: version,
|
||||
method: 'cli'
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Not found on Windows
|
||||
}
|
||||
}
|
||||
|
||||
// Method 5: Check common installation paths
|
||||
const commonPaths = [
|
||||
path.join(os.homedir(), '.local', 'bin', 'codex'),
|
||||
path.join(os.homedir(), '.npm-global', 'bin', 'codex'),
|
||||
'/usr/local/bin/codex',
|
||||
'/opt/homebrew/bin/codex',
|
||||
];
|
||||
|
||||
for (const checkPath of commonPaths) {
|
||||
if (fs.existsSync(checkPath)) {
|
||||
const version = this.getCodexVersion(checkPath);
|
||||
return {
|
||||
installed: true,
|
||||
path: checkPath,
|
||||
version: version,
|
||||
method: 'cli'
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Method 6: Check if OPENAI_API_KEY is set (can use Codex API directly)
|
||||
if (process.env.OPENAI_API_KEY) {
|
||||
return {
|
||||
installed: false,
|
||||
path: null,
|
||||
version: null,
|
||||
method: 'api-key-only',
|
||||
hasApiKey: true
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
installed: false,
|
||||
path: null,
|
||||
version: null,
|
||||
method: 'none'
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('[CodexCliDetector] Error detecting Codex installation:', error);
|
||||
return {
|
||||
installed: false,
|
||||
path: null,
|
||||
version: null,
|
||||
method: 'none',
|
||||
error: error.message
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Codex CLI version from executable path
|
||||
* @param {string} codexPath Path to codex executable
|
||||
* @returns {string|null} Version string or null
|
||||
*/
|
||||
static getCodexVersion(codexPath) {
|
||||
try {
|
||||
const version = execSync(`"${codexPath}" --version 2>/dev/null`, { encoding: 'utf-8' }).trim();
|
||||
return version || null;
|
||||
} catch (error) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get installation info and recommendations
|
||||
* @returns {Object} Installation status and recommendations
|
||||
*/
|
||||
static getInstallationInfo() {
|
||||
const detection = this.detectCodexInstallation();
|
||||
|
||||
if (detection.installed) {
|
||||
return {
|
||||
status: 'installed',
|
||||
method: detection.method,
|
||||
version: detection.version,
|
||||
path: detection.path,
|
||||
recommendation: detection.method === 'cli'
|
||||
? 'Using Codex CLI - ready for GPT-5.1 Codex models'
|
||||
: `Using Codex CLI via ${detection.method} - ready for GPT-5.1 Codex models`
|
||||
};
|
||||
}
|
||||
|
||||
// Not installed but has API key
|
||||
if (detection.method === 'api-key-only') {
|
||||
return {
|
||||
status: 'api_key_only',
|
||||
method: 'api-key-only',
|
||||
recommendation: 'OPENAI_API_KEY detected but Codex CLI not installed. Install Codex CLI for full agentic capabilities.',
|
||||
installCommands: this.getInstallCommands()
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
status: 'not_installed',
|
||||
recommendation: 'Install OpenAI Codex CLI to use GPT-5.1 Codex models for agentic tasks',
|
||||
installCommands: this.getInstallCommands()
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get installation commands for different platforms
|
||||
* @returns {Object} Installation commands by platform
|
||||
*/
|
||||
static getInstallCommands() {
|
||||
return {
|
||||
npm: 'npm install -g @openai/codex@latest',
|
||||
macos: 'brew install codex',
|
||||
linux: 'npm install -g @openai/codex@latest',
|
||||
windows: 'npm install -g @openai/codex@latest'
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Codex CLI supports a specific model
|
||||
* @param {string} model Model name to check
|
||||
* @returns {boolean} Whether the model is supported
|
||||
*/
|
||||
static isModelSupported(model) {
|
||||
const supportedModels = [
|
||||
'gpt-5.1-codex-max',
|
||||
'gpt-5.1-codex',
|
||||
'gpt-5.1-codex-mini',
|
||||
'gpt-5.1'
|
||||
];
|
||||
return supportedModels.includes(model);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get default model for Codex CLI
|
||||
* @returns {string} Default model name
|
||||
*/
|
||||
static getDefaultModel() {
|
||||
return 'gpt-5.1-codex-max';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get comprehensive installation info including auth status
|
||||
* @returns {Object} Full status object
|
||||
*/
|
||||
static getFullStatus() {
|
||||
const installation = this.detectCodexInstallation();
|
||||
const auth = this.checkAuth();
|
||||
const info = this.getInstallationInfo();
|
||||
|
||||
return {
|
||||
...info,
|
||||
auth,
|
||||
installation
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Install Codex CLI using npm
|
||||
* @param {Function} onProgress Callback for progress updates
|
||||
* @returns {Promise<Object>} Installation result
|
||||
*/
|
||||
static async installCli(onProgress) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const command = 'npm';
|
||||
const args = ['install', '-g', '@openai/codex@latest'];
|
||||
|
||||
const proc = spawn(command, args, {
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
shell: true
|
||||
});
|
||||
|
||||
let output = '';
|
||||
let errorOutput = '';
|
||||
|
||||
proc.stdout.on('data', (data) => {
|
||||
const text = data.toString();
|
||||
output += text;
|
||||
if (onProgress) {
|
||||
onProgress({ type: 'stdout', data: text });
|
||||
}
|
||||
});
|
||||
|
||||
proc.stderr.on('data', (data) => {
|
||||
const text = data.toString();
|
||||
errorOutput += text;
|
||||
// npm often outputs progress to stderr
|
||||
if (onProgress) {
|
||||
onProgress({ type: 'stderr', data: text });
|
||||
}
|
||||
});
|
||||
|
||||
proc.on('close', (code) => {
|
||||
if (code === 0) {
|
||||
resolve({
|
||||
success: true,
|
||||
output,
|
||||
message: 'Codex CLI installed successfully'
|
||||
});
|
||||
} else {
|
||||
reject({
|
||||
success: false,
|
||||
error: errorOutput || `Installation failed with code ${code}`,
|
||||
output
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
proc.on('error', (error) => {
|
||||
reject({
|
||||
success: false,
|
||||
error: error.message,
|
||||
output
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Authenticate Codex CLI - opens browser for OAuth or stores API key
|
||||
* @param {string} apiKey Optional API key to store
|
||||
* @param {Function} onProgress Callback for progress updates
|
||||
* @returns {Promise<Object>} Authentication result
|
||||
*/
|
||||
static async authenticate(apiKey, onProgress) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const detection = this.detectCodexInstallation();
|
||||
|
||||
if (!detection.installed) {
|
||||
reject({
|
||||
success: false,
|
||||
error: 'Codex CLI is not installed'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const codexPath = detection.path || 'codex';
|
||||
|
||||
if (apiKey) {
|
||||
// Store API key directly using codex auth command
|
||||
const proc = spawn(codexPath, ['auth', 'login', '--api-key', apiKey], {
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
shell: false
|
||||
});
|
||||
|
||||
let output = '';
|
||||
let errorOutput = '';
|
||||
|
||||
proc.stdout.on('data', (data) => {
|
||||
const text = data.toString();
|
||||
output += text;
|
||||
if (onProgress) {
|
||||
onProgress({ type: 'stdout', data: text });
|
||||
}
|
||||
});
|
||||
|
||||
proc.stderr.on('data', (data) => {
|
||||
const text = data.toString();
|
||||
errorOutput += text;
|
||||
if (onProgress) {
|
||||
onProgress({ type: 'stderr', data: text });
|
||||
}
|
||||
});
|
||||
|
||||
proc.on('close', (code) => {
|
||||
if (code === 0) {
|
||||
resolve({
|
||||
success: true,
|
||||
output,
|
||||
message: 'Codex CLI authenticated successfully'
|
||||
});
|
||||
} else {
|
||||
reject({
|
||||
success: false,
|
||||
error: errorOutput || `Authentication failed with code ${code}`,
|
||||
output
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
proc.on('error', (error) => {
|
||||
reject({
|
||||
success: false,
|
||||
error: error.message,
|
||||
output
|
||||
});
|
||||
});
|
||||
} else {
|
||||
// Require manual authentication
|
||||
if (onProgress) {
|
||||
onProgress({
|
||||
type: 'info',
|
||||
data: 'Please run the following command in your terminal to authenticate:\n\ncodex auth login\n\nThen return here to continue setup.'
|
||||
});
|
||||
}
|
||||
|
||||
resolve({
|
||||
success: true,
|
||||
requiresManualAuth: true,
|
||||
command: `${codexPath} auth login`,
|
||||
message: 'Please authenticate Codex CLI manually'
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = CodexCliDetector;
|
||||
@@ -1,353 +0,0 @@
|
||||
/**
|
||||
* Codex TOML Configuration Manager
|
||||
*
|
||||
* Manages Codex CLI's TOML configuration file to add/update MCP server settings.
|
||||
* Codex CLI looks for config at:
|
||||
* - ~/.codex/config.toml (user-level)
|
||||
* - .codex/config.toml (project-level, takes precedence)
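 *
 * Illustrative example of the config section this manager writes (paths are placeholders):
 *
 *   experimental_use_rmcp_client = true
 *
 *   [mcp_servers.automaker-tools]
 *   command = "node"
 *   args = ["/path/to/mcp-server-stdio.js"]
 *   startup_timeout_sec = 10
 *   tool_timeout_sec = 60
 *   enabled_tools = ["UpdateFeatureStatus"]
 *
 *   [mcp_servers.automaker-tools.env]
 *   AUTOMAKER_PROJECT_PATH = "/path/to/project"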
|
||||
*/
|
||||
|
||||
const fs = require('fs/promises');
|
||||
const path = require('path');
|
||||
const os = require('os');
|
||||
|
||||
class CodexConfigManager {
|
||||
constructor() {
|
||||
this.userConfigPath = path.join(os.homedir(), '.codex', 'config.toml');
|
||||
this.projectConfigPath = null; // Will be set per project
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the project path for project-level config
|
||||
*/
|
||||
setProjectPath(projectPath) {
|
||||
this.projectConfigPath = path.join(projectPath, '.codex', 'config.toml');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the effective config path (project-level if exists, otherwise user-level)
|
||||
*/
|
||||
async getConfigPath() {
|
||||
if (this.projectConfigPath) {
|
||||
try {
|
||||
await fs.access(this.projectConfigPath);
|
||||
return this.projectConfigPath;
|
||||
} catch (e) {
|
||||
// Project config doesn't exist, fall back to user config
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure user config directory exists
|
||||
const userConfigDir = path.dirname(this.userConfigPath);
|
||||
try {
|
||||
await fs.mkdir(userConfigDir, { recursive: true });
|
||||
} catch (e) {
|
||||
// Directory might already exist
|
||||
}
|
||||
|
||||
return this.userConfigPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read existing TOML config (simple parser for our needs)
|
||||
*/
|
||||
async readConfig(configPath) {
|
||||
try {
|
||||
const content = await fs.readFile(configPath, 'utf-8');
|
||||
return this.parseToml(content);
|
||||
} catch (e) {
|
||||
if (e.code === 'ENOENT') {
|
||||
return {};
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple TOML parser for our specific use case
|
||||
* This is a minimal parser that handles the MCP server config structure
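   *
   * For example, the snippet
   *   [mcp_servers.automaker-tools]
   *   command = "node"
   * parses to { mcp_servers: { 'automaker-tools': { command: 'node' } } }.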
|
||||
*/
|
||||
parseToml(content) {
|
||||
const config = {};
|
||||
let currentSection = null;
|
||||
let currentSubsection = null;
|
||||
|
||||
const lines = content.split('\n');
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
|
||||
// Skip comments and empty lines
|
||||
if (!trimmed || trimmed.startsWith('#')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Section header: [section]
|
||||
const sectionMatch = trimmed.match(/^\[([^\]]+)\]$/);
|
||||
if (sectionMatch) {
|
||||
const sectionName = sectionMatch[1];
|
||||
const parts = sectionName.split('.');
|
||||
|
||||
if (parts.length === 1) {
|
||||
currentSection = parts[0];
|
||||
currentSubsection = null;
|
||||
if (!config[currentSection]) {
|
||||
config[currentSection] = {};
|
||||
}
|
||||
} else if (parts.length === 2) {
|
||||
currentSection = parts[0];
|
||||
currentSubsection = parts[1];
|
||||
if (!config[currentSection]) {
|
||||
config[currentSection] = {};
|
||||
}
|
||||
if (!config[currentSection][currentSubsection]) {
|
||||
config[currentSection][currentSubsection] = {};
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Key-value pair: key = value
|
||||
const kvMatch = trimmed.match(/^([^=]+)=(.+)$/);
|
||||
if (kvMatch) {
|
||||
const key = kvMatch[1].trim();
|
||||
let value = kvMatch[2].trim();
|
||||
|
||||
// Remove quotes if present
|
||||
if ((value.startsWith('"') && value.endsWith('"')) ||
|
||||
(value.startsWith("'") && value.endsWith("'"))) {
|
||||
value = value.slice(1, -1);
|
||||
}
|
||||
|
||||
// Parse boolean
|
||||
if (value === 'true') value = true;
|
||||
else if (value === 'false') value = false;
|
||||
// Parse number
|
||||
else if (/^-?\d+$/.test(value)) value = parseInt(value, 10);
|
||||
else if (/^-?\d+\.\d+$/.test(value)) value = parseFloat(value);
|
||||
|
||||
if (currentSubsection) {
|
||||
if (!config[currentSection][currentSubsection]) {
|
||||
config[currentSection][currentSubsection] = {};
|
||||
}
|
||||
config[currentSection][currentSubsection][key] = value;
|
||||
} else if (currentSection) {
|
||||
if (!config[currentSection]) {
|
||||
config[currentSection] = {};
|
||||
}
|
||||
config[currentSection][key] = value;
|
||||
} else {
|
||||
config[key] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert config object back to TOML format
|
||||
*/
|
||||
stringifyToml(config, indent = 0) {
|
||||
const indentStr = ' '.repeat(indent);
|
||||
let result = '';
|
||||
|
||||
for (const [key, value] of Object.entries(config)) {
|
||||
if (typeof value === 'object' && value !== null && !Array.isArray(value)) {
|
||||
// Section
|
||||
result += `${indentStr}[${key}]\n`;
|
||||
result += this.stringifyToml(value, indent);
|
||||
} else {
|
||||
// Key-value
|
||||
let valueStr = value;
|
||||
if (typeof value === 'string') {
|
||||
// Escape quotes and wrap in quotes if needed
|
||||
if (value.includes('"') || value.includes("'") || value.includes(' ')) {
|
||||
valueStr = `"${value.replace(/"/g, '\\"')}"`;
|
||||
}
|
||||
} else if (typeof value === 'boolean') {
|
||||
valueStr = value.toString();
|
||||
}
|
||||
result += `${indentStr}${key} = ${valueStr}\n`;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure the automaker-tools MCP server
|
||||
*/
|
||||
async configureMcpServer(projectPath, mcpServerScriptPath) {
|
||||
this.setProjectPath(projectPath);
|
||||
const configPath = await this.getConfigPath();
|
||||
|
||||
// Read existing config
|
||||
const config = await this.readConfig(configPath);
|
||||
|
||||
// Ensure mcp_servers section exists
|
||||
if (!config.mcp_servers) {
|
||||
config.mcp_servers = {};
|
||||
}
|
||||
|
||||
// Configure automaker-tools server
|
||||
config.mcp_servers['automaker-tools'] = {
|
||||
command: 'node',
|
||||
args: [mcpServerScriptPath],
|
||||
env: {
|
||||
AUTOMAKER_PROJECT_PATH: projectPath
|
||||
},
|
||||
startup_timeout_sec: 10,
|
||||
tool_timeout_sec: 60,
|
||||
enabled_tools: ['UpdateFeatureStatus']
|
||||
};
|
||||
|
||||
// Ensure experimental_use_rmcp_client is enabled (if needed)
|
||||
if (!config.experimental_use_rmcp_client) {
|
||||
config.experimental_use_rmcp_client = true;
|
||||
}
|
||||
|
||||
// Write config back
|
||||
await this.writeConfig(configPath, config);
|
||||
|
||||
console.log(`[CodexConfigManager] Configured automaker-tools MCP server in ${configPath}`);
|
||||
return configPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write config to TOML file
|
||||
*/
|
||||
async writeConfig(configPath, config) {
|
||||
let content = '';
|
||||
|
||||
// Write top-level keys first (preserve existing non-MCP config)
|
||||
for (const [key, value] of Object.entries(config)) {
|
||||
if (key === 'mcp_servers' || key === 'experimental_use_rmcp_client') {
|
||||
continue; // Handle these separately
|
||||
}
|
||||
if (typeof value !== 'object') {
|
||||
content += `${key} = ${this.formatValue(value)}\n`;
|
||||
}
|
||||
}
|
||||
|
||||
// Write experimental flag if enabled
|
||||
if (config.experimental_use_rmcp_client) {
|
||||
if (content && !content.endsWith('\n\n')) {
|
||||
content += '\n';
|
||||
}
|
||||
content += `experimental_use_rmcp_client = true\n`;
|
||||
}
|
||||
|
||||
// Write mcp_servers section
|
||||
if (config.mcp_servers && Object.keys(config.mcp_servers).length > 0) {
|
||||
if (content && !content.endsWith('\n\n')) {
|
||||
content += '\n';
|
||||
}
|
||||
|
||||
for (const [serverName, serverConfig] of Object.entries(config.mcp_servers)) {
|
||||
content += `\n[mcp_servers.${serverName}]\n`;
|
||||
|
||||
// Write command first
|
||||
if (serverConfig.command) {
|
||||
content += `command = "${this.escapeTomlString(serverConfig.command)}"\n`;
|
||||
}
|
||||
|
||||
// Write args
|
||||
if (serverConfig.args && Array.isArray(serverConfig.args)) {
|
||||
const argsStr = serverConfig.args.map(a => `"${this.escapeTomlString(a)}"`).join(', ');
|
||||
content += `args = [${argsStr}]\n`;
|
||||
}
|
||||
|
||||
// Write timeouts (must be before env subsection)
|
||||
if (serverConfig.startup_timeout_sec !== undefined) {
|
||||
content += `startup_timeout_sec = ${serverConfig.startup_timeout_sec}\n`;
|
||||
}
|
||||
|
||||
if (serverConfig.tool_timeout_sec !== undefined) {
|
||||
content += `tool_timeout_sec = ${serverConfig.tool_timeout_sec}\n`;
|
||||
}
|
||||
|
||||
// Write enabled_tools (must be before env subsection - at server level, not env level)
|
||||
if (serverConfig.enabled_tools && Array.isArray(serverConfig.enabled_tools)) {
|
||||
const toolsStr = serverConfig.enabled_tools.map(t => `"${this.escapeTomlString(t)}"`).join(', ');
|
||||
content += `enabled_tools = [${toolsStr}]\n`;
|
||||
}
|
||||
|
||||
// Write env section last (as a separate subsection)
|
||||
// IMPORTANT: In TOML, once we start [mcp_servers.server_name.env],
|
||||
// everything after belongs to that subsection until a new section starts
|
||||
if (serverConfig.env && typeof serverConfig.env === 'object' && Object.keys(serverConfig.env).length > 0) {
|
||||
content += `\n[mcp_servers.${serverName}.env]\n`;
|
||||
for (const [envKey, envValue] of Object.entries(serverConfig.env)) {
|
||||
content += `${envKey} = "${this.escapeTomlString(String(envValue))}"\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure directory exists
|
||||
const configDir = path.dirname(configPath);
|
||||
await fs.mkdir(configDir, { recursive: true });
|
||||
|
||||
// Write file
|
||||
await fs.writeFile(configPath, content, 'utf-8');
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape special characters in TOML strings
|
||||
*/
|
||||
escapeTomlString(str) {
|
||||
return str
|
||||
.replace(/\\/g, '\\\\')
|
||||
.replace(/"/g, '\\"')
|
||||
.replace(/\n/g, '\\n')
|
||||
.replace(/\r/g, '\\r')
|
||||
.replace(/\t/g, '\\t');
|
||||
}
|
||||
|
||||
/**
|
||||
* Format a value for TOML output
|
||||
*/
|
||||
formatValue(value) {
|
||||
if (typeof value === 'string') {
|
||||
// Escape quotes
|
||||
const escaped = value.replace(/\\/g, '\\\\').replace(/"/g, '\\"');
|
||||
return `"${escaped}"`;
|
||||
} else if (typeof value === 'boolean') {
|
||||
return value.toString();
|
||||
} else if (typeof value === 'number') {
|
||||
return value.toString();
|
||||
}
|
||||
return `"${String(value)}"`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove automaker-tools MCP server configuration
|
||||
*/
|
||||
async removeMcpServer(projectPath) {
|
||||
this.setProjectPath(projectPath);
|
||||
const configPath = await this.getConfigPath();
|
||||
|
||||
try {
|
||||
const config = await this.readConfig(configPath);
|
||||
|
||||
if (config.mcp_servers && config.mcp_servers['automaker-tools']) {
|
||||
delete config.mcp_servers['automaker-tools'];
|
||||
|
||||
// If no more MCP servers, remove the section
|
||||
if (Object.keys(config.mcp_servers).length === 0) {
|
||||
delete config.mcp_servers;
|
||||
}
|
||||
|
||||
await this.writeConfig(configPath, config);
|
||||
console.log(`[CodexConfigManager] Removed automaker-tools MCP server from ${configPath}`);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error(`[CodexConfigManager] Error removing MCP server config:`, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new CodexConfigManager();
|
||||
|
||||
|
||||
@@ -1,610 +0,0 @@
|
||||
/**
|
||||
* Codex CLI Execution Wrapper
|
||||
*
|
||||
* This module handles spawning and managing Codex CLI processes
|
||||
* for executing OpenAI model queries.
|
||||
*/
|
||||
|
||||
const { spawn } = require('child_process');
|
||||
const { EventEmitter } = require('events');
|
||||
const readline = require('readline');
|
||||
const path = require('path');
|
||||
const CodexCliDetector = require('./codex-cli-detector');
|
||||
const codexConfigManager = require('./codex-config-manager');
|
||||
|
||||
/**
|
||||
* Message types from Codex CLI JSON output
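 *
 * Each stdout line from `codex exec --json` is one JSON event; an illustrative
 * (assumed) example of the shape consumed by convertToClaudeFormat below:
 *   {"type":"item.completed","item":{"type":"agent_message","text":"Done."}}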
|
||||
*/
|
||||
const CODEX_EVENT_TYPES = {
|
||||
THREAD_STARTED: 'thread.started',
|
||||
ITEM_STARTED: 'item.started',
|
||||
ITEM_COMPLETED: 'item.completed',
|
||||
THREAD_COMPLETED: 'thread.completed',
|
||||
ERROR: 'error'
|
||||
};
|
||||
|
||||
/**
|
||||
* Codex Executor - Manages Codex CLI process execution
|
||||
*/
|
||||
class CodexExecutor extends EventEmitter {
|
||||
constructor() {
|
||||
super();
|
||||
this.currentProcess = null;
|
||||
this.codexPath = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find and cache the Codex CLI path
|
||||
* @returns {string|null} Path to codex executable
|
||||
*/
|
||||
findCodexPath() {
|
||||
if (this.codexPath) {
|
||||
return this.codexPath;
|
||||
}
|
||||
|
||||
const installation = CodexCliDetector.detectCodexInstallation();
|
||||
if (installation.installed && installation.path) {
|
||||
this.codexPath = installation.path;
|
||||
return this.codexPath;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a Codex CLI query
|
||||
* @param {Object} options Execution options
|
||||
* @param {string} options.prompt The prompt to execute
|
||||
* @param {string} options.model Model to use (default: gpt-5.1-codex-max)
|
||||
* @param {string} options.cwd Working directory
|
||||
* @param {string} options.systemPrompt System prompt (optional, will be prepended to prompt)
|
||||
* @param {number} options.maxTurns Not used - Codex CLI doesn't support this parameter
|
||||
* @param {string[]} options.allowedTools Not used - Codex CLI doesn't support this parameter
|
||||
* @param {Object} options.env Environment variables
|
||||
* @param {Object} options.mcpServers MCP servers configuration (for configuring Codex TOML)
|
||||
* @returns {AsyncGenerator} Generator yielding messages
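   *
   * Usage sketch (via the exported singleton):
   *   for await (const msg of codexExecutor.execute({ prompt, cwd })) { ... }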
|
||||
*/
|
||||
async *execute(options) {
|
||||
const {
|
||||
prompt,
|
||||
model = 'gpt-5.1-codex-max',
|
||||
cwd = process.cwd(),
|
||||
systemPrompt,
|
||||
maxTurns, // Not used by Codex CLI
|
||||
allowedTools, // Not used by Codex CLI
|
||||
env = {},
|
||||
mcpServers = null
|
||||
} = options;
|
||||
|
||||
const codexPath = this.findCodexPath();
|
||||
if (!codexPath) {
|
||||
yield {
|
||||
type: 'error',
|
||||
error: 'Codex CLI not found. Please install it with: npm install -g @openai/codex@latest'
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
// Configure MCP server if provided
|
||||
if (mcpServers && mcpServers['automaker-tools']) {
|
||||
try {
|
||||
// Get the absolute path to the MCP server script
|
||||
const mcpServerScriptPath = path.resolve(__dirname, 'mcp-server-stdio.js');
|
||||
|
||||
// Verify the script exists
|
||||
const fs = require('fs');
|
||||
if (!fs.existsSync(mcpServerScriptPath)) {
|
||||
console.warn(`[CodexExecutor] MCP server script not found at ${mcpServerScriptPath}, skipping MCP configuration`);
|
||||
} else {
|
||||
// Configure Codex TOML to use the MCP server
|
||||
await codexConfigManager.configureMcpServer(cwd, mcpServerScriptPath);
|
||||
console.log('[CodexExecutor] Configured automaker-tools MCP server for Codex CLI');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[CodexExecutor] Failed to configure MCP server:', error);
|
||||
// Continue execution even if MCP config fails - Codex will work without MCP tools
|
||||
}
|
||||
}
|
||||
|
||||
// Combine system prompt with main prompt if provided
|
||||
// Codex CLI doesn't support --system-prompt argument, so we prepend it to the prompt
|
||||
let combinedPrompt = prompt;
|
||||
console.log('[CodexExecutor] Original prompt length:', prompt?.length || 0);
|
||||
if (systemPrompt) {
|
||||
combinedPrompt = `${systemPrompt}\n\n---\n\n${prompt}`;
|
||||
console.log('[CodexExecutor] System prompt prepended to main prompt');
|
||||
console.log('[CodexExecutor] System prompt length:', systemPrompt.length);
|
||||
console.log('[CodexExecutor] Combined prompt length:', combinedPrompt.length);
|
||||
}
|
||||
|
||||
// Build command arguments
|
||||
// Note: maxTurns and allowedTools are not supported by Codex CLI
|
||||
console.log('[CodexExecutor] Building command arguments...');
|
||||
const args = this.buildArgs({
|
||||
prompt: combinedPrompt,
|
||||
model
|
||||
});
|
||||
|
||||
console.log('[CodexExecutor] Executing command:', codexPath);
|
||||
console.log('[CodexExecutor] Number of args:', args.length);
|
||||
console.log('[CodexExecutor] Args (without prompt):', args.slice(0, -1).join(' '));
|
||||
console.log('[CodexExecutor] Prompt length in args:', args[args.length - 1]?.length || 0);
|
||||
console.log('[CodexExecutor] Prompt preview (first 200 chars):', args[args.length - 1]?.substring(0, 200));
|
||||
console.log('[CodexExecutor] Working directory:', cwd);
|
||||
|
||||
// Spawn the process
|
||||
const processEnv = {
|
||||
...process.env,
|
||||
...env,
|
||||
// Ensure OPENAI_API_KEY is available
|
||||
OPENAI_API_KEY: env.OPENAI_API_KEY || process.env.OPENAI_API_KEY
|
||||
};
|
||||
|
||||
// Log API key status (without exposing the key)
|
||||
if (processEnv.OPENAI_API_KEY) {
|
||||
console.log('[CodexExecutor] OPENAI_API_KEY is set (length:', processEnv.OPENAI_API_KEY.length, ')');
|
||||
} else {
|
||||
console.warn('[CodexExecutor] WARNING: OPENAI_API_KEY is not set!');
|
||||
}
|
||||
|
||||
console.log('[CodexExecutor] Spawning process...');
|
||||
const proc = spawn(codexPath, args, {
|
||||
cwd,
|
||||
env: processEnv,
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
this.currentProcess = proc;
|
||||
console.log('[CodexExecutor] Process spawned with PID:', proc.pid);
|
||||
|
||||
// Track process events
|
||||
proc.on('error', (error) => {
|
||||
console.error('[CodexExecutor] Process error:', error);
|
||||
});
|
||||
|
||||
proc.on('spawn', () => {
|
||||
console.log('[CodexExecutor] Process spawned successfully');
|
||||
});
|
||||
|
||||
// Collect stderr output as it comes in
|
||||
let stderr = '';
|
||||
let hasOutput = false;
|
||||
let stdoutChunks = [];
|
||||
let stderrChunks = [];
|
||||
|
||||
proc.stderr.on('data', (data) => {
|
||||
const errorText = data.toString();
|
||||
stderr += errorText;
|
||||
stderrChunks.push(errorText);
|
||||
hasOutput = true;
|
||||
console.error('[CodexExecutor] stderr chunk received (', data.length, 'bytes):', errorText.substring(0, 200));
|
||||
});
|
||||
|
||||
proc.stderr.on('end', () => {
|
||||
console.log('[CodexExecutor] stderr stream ended. Total chunks:', stderrChunks.length, 'Total length:', stderr.length);
|
||||
});
|
||||
|
||||
proc.stdout.on('data', (data) => {
|
||||
const text = data.toString();
|
||||
stdoutChunks.push(text);
|
||||
hasOutput = true;
|
||||
console.log('[CodexExecutor] stdout chunk received (', data.length, 'bytes):', text.substring(0, 200));
|
||||
});
|
||||
|
||||
proc.stdout.on('end', () => {
|
||||
console.log('[CodexExecutor] stdout stream ended. Total chunks:', stdoutChunks.length);
|
||||
});
|
||||
|
||||
// Create readline interface for parsing JSONL output
|
||||
console.log('[CodexExecutor] Creating readline interface...');
|
||||
const rl = readline.createInterface({
|
||||
input: proc.stdout,
|
||||
crlfDelay: Infinity
|
||||
});
|
||||
|
||||
// Track accumulated content for converting to Claude format
|
||||
let accumulatedText = '';
|
||||
let toolUses = [];
|
||||
let lastOutputTime = Date.now();
|
||||
const OUTPUT_TIMEOUT = 30000; // 30 seconds timeout for no output
|
||||
let lineCount = 0;
|
||||
let jsonParseErrors = 0;
|
||||
|
||||
// Set up timeout check
|
||||
const checkTimeout = setInterval(() => {
|
||||
const timeSinceLastOutput = Date.now() - lastOutputTime;
|
||||
if (timeSinceLastOutput > OUTPUT_TIMEOUT && !hasOutput) {
|
||||
console.warn('[CodexExecutor] No output received for', timeSinceLastOutput, 'ms. Process still alive:', !proc.killed);
|
||||
}
|
||||
}, 5000);
|
||||
|
||||
console.log('[CodexExecutor] Starting to read lines from stdout...');
|
||||
|
||||
// Process stdout line by line (JSONL format)
|
||||
try {
|
||||
for await (const line of rl) {
|
||||
hasOutput = true;
|
||||
lastOutputTime = Date.now();
|
||||
lineCount++;
|
||||
|
||||
console.log('[CodexExecutor] Line', lineCount, 'received (length:', line.length, '):', line.substring(0, 100));
|
||||
|
||||
if (!line.trim()) {
|
||||
console.log('[CodexExecutor] Skipping empty line');
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
const event = JSON.parse(line);
|
||||
console.log('[CodexExecutor] Successfully parsed JSON event. Type:', event.type, 'Keys:', Object.keys(event));
|
||||
|
||||
const convertedMsg = this.convertToClaudeFormat(event);
|
||||
console.log('[CodexExecutor] Converted message:', convertedMsg ? { type: convertedMsg.type } : 'null');
|
||||
|
||||
if (convertedMsg) {
|
||||
// Accumulate text content
|
||||
if (convertedMsg.type === 'assistant' && convertedMsg.message?.content) {
|
||||
for (const block of convertedMsg.message.content) {
|
||||
if (block.type === 'text') {
|
||||
accumulatedText += block.text;
|
||||
console.log('[CodexExecutor] Accumulated text block (total length:', accumulatedText.length, ')');
|
||||
} else if (block.type === 'tool_use') {
|
||||
toolUses.push(block);
|
||||
console.log('[CodexExecutor] Tool use detected:', block.name);
|
||||
}
|
||||
}
|
||||
}
|
||||
console.log('[CodexExecutor] Yielding message of type:', convertedMsg.type);
|
||||
yield convertedMsg;
|
||||
} else {
|
||||
console.log('[CodexExecutor] Converted message is null, skipping');
|
||||
}
|
||||
} catch (parseError) {
|
||||
jsonParseErrors++;
|
||||
// Non-JSON output, yield as text
|
||||
console.log('[CodexExecutor] JSON parse error (', jsonParseErrors, 'total):', parseError.message);
|
||||
console.log('[CodexExecutor] Non-JSON line content:', line.substring(0, 200));
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{ type: 'text', text: line + '\n' }]
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
console.log('[CodexExecutor] Finished reading all lines. Total lines:', lineCount, 'JSON errors:', jsonParseErrors);
|
||||
} catch (readError) {
|
||||
console.error('[CodexExecutor] Error reading from readline:', readError);
|
||||
throw readError;
|
||||
} finally {
|
||||
clearInterval(checkTimeout);
|
||||
console.log('[CodexExecutor] Cleaned up timeout checker');
|
||||
}
|
||||
|
||||
// Handle process completion
|
||||
console.log('[CodexExecutor] Waiting for process to close...');
|
||||
const exitCode = await new Promise((resolve) => {
|
||||
proc.on('close', (code, signal) => {
|
||||
console.log('[CodexExecutor] Process closed with code:', code, 'signal:', signal);
|
||||
resolve(code);
|
||||
});
|
||||
});
|
||||
|
||||
this.currentProcess = null;
|
||||
console.log('[CodexExecutor] Process completed. Exit code:', exitCode, 'Has output:', hasOutput, 'Stderr length:', stderr.length);
|
||||
|
||||
// Wait a bit for any remaining stderr data to be collected
|
||||
console.log('[CodexExecutor] Waiting 200ms for any remaining stderr data...');
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
console.log('[CodexExecutor] Final stderr length:', stderr.length, 'Final stdout chunks:', stdoutChunks.length);
|
||||
|
||||
if (exitCode !== 0) {
|
||||
const errorMessage = stderr.trim()
|
||||
? `Codex CLI exited with code ${exitCode}.\n\nError output:\n${stderr}`
|
||||
: `Codex CLI exited with code ${exitCode}. No error output captured.`;
|
||||
|
||||
console.error('[CodexExecutor] Process failed with exit code', exitCode);
|
||||
console.error('[CodexExecutor] Error message:', errorMessage);
|
||||
console.error('[CodexExecutor] Stderr chunks:', stderrChunks.length, 'Stdout chunks:', stdoutChunks.length);
|
||||
|
||||
yield {
|
||||
type: 'error',
|
||||
error: errorMessage
|
||||
};
|
||||
} else if (!hasOutput && !stderr) {
|
||||
// Process exited successfully but produced no output - might be API key issue
|
||||
const warningMessage = 'Codex CLI completed but produced no output. This might indicate:\n' +
|
||||
'- Missing or invalid OPENAI_API_KEY\n' +
|
||||
'- Codex CLI configuration issue\n' +
|
||||
'- The process completed without generating any response\n\n' +
|
||||
`Debug info: Exit code ${exitCode}, stdout chunks: ${stdoutChunks.length}, stderr chunks: ${stderrChunks.length}, lines read: ${lineCount}`;
|
||||
|
||||
console.warn('[CodexExecutor] No output detected:', warningMessage);
|
||||
console.warn('[CodexExecutor] Stdout chunks:', stdoutChunks);
|
||||
console.warn('[CodexExecutor] Stderr chunks:', stderrChunks);
|
||||
|
||||
yield {
|
||||
type: 'error',
|
||||
error: warningMessage
|
||||
};
|
||||
} else {
|
||||
console.log('[CodexExecutor] Process completed successfully. Exit code:', exitCode, 'Lines processed:', lineCount);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build command arguments for Codex CLI
|
||||
* Only includes supported arguments based on Codex CLI help:
|
||||
* - --model: Model to use
|
||||
* - --json: JSON output format
|
||||
* - --full-auto: Non-interactive automatic execution
|
||||
*
|
||||
* Note: Codex CLI does NOT support:
|
||||
* - --system-prompt (system prompt is prepended to main prompt)
|
||||
* - --max-turns (not available in CLI)
|
||||
* - --tools (not available in CLI)
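   *
   * The resulting invocation therefore has the form:
   *   codex exec --model <model> --json --full-auto "<prompt>"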
|
||||
*
|
||||
* @param {Object} options Options
|
||||
* @returns {string[]} Command arguments
|
||||
*/
|
||||
buildArgs(options) {
|
||||
const { prompt, model } = options;
|
||||
|
||||
console.log('[CodexExecutor] buildArgs called with model:', model, 'prompt length:', prompt?.length || 0);
|
||||
|
||||
const args = ['exec'];
|
||||
|
||||
// Add model (required for most use cases)
|
||||
if (model) {
|
||||
args.push('--model', model);
|
||||
console.log('[CodexExecutor] Added model argument:', model);
|
||||
}
|
||||
|
||||
// Add JSON output flag for structured parsing
|
||||
args.push('--json');
|
||||
console.log('[CodexExecutor] Added --json flag');
|
||||
|
||||
// Add full-auto mode (non-interactive)
|
||||
// This enables automatic execution with workspace-write sandbox
|
||||
args.push('--full-auto');
|
||||
console.log('[CodexExecutor] Added --full-auto flag');
|
||||
|
||||
// Add the prompt at the end
|
||||
args.push(prompt);
|
||||
console.log('[CodexExecutor] Added prompt (length:', prompt?.length || 0, ')');
|
||||
|
||||
console.log('[CodexExecutor] Final args count:', args.length);
|
||||
return args;
|
||||
}
|
||||
|
||||
/**
|
||||
* Map Claude tool names to Codex tool names
|
||||
* @param {string[]} tools Array of tool names
|
||||
* @returns {string[]} Mapped tool names
|
||||
*/
|
||||
mapToolsToCodex(tools) {
|
||||
const toolMap = {
|
||||
'Read': 'read',
|
||||
'Write': 'write',
|
||||
'Edit': 'edit',
|
||||
'Bash': 'bash',
|
||||
'Glob': 'glob',
|
||||
'Grep': 'grep',
|
||||
'WebSearch': 'web-search',
|
||||
'WebFetch': 'web-fetch'
|
||||
};
|
||||
|
||||
return tools
|
||||
.map(tool => toolMap[tool] || tool.toLowerCase())
|
||||
.filter(tool => tool); // Remove undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert Codex JSONL event to Claude SDK message format
|
||||
* @param {Object} event Codex event object
|
||||
* @returns {Object|null} Claude-format message or null
|
||||
*/
|
||||
convertToClaudeFormat(event) {
|
||||
console.log('[CodexExecutor] Converting event:', JSON.stringify(event).substring(0, 200));
|
||||
const { type, data, item, thread_id } = event;
|
||||
|
||||
switch (type) {
|
||||
case CODEX_EVENT_TYPES.THREAD_STARTED:
|
||||
case 'thread.started':
|
||||
// Session initialization
|
||||
return {
|
||||
type: 'session_start',
          sessionId: thread_id || data?.thread_id || event.thread_id
        };

      case CODEX_EVENT_TYPES.ITEM_COMPLETED:
      case 'item.completed':
        // Codex uses 'item' field, not 'data'
        return this.convertItemCompleted(item || data);

      case CODEX_EVENT_TYPES.ITEM_STARTED:
      case 'item.started':
        // Convert item.started events - these indicate tool/command usage
        const startedItem = item || data;
        if (startedItem?.type === 'command_execution' && startedItem?.command) {
          return {
            type: 'assistant',
            message: {
              content: [{
                type: 'tool_use',
                name: 'bash',
                input: { command: startedItem.command }
              }]
            }
          };
        }
        // For other item.started types, return null (we'll show the completed version)
        return null;

      case CODEX_EVENT_TYPES.THREAD_COMPLETED:
      case 'thread.completed':
        return {
          type: 'complete',
          sessionId: thread_id || data?.thread_id || event.thread_id
        };

      case CODEX_EVENT_TYPES.ERROR:
      case 'error':
        return {
          type: 'error',
          error: data?.message || item?.message || event.message || 'Unknown error from Codex CLI'
        };

      case 'turn.started':
        // Turn started - just a marker, no need to convert
        return null;

      default:
        // Pass through other events
        console.log('[CodexExecutor] Unhandled event type:', type);
        return null;
    }
  }

  /**
   * Convert item.completed event to Claude format
   * @param {Object} item Event item data
   * @returns {Object|null} Claude-format message
   */
  convertItemCompleted(item) {
    if (!item) {
      console.log('[CodexExecutor] convertItemCompleted: item is null/undefined');
      return null;
    }

    const itemType = item.type || item.item_type;
    console.log('[CodexExecutor] convertItemCompleted: itemType =', itemType, 'item keys:', Object.keys(item));

    switch (itemType) {
      case 'reasoning':
        // Thinking/reasoning output - Codex uses 'text' field
        const reasoningText = item.text || item.content || '';
        console.log('[CodexExecutor] Converting reasoning, text length:', reasoningText.length);
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'thinking',
              thinking: reasoningText
            }]
          }
        };

      case 'agent_message':
      case 'message':
        // Assistant text message
        const messageText = item.content || item.text || '';
        console.log('[CodexExecutor] Converting message, text length:', messageText.length);
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'text',
              text: messageText
            }]
          }
        };

      case 'command_execution':
        // Command execution - show both the command and its output
        const command = item.command || '';
        const output = item.aggregated_output || item.output || '';
        console.log('[CodexExecutor] Converting command_execution, command:', command.substring(0, 50), 'output length:', output.length);

        // Return as text message showing the command and output
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'text',
              text: `\`\`\`bash\n${command}\n\`\`\`\n\n${output}`
            }]
          }
        };

      case 'tool_use':
        // Tool use
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'tool_use',
              name: item.tool || item.command || 'unknown',
              input: item.input || item.args || {}
            }]
          }
        };

      case 'tool_result':
        // Tool result
        return {
          type: 'tool_result',
          tool_use_id: item.tool_use_id,
          content: item.output || item.result
        };

      case 'todo_list':
        // Todo list - convert to text format
        const todos = item.items || [];
        const todoText = todos.map((t, i) => `${i + 1}. ${t.text || t}`).join('\n');
        console.log('[CodexExecutor] Converting todo_list, items:', todos.length);
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'text',
              text: `**Todo List:**\n${todoText}`
            }]
          }
        };

      default:
        // Generic text output
        const text = item.text || item.content || item.aggregated_output;
        if (text) {
          console.log('[CodexExecutor] Converting default item type, text length:', text.length);
          return {
            type: 'assistant',
            message: {
              content: [{
                type: 'text',
                text: String(text)
              }]
            }
          };
        }
        console.log('[CodexExecutor] convertItemCompleted: No text content found, returning null');
        return null;
    }
  }

  /**
   * Abort current execution
   */
  abort() {
    if (this.currentProcess) {
      console.log('[CodexExecutor] Aborting current process');
      this.currentProcess.kill('SIGTERM');
      this.currentProcess = null;
    }
  }

  /**
   * Check if execution is in progress
   * @returns {boolean} Whether execution is in progress
   */
  isRunning() {
    return this.currentProcess !== null;
  }
}

// Singleton instance
const codexExecutor = new CodexExecutor();

module.exports = codexExecutor;
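For readers skimming the diff, a minimal sketch (not part of the removed file) of the mapping convertItemCompleted performs: a Codex `reasoning` item becomes a Claude-style assistant message with a `thinking` block. The sample event values below are invented.

```js
// Illustrative only: the shape convertItemCompleted() produces for a Codex
// "reasoning" item. The sample event is made up for demonstration.
const sampleEvent = {
  type: "item.completed",
  item: { type: "reasoning", text: "Plan: run the test suite first." },
};

// Equivalent of the 'reasoning' branch of convertItemCompleted:
const converted = {
  type: "assistant",
  message: {
    content: [{ type: "thinking", thinking: sampleEvent.item.text }],
  },
};

console.log(JSON.stringify(converted, null, 2));
```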
@@ -1,452 +0,0 @@
|
||||
const path = require("path");
|
||||
const fs = require("fs/promises");
|
||||
|
||||
/**
|
||||
* Context Manager - Handles reading, writing, and deleting context files for features
|
||||
*/
|
||||
class ContextManager {
|
||||
/**
|
||||
* Write output to feature context file
|
||||
*/
|
||||
async writeToContextFile(projectPath, featureId, content) {
|
||||
if (!projectPath) return;
|
||||
|
||||
try {
|
||||
const featureDir = path.join(
|
||||
projectPath,
|
||||
".automaker",
|
||||
"features",
|
||||
featureId
|
||||
);
|
||||
|
||||
// Ensure feature directory exists
|
||||
try {
|
||||
await fs.access(featureDir);
|
||||
} catch {
|
||||
await fs.mkdir(featureDir, { recursive: true });
|
||||
}
|
||||
|
||||
const filePath = path.join(featureDir, "agent-output.md");
|
||||
|
||||
// Append to existing file or create new one
|
||||
try {
|
||||
const existing = await fs.readFile(filePath, "utf-8");
|
||||
await fs.writeFile(filePath, existing + content, "utf-8");
|
||||
} catch {
|
||||
await fs.writeFile(filePath, content, "utf-8");
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("[ContextManager] Failed to write to context file:", error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read context file for a feature
|
||||
*/
|
||||
async readContextFile(projectPath, featureId) {
|
||||
try {
|
||||
const contextPath = path.join(
|
||||
projectPath,
|
||||
".automaker",
|
||||
"features",
|
||||
featureId,
|
||||
"agent-output.md"
|
||||
);
|
||||
const content = await fs.readFile(contextPath, "utf-8");
|
||||
return content;
|
||||
} catch (error) {
|
||||
console.log(`[ContextManager] No context file found for ${featureId}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete agent context file for a feature
|
||||
*/
|
||||
async deleteContextFile(projectPath, featureId) {
|
||||
if (!projectPath) return;
|
||||
|
||||
try {
|
||||
const contextPath = path.join(
|
||||
projectPath,
|
||||
".automaker",
|
||||
"features",
|
||||
featureId,
|
||||
"agent-output.md"
|
||||
);
|
||||
await fs.unlink(contextPath);
|
||||
console.log(
|
||||
`[ContextManager] Deleted agent context for feature ${featureId}`
|
||||
);
|
||||
} catch (error) {
|
||||
// File might not exist, which is fine
|
||||
if (error.code !== "ENOENT") {
|
||||
console.error("[ContextManager] Failed to delete context file:", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the memory.md file containing lessons learned and common issues
|
||||
* Returns formatted string to inject into prompts
|
||||
*/
|
||||
async getMemoryContent(projectPath) {
|
||||
if (!projectPath) return "";
|
||||
|
||||
try {
|
||||
const memoryPath = path.join(projectPath, ".automaker", "memory.md");
|
||||
|
||||
// Check if file exists
|
||||
try {
|
||||
await fs.access(memoryPath);
|
||||
} catch {
|
||||
// File doesn't exist, return empty string
|
||||
return "";
|
||||
}
|
||||
|
||||
const content = await fs.readFile(memoryPath, "utf-8");
|
||||
|
||||
if (!content.trim()) {
|
||||
return "";
|
||||
}
|
||||
|
||||
return `
|
||||
**🧠 Agent Memory - Previous Lessons Learned:**
|
||||
|
||||
The following memory file contains lessons learned from previous agent runs, including common issues and their solutions. Review this carefully to avoid repeating past mistakes.
|
||||
|
||||
<agent-memory>
|
||||
${content}
|
||||
</agent-memory>
|
||||
|
||||
**IMPORTANT:** If you encounter a new issue that took significant debugging effort to resolve, add it to the memory file at \`.automaker/memory.md\` in a concise format:
|
||||
- Issue title
|
||||
- Problem description (1-2 sentences)
|
||||
- Solution/fix (with code example if helpful)
|
||||
|
||||
This helps future agent runs avoid the same pitfalls.
|
||||
`;
|
||||
} catch (error) {
|
||||
console.error("[ContextManager] Failed to read memory file:", error);
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List context files from .automaker/context/ directory and get previews
|
||||
* Returns a formatted string with file names and first 50 lines of each file
|
||||
*/
|
||||
async getContextFilesPreview(projectPath) {
|
||||
if (!projectPath) return "";
|
||||
|
||||
try {
|
||||
const contextDir = path.join(projectPath, ".automaker", "context");
|
||||
|
||||
// Check if directory exists
|
||||
try {
|
||||
await fs.access(contextDir);
|
||||
} catch {
|
||||
// Directory doesn't exist, return empty string
|
||||
return "";
|
||||
}
|
||||
|
||||
// Read directory contents
|
||||
const entries = await fs.readdir(contextDir, { withFileTypes: true });
|
||||
const files = entries
|
||||
.filter((entry) => entry.isFile())
|
||||
.map((entry) => entry.name)
|
||||
.sort();
|
||||
|
||||
if (files.length === 0) {
|
||||
return "";
|
||||
}
|
||||
|
||||
// Build preview string
|
||||
const previews = [];
|
||||
previews.push(`\n**📁 Context Files Available:**\n`);
|
||||
previews.push(
|
||||
`The following context files are available in \`.automaker/context/\` directory.`
|
||||
);
|
||||
previews.push(
|
||||
`These files contain additional context that may be relevant to your work.`
|
||||
);
|
||||
previews.push(
|
||||
`You can read them in full using the Read tool if needed.\n`
|
||||
);
|
||||
|
||||
for (const fileName of files) {
|
||||
try {
|
||||
const filePath = path.join(contextDir, fileName);
|
||||
const content = await fs.readFile(filePath, "utf-8");
|
||||
const lines = content.split("\n");
|
||||
const previewLines = lines.slice(0, 50);
|
||||
const preview = previewLines.join("\n");
|
||||
const hasMore = lines.length > 50;
|
||||
|
||||
previews.push(`\n**File: ${fileName}**`);
|
||||
if (hasMore) {
|
||||
previews.push(
|
||||
`(Showing first 50 of ${lines.length} lines - use Read tool to see full content)`
|
||||
);
|
||||
}
|
||||
previews.push(`\`\`\``);
|
||||
previews.push(preview);
|
||||
previews.push(`\`\`\`\n`);
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[ContextManager] Failed to read context file ${fileName}:`,
|
||||
error
|
||||
);
|
||||
previews.push(`\n**File: ${fileName}** (Error reading file)\n`);
|
||||
}
|
||||
}
|
||||
|
||||
return previews.join("\n");
|
||||
} catch (error) {
|
||||
console.error("[ContextManager] Failed to list context files:", error);
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save the initial git state before a feature starts executing
|
||||
* This captures all files that were already modified before the AI agent started
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string} featureId - Feature ID
|
||||
* @returns {Promise<{modifiedFiles: string[], untrackedFiles: string[]}>}
|
||||
*/
|
||||
async saveInitialGitState(projectPath, featureId) {
|
||||
if (!projectPath) return { modifiedFiles: [], untrackedFiles: [] };
|
||||
|
||||
try {
|
||||
const { execSync } = require("child_process");
|
||||
const featureDir = path.join(
|
||||
projectPath,
|
||||
".automaker",
|
||||
"features",
|
||||
featureId
|
||||
);
|
||||
|
||||
// Ensure feature directory exists
|
||||
try {
|
||||
await fs.access(featureDir);
|
||||
} catch {
|
||||
await fs.mkdir(featureDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Get list of modified files (both staged and unstaged)
|
||||
let modifiedFiles = [];
|
||||
try {
|
||||
const modifiedOutput = execSync("git diff --name-only HEAD", {
|
||||
cwd: projectPath,
|
||||
encoding: "utf-8",
|
||||
}).trim();
|
||||
if (modifiedOutput) {
|
||||
modifiedFiles = modifiedOutput.split("\n").filter(Boolean);
|
||||
}
|
||||
} catch (error) {
|
||||
console.log(
|
||||
"[ContextManager] No modified files or git error:",
|
||||
error.message
|
||||
);
|
||||
}
|
||||
|
||||
// Get list of untracked files
|
||||
let untrackedFiles = [];
|
||||
try {
|
||||
const untrackedOutput = execSync(
|
||||
"git ls-files --others --exclude-standard",
|
||||
{
|
||||
cwd: projectPath,
|
||||
encoding: "utf-8",
|
||||
}
|
||||
).trim();
|
||||
if (untrackedOutput) {
|
||||
untrackedFiles = untrackedOutput.split("\n").filter(Boolean);
|
||||
}
|
||||
} catch (error) {
|
||||
console.log(
|
||||
"[ContextManager] Error getting untracked files:",
|
||||
error.message
|
||||
);
|
||||
}
|
||||
|
||||
// Save the initial state to a JSON file
|
||||
const stateFile = path.join(featureDir, "git-state.json");
|
||||
const state = {
|
||||
timestamp: new Date().toISOString(),
|
||||
modifiedFiles,
|
||||
untrackedFiles,
|
||||
};
|
||||
|
||||
await fs.writeFile(stateFile, JSON.stringify(state, null, 2), "utf-8");
|
||||
console.log(
|
||||
`[ContextManager] Saved initial git state for ${featureId}:`,
|
||||
{
|
||||
modifiedCount: modifiedFiles.length,
|
||||
untrackedCount: untrackedFiles.length,
|
||||
}
|
||||
);
|
||||
|
||||
return state;
|
||||
} catch (error) {
|
||||
console.error(
|
||||
"[ContextManager] Failed to save initial git state:",
|
||||
error
|
||||
);
|
||||
return { modifiedFiles: [], untrackedFiles: [] };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the initial git state saved before a feature started executing
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string} featureId - Feature ID
|
||||
* @returns {Promise<{modifiedFiles: string[], untrackedFiles: string[], timestamp: string} | null>}
|
||||
*/
|
||||
async getInitialGitState(projectPath, featureId) {
|
||||
if (!projectPath) return null;
|
||||
|
||||
try {
|
||||
const stateFile = path.join(
|
||||
projectPath,
|
||||
".automaker",
|
||||
"features",
|
||||
featureId,
|
||||
"git-state.json"
|
||||
);
|
||||
const content = await fs.readFile(stateFile, "utf-8");
|
||||
return JSON.parse(content);
|
||||
} catch (error) {
|
||||
console.log(
|
||||
`[ContextManager] No initial git state found for ${featureId}`
|
||||
);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete the git state file for a feature
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string} featureId - Feature ID
|
||||
*/
|
||||
async deleteGitStateFile(projectPath, featureId) {
|
||||
if (!projectPath) return;
|
||||
|
||||
try {
|
||||
const stateFile = path.join(
|
||||
projectPath,
|
||||
".automaker",
|
||||
"features",
|
||||
featureId,
|
||||
"git-state.json"
|
||||
);
|
||||
await fs.unlink(stateFile);
|
||||
console.log(`[ContextManager] Deleted git state file for ${featureId}`);
|
||||
} catch (error) {
|
||||
// File might not exist, which is fine
|
||||
if (error.code !== "ENOENT") {
|
||||
console.error(
|
||||
"[ContextManager] Failed to delete git state file:",
|
||||
error
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate which files were changed during the AI session
|
||||
* by comparing current git state with the saved initial state
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string} featureId - Feature ID
|
||||
* @returns {Promise<{newFiles: string[], modifiedFiles: string[]}>}
|
||||
*/
|
||||
async getFilesChangedDuringSession(projectPath, featureId) {
|
||||
if (!projectPath) return { newFiles: [], modifiedFiles: [] };
|
||||
|
||||
try {
|
||||
const { execSync } = require("child_process");
|
||||
|
||||
// Get initial state
|
||||
const initialState = await this.getInitialGitState(
|
||||
projectPath,
|
||||
featureId
|
||||
);
|
||||
|
||||
// Get current state
|
||||
let currentModified = [];
|
||||
try {
|
||||
const modifiedOutput = execSync("git diff --name-only HEAD", {
|
||||
cwd: projectPath,
|
||||
encoding: "utf-8",
|
||||
}).trim();
|
||||
if (modifiedOutput) {
|
||||
currentModified = modifiedOutput.split("\n").filter(Boolean);
|
||||
}
|
||||
} catch (error) {
|
||||
console.log("[ContextManager] No modified files or git error");
|
||||
}
|
||||
|
||||
let currentUntracked = [];
|
||||
try {
|
||||
const untrackedOutput = execSync(
|
||||
"git ls-files --others --exclude-standard",
|
||||
{
|
||||
cwd: projectPath,
|
||||
encoding: "utf-8",
|
||||
}
|
||||
).trim();
|
||||
if (untrackedOutput) {
|
||||
currentUntracked = untrackedOutput.split("\n").filter(Boolean);
|
||||
}
|
||||
} catch (error) {
|
||||
console.log("[ContextManager] Error getting untracked files");
|
||||
}
|
||||
|
||||
if (!initialState) {
|
||||
// No initial state - all current changes are considered from this session
|
||||
console.log(
|
||||
"[ContextManager] No initial state found, returning all current changes"
|
||||
);
|
||||
return {
|
||||
newFiles: currentUntracked,
|
||||
modifiedFiles: currentModified,
|
||||
};
|
||||
}
|
||||
|
||||
// Calculate files that are new since the session started
|
||||
const initialModifiedSet = new Set(initialState.modifiedFiles || []);
|
||||
const initialUntrackedSet = new Set(initialState.untrackedFiles || []);
|
||||
|
||||
// New files = current untracked - initial untracked
|
||||
const newFiles = currentUntracked.filter(
|
||||
(f) => !initialUntrackedSet.has(f)
|
||||
);
|
||||
|
||||
// Modified files = current modified - initial modified
|
||||
const modifiedFiles = currentModified.filter(
|
||||
(f) => !initialModifiedSet.has(f)
|
||||
);
|
||||
|
||||
console.log(
|
||||
`[ContextManager] Files changed during session for ${featureId}:`,
|
||||
{
|
||||
newFilesCount: newFiles.length,
|
||||
modifiedFilesCount: modifiedFiles.length,
|
||||
newFiles,
|
||||
modifiedFiles,
|
||||
}
|
||||
);
|
||||
|
||||
return { newFiles, modifiedFiles };
|
||||
} catch (error) {
|
||||
console.error(
|
||||
"[ContextManager] Failed to calculate changed files:",
|
||||
error
|
||||
);
|
||||
return { newFiles: [], modifiedFiles: [] };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new ContextManager();
|
||||
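A minimal usage sketch of the ContextManager singleton shown above; the require path, project path, and feature ID are illustrative assumptions, not taken from the diff.

```js
// Usage sketch, assuming the module path "./context-manager" and a feature
// with ID "feature-123" (both illustrative).
const contextManager = require("./context-manager");

async function demo(projectPath) {
  // Snapshot git state before the agent touches anything
  await contextManager.saveInitialGitState(projectPath, "feature-123");

  // Stream agent output into .automaker/features/feature-123/agent-output.md
  await contextManager.writeToContextFile(projectPath, "feature-123", "Starting work...\n");

  // After the run: which files did the agent add or modify?
  const { newFiles, modifiedFiles } =
    await contextManager.getFilesChangedDuringSession(projectPath, "feature-123");
  console.log({ newFiles, modifiedFiles });
}

demo("/path/to/project").catch(console.error);
```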
File diff suppressed because it is too large
@@ -1,413 +0,0 @@
|
||||
const path = require("path");
|
||||
const fs = require("fs/promises");
|
||||
|
||||
/**
|
||||
* Feature Loader - Handles loading and managing features from individual feature folders
|
||||
* Each feature is stored in .automaker/features/{featureId}/feature.json
|
||||
*/
|
||||
class FeatureLoader {
|
||||
/**
|
||||
* Get the features directory path
|
||||
*/
|
||||
getFeaturesDir(projectPath) {
|
||||
return path.join(projectPath, ".automaker", "features");
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the path to a specific feature folder
|
||||
*/
|
||||
getFeatureDir(projectPath, featureId) {
|
||||
return path.join(this.getFeaturesDir(projectPath), featureId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the path to a feature's feature.json file
|
||||
*/
|
||||
getFeatureJsonPath(projectPath, featureId) {
|
||||
return path.join(
|
||||
this.getFeatureDir(projectPath, featureId),
|
||||
"feature.json"
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the path to a feature's agent-output.md file
|
||||
*/
|
||||
getAgentOutputPath(projectPath, featureId) {
|
||||
return path.join(
|
||||
this.getFeatureDir(projectPath, featureId),
|
||||
"agent-output.md"
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a new feature ID
|
||||
*/
|
||||
generateFeatureId() {
|
||||
return `feature-${Date.now()}-${Math.random()
|
||||
.toString(36)
|
||||
.substring(2, 11)}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure all image paths for a feature are stored within the feature directory
|
||||
*/
|
||||
async ensureFeatureImages(projectPath, featureId, feature) {
|
||||
if (
|
||||
!feature ||
|
||||
!Array.isArray(feature.imagePaths) ||
|
||||
feature.imagePaths.length === 0
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
const featureDir = this.getFeatureDir(projectPath, featureId);
|
||||
const featureImagesDir = path.join(featureDir, "images");
|
||||
await fs.mkdir(featureImagesDir, { recursive: true });
|
||||
|
||||
const updatedImagePaths = [];
|
||||
|
||||
for (const entry of feature.imagePaths) {
|
||||
const isStringEntry = typeof entry === "string";
|
||||
const currentPathValue = isStringEntry ? entry : entry.path;
|
||||
|
||||
if (!currentPathValue) {
|
||||
updatedImagePaths.push(entry);
|
||||
continue;
|
||||
}
|
||||
|
||||
let resolvedCurrentPath = currentPathValue;
|
||||
if (!path.isAbsolute(resolvedCurrentPath)) {
|
||||
resolvedCurrentPath = path.join(projectPath, resolvedCurrentPath);
|
||||
}
|
||||
resolvedCurrentPath = path.normalize(resolvedCurrentPath);
|
||||
|
||||
// Skip if file doesn't exist
|
||||
try {
|
||||
await fs.access(resolvedCurrentPath);
|
||||
} catch {
|
||||
console.warn(
|
||||
`[FeatureLoader] Image file missing for ${featureId}: ${resolvedCurrentPath}`
|
||||
);
|
||||
updatedImagePaths.push(entry);
|
||||
continue;
|
||||
}
|
||||
|
||||
const relativeToFeatureImages = path.relative(
|
||||
featureImagesDir,
|
||||
resolvedCurrentPath
|
||||
);
|
||||
const alreadyInFeatureDir =
|
||||
relativeToFeatureImages === "" ||
|
||||
(!relativeToFeatureImages.startsWith("..") &&
|
||||
!path.isAbsolute(relativeToFeatureImages));
|
||||
|
||||
let finalPath = resolvedCurrentPath;
|
||||
|
||||
if (!alreadyInFeatureDir) {
|
||||
const originalName = path.basename(resolvedCurrentPath);
|
||||
let targetPath = path.join(featureImagesDir, originalName);
|
||||
|
||||
// Avoid overwriting files by appending a counter if needed
|
||||
let counter = 1;
|
||||
while (true) {
|
||||
try {
|
||||
await fs.access(targetPath);
|
||||
const parsed = path.parse(originalName);
|
||||
targetPath = path.join(
|
||||
featureImagesDir,
|
||||
`${parsed.name}-${counter}${parsed.ext}`
|
||||
);
|
||||
counter += 1;
|
||||
} catch {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
await fs.rename(resolvedCurrentPath, targetPath);
|
||||
finalPath = targetPath;
|
||||
} catch (error) {
|
||||
console.warn(
|
||||
`[FeatureLoader] Failed to move image ${resolvedCurrentPath}: ${error.message}`
|
||||
);
|
||||
updatedImagePaths.push(entry);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
updatedImagePaths.push(
|
||||
isStringEntry ? finalPath : { ...entry, path: finalPath }
|
||||
);
|
||||
}
|
||||
|
||||
feature.imagePaths = updatedImagePaths;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all features for a project
|
||||
*/
|
||||
async getAll(projectPath) {
|
||||
try {
|
||||
const featuresDir = this.getFeaturesDir(projectPath);
|
||||
|
||||
// Check if features directory exists
|
||||
try {
|
||||
await fs.access(featuresDir);
|
||||
} catch {
|
||||
// Directory doesn't exist, return empty array
|
||||
return [];
|
||||
}
|
||||
|
||||
// Read all feature directories
|
||||
const entries = await fs.readdir(featuresDir, { withFileTypes: true });
|
||||
const featureDirs = entries.filter((entry) => entry.isDirectory());
|
||||
|
||||
// Load each feature
|
||||
const features = [];
|
||||
for (const dir of featureDirs) {
|
||||
const featureId = dir.name;
|
||||
const featureJsonPath = this.getFeatureJsonPath(projectPath, featureId);
|
||||
|
||||
try {
|
||||
const content = await fs.readFile(featureJsonPath, "utf-8");
|
||||
const feature = JSON.parse(content);
|
||||
features.push(feature);
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[FeatureLoader] Failed to load feature ${featureId}:`,
|
||||
error
|
||||
);
|
||||
// Continue loading other features
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by creation order (feature IDs contain timestamp)
|
||||
features.sort((a, b) => {
|
||||
const aTime = a.id ? parseInt(a.id.split("-")[1] || "0") : 0;
|
||||
const bTime = b.id ? parseInt(b.id.split("-")[1] || "0") : 0;
|
||||
return aTime - bTime;
|
||||
});
|
||||
|
||||
return features;
|
||||
} catch (error) {
|
||||
console.error("[FeatureLoader] Failed to get all features:", error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a single feature by ID
|
||||
*/
|
||||
async get(projectPath, featureId) {
|
||||
try {
|
||||
const featureJsonPath = this.getFeatureJsonPath(projectPath, featureId);
|
||||
const content = await fs.readFile(featureJsonPath, "utf-8");
|
||||
return JSON.parse(content);
|
||||
} catch (error) {
|
||||
if (error.code === "ENOENT") {
|
||||
return null;
|
||||
}
|
||||
console.error(
|
||||
`[FeatureLoader] Failed to get feature ${featureId}:`,
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new feature
|
||||
*/
|
||||
async create(projectPath, featureData) {
|
||||
const featureId = featureData.id || this.generateFeatureId();
|
||||
const featureDir = this.getFeatureDir(projectPath, featureId);
|
||||
const featureJsonPath = this.getFeatureJsonPath(projectPath, featureId);
|
||||
|
||||
// Ensure features directory exists
|
||||
const featuresDir = this.getFeaturesDir(projectPath);
|
||||
await fs.mkdir(featuresDir, { recursive: true });
|
||||
|
||||
// Create feature directory
|
||||
await fs.mkdir(featureDir, { recursive: true });
|
||||
|
||||
// Ensure feature has an ID
|
||||
const feature = { ...featureData, id: featureId };
|
||||
|
||||
// Move any uploaded images into the feature directory
|
||||
await this.ensureFeatureImages(projectPath, featureId, feature);
|
||||
|
||||
// Write feature.json
|
||||
await fs.writeFile(
|
||||
featureJsonPath,
|
||||
JSON.stringify(feature, null, 2),
|
||||
"utf-8"
|
||||
);
|
||||
|
||||
console.log(`[FeatureLoader] Created feature ${featureId}`);
|
||||
return feature;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update a feature (partial updates supported)
|
||||
*/
|
||||
async update(projectPath, featureId, updates) {
|
||||
try {
|
||||
const feature = await this.get(projectPath, featureId);
|
||||
if (!feature) {
|
||||
throw new Error(`Feature ${featureId} not found`);
|
||||
}
|
||||
|
||||
// Merge updates
|
||||
const updatedFeature = { ...feature, ...updates };
|
||||
|
||||
// Move any new images into the feature directory
|
||||
await this.ensureFeatureImages(projectPath, featureId, updatedFeature);
|
||||
|
||||
// Write back to file
|
||||
const featureJsonPath = this.getFeatureJsonPath(projectPath, featureId);
|
||||
await fs.writeFile(
|
||||
featureJsonPath,
|
||||
JSON.stringify(updatedFeature, null, 2),
|
||||
"utf-8"
|
||||
);
|
||||
|
||||
console.log(`[FeatureLoader] Updated feature ${featureId}`);
|
||||
return updatedFeature;
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[FeatureLoader] Failed to update feature ${featureId}:`,
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a feature and its entire folder
|
||||
*/
|
||||
async delete(projectPath, featureId) {
|
||||
try {
|
||||
const featureDir = this.getFeatureDir(projectPath, featureId);
|
||||
await fs.rm(featureDir, { recursive: true, force: true });
|
||||
console.log(`[FeatureLoader] Deleted feature ${featureId}`);
|
||||
} catch (error) {
|
||||
if (error.code === "ENOENT") {
|
||||
// Feature doesn't exist, that's fine
|
||||
return;
|
||||
}
|
||||
console.error(
|
||||
`[FeatureLoader] Failed to delete feature ${featureId}:`,
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get agent output for a feature
|
||||
*/
|
||||
async getAgentOutput(projectPath, featureId) {
|
||||
try {
|
||||
const agentOutputPath = this.getAgentOutputPath(projectPath, featureId);
|
||||
const content = await fs.readFile(agentOutputPath, "utf-8");
|
||||
return content;
|
||||
} catch (error) {
|
||||
if (error.code === "ENOENT") {
|
||||
return null;
|
||||
}
|
||||
console.error(
|
||||
`[FeatureLoader] Failed to get agent output for ${featureId}:`,
|
||||
error
|
||||
);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Legacy methods for backward compatibility (used by backend services)
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Load all features for a project (legacy API)
|
||||
* Features are stored in .automaker/features/{id}/feature.json
|
||||
*/
|
||||
async loadFeatures(projectPath) {
|
||||
return await this.getAll(projectPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update feature status (legacy API)
|
||||
* Features are stored in .automaker/features/{id}/feature.json
|
||||
* @param {string} featureId - The ID of the feature to update
|
||||
* @param {string} status - The new status
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string} [summary] - Optional summary of what was done
|
||||
* @param {string} [error] - Optional error message if feature errored
|
||||
*/
|
||||
async updateFeatureStatus(featureId, status, projectPath, summary, error) {
|
||||
const updates = { status };
|
||||
if (summary !== undefined) {
|
||||
updates.summary = summary;
|
||||
}
|
||||
if (error !== undefined) {
|
||||
updates.error = error;
|
||||
} else {
|
||||
// Clear error if not provided
|
||||
const feature = await this.get(projectPath, featureId);
|
||||
if (feature && feature.error) {
|
||||
updates.error = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
await this.update(projectPath, featureId, updates);
|
||||
console.log(
|
||||
`[FeatureLoader] Updated feature ${featureId}: status=${status}${
|
||||
summary ? `, summary="${summary}"` : ""
|
||||
}`
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Select the next feature to implement
|
||||
* Prioritizes: earlier features in the list that are not verified or waiting_approval
|
||||
*/
|
||||
selectNextFeature(features) {
|
||||
// Find first feature that is in backlog or in_progress status
|
||||
// Skip verified and waiting_approval (which needs user input)
|
||||
return features.find(
|
||||
(f) => f.status !== "verified" && f.status !== "waiting_approval"
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update worktree info for a feature (legacy API)
|
||||
* Features are stored in .automaker/features/{id}/feature.json
|
||||
* @param {string} featureId - The ID of the feature to update
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string|null} worktreePath - Path to the worktree (null to clear)
|
||||
* @param {string|null} branchName - Name of the feature branch (null to clear)
|
||||
*/
|
||||
async updateFeatureWorktree(
|
||||
featureId,
|
||||
projectPath,
|
||||
worktreePath,
|
||||
branchName
|
||||
) {
|
||||
const updates = {};
|
||||
if (worktreePath) {
|
||||
updates.worktreePath = worktreePath;
|
||||
updates.branchName = branchName;
|
||||
} else {
|
||||
updates.worktreePath = null;
|
||||
updates.branchName = null;
|
||||
}
|
||||
|
||||
await this.update(projectPath, featureId, updates);
|
||||
console.log(
|
||||
`[FeatureLoader] Updated feature ${featureId}: worktreePath=${worktreePath}, branchName=${branchName}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new FeatureLoader();
|
||||
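A minimal usage sketch of the FeatureLoader API shown above; the require path, project path, and feature fields are illustrative assumptions.

```js
// Usage sketch, assuming the module path "./feature-loader".
const featureLoader = require("./feature-loader");

async function demo(projectPath) {
  // Create a feature; an ID of the form feature-<timestamp>-<rand> is generated
  const feature = await featureLoader.create(projectPath, {
    description: "Add dark mode toggle",
    status: "backlog",
  });

  // Partial update, merged into feature.json
  await featureLoader.update(projectPath, feature.id, { status: "in_progress" });

  // Legacy-style status update with a summary shown on the Kanban card
  await featureLoader.updateFeatureStatus(
    feature.id,
    "verified",
    projectPath,
    "Added dark mode toggle"
  );
}

demo("/path/to/project").catch(console.error);
```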
@@ -1,269 +0,0 @@
|
||||
const { query, AbortError } = require("@anthropic-ai/claude-agent-sdk");
|
||||
const promptBuilder = require("./prompt-builder");
|
||||
|
||||
/**
|
||||
* Feature Suggestions Service - Analyzes project and generates feature suggestions
|
||||
*/
|
||||
class FeatureSuggestionsService {
|
||||
constructor() {
|
||||
this.runningAnalysis = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate feature suggestions by analyzing the project
|
||||
*/
|
||||
async generateSuggestions(projectPath, sendToRenderer, execution) {
|
||||
console.log(
|
||||
`[FeatureSuggestions] Generating suggestions for: ${projectPath}`
|
||||
);
|
||||
|
||||
try {
|
||||
const abortController = new AbortController();
|
||||
execution.abortController = abortController;
|
||||
|
||||
const options = {
|
||||
model: "claude-sonnet-4-20250514",
|
||||
systemPrompt: this.getSystemPrompt(),
|
||||
maxTurns: 50,
|
||||
cwd: projectPath,
|
||||
allowedTools: ["Read", "Glob", "Grep", "Bash"],
|
||||
permissionMode: "acceptEdits",
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
abortController: abortController,
|
||||
};
|
||||
|
||||
const prompt = this.buildAnalysisPrompt();
|
||||
|
||||
sendToRenderer({
|
||||
type: "suggestions_progress",
|
||||
content: "Starting project analysis...\n",
|
||||
});
|
||||
|
||||
const currentQuery = query({ prompt, options });
|
||||
execution.query = currentQuery;
|
||||
|
||||
let fullResponse = "";
|
||||
for await (const msg of currentQuery) {
|
||||
if (!execution.isActive()) break;
|
||||
|
||||
if (msg.type === "assistant" && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === "text") {
|
||||
fullResponse += block.text;
|
||||
sendToRenderer({
|
||||
type: "suggestions_progress",
|
||||
content: block.text,
|
||||
});
|
||||
} else if (block.type === "tool_use") {
|
||||
sendToRenderer({
|
||||
type: "suggestions_tool",
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
execution.query = null;
|
||||
execution.abortController = null;
|
||||
|
||||
// Parse the suggestions from the response
|
||||
const suggestions = this.parseSuggestions(fullResponse);
|
||||
|
||||
sendToRenderer({
|
||||
type: "suggestions_complete",
|
||||
suggestions: suggestions,
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
suggestions: suggestions,
|
||||
};
|
||||
} catch (error) {
|
||||
if (error instanceof AbortError || error?.name === "AbortError") {
|
||||
console.log("[FeatureSuggestions] Analysis aborted");
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
return {
|
||||
success: false,
|
||||
message: "Analysis aborted",
|
||||
suggestions: [],
|
||||
};
|
||||
}
|
||||
|
||||
console.error(
|
||||
"[FeatureSuggestions] Error generating suggestions:",
|
||||
error
|
||||
);
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse suggestions from the LLM response
|
||||
* Looks for JSON array in the response
|
||||
*/
|
||||
parseSuggestions(response) {
|
||||
try {
|
||||
// Try to find JSON array in the response
|
||||
// Look for ```json ... ``` blocks first
|
||||
const jsonBlockMatch = response.match(/```json\s*([\s\S]*?)```/);
|
||||
if (jsonBlockMatch) {
|
||||
const parsed = JSON.parse(jsonBlockMatch[1].trim());
|
||||
if (Array.isArray(parsed)) {
|
||||
return this.validateSuggestions(parsed);
|
||||
}
|
||||
}
|
||||
|
||||
// Try to find a raw JSON array
|
||||
const jsonArrayMatch = response.match(/\[\s*\{[\s\S]*\}\s*\]/);
|
||||
if (jsonArrayMatch) {
|
||||
const parsed = JSON.parse(jsonArrayMatch[0]);
|
||||
if (Array.isArray(parsed)) {
|
||||
return this.validateSuggestions(parsed);
|
||||
}
|
||||
}
|
||||
|
||||
console.warn(
|
||||
"[FeatureSuggestions] Could not parse suggestions from response"
|
||||
);
|
||||
return [];
|
||||
} catch (error) {
|
||||
console.error("[FeatureSuggestions] Error parsing suggestions:", error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate and normalize suggestions
|
||||
*/
|
||||
validateSuggestions(suggestions) {
|
||||
return suggestions
|
||||
.filter((s) => s && typeof s === "object")
|
||||
.map((s, index) => ({
|
||||
id: `suggestion-${Date.now()}-${index}`,
|
||||
category: s.category || "Uncategorized",
|
||||
description: s.description || s.title || "No description",
|
||||
steps: Array.isArray(s.steps) ? s.steps : [],
|
||||
priority: typeof s.priority === "number" ? s.priority : index + 1,
|
||||
reasoning: s.reasoning || "",
|
||||
}))
|
||||
.sort((a, b) => a.priority - b.priority);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the system prompt for feature suggestion analysis
|
||||
*/
|
||||
getSystemPrompt() {
|
||||
return `You are an expert software architect and product manager. Your job is to analyze a codebase and suggest missing features that would improve the application.
|
||||
|
||||
You should:
|
||||
1. Thoroughly analyze the project structure, code, and any existing documentation
|
||||
2. Identify what the application does and what features it currently has (look at the .automaker/app_spec.txt file as well if it exists)
|
||||
3. Generate a comprehensive list of missing features that would be valuable to users
|
||||
4. Prioritize features by impact and complexity
|
||||
5. Provide clear, actionable descriptions and implementation steps
|
||||
|
||||
When analyzing, look at:
|
||||
- README files and documentation
|
||||
- Package.json, cargo.toml, or similar config files for tech stack
|
||||
- Source code structure and organization
|
||||
- Existing features and their implementation patterns
|
||||
- Common patterns in similar applications
|
||||
- User experience improvements
|
||||
- Developer experience improvements
|
||||
- Performance optimizations
|
||||
- Security enhancements
|
||||
|
||||
You have access to file reading and search tools. Use them to understand the codebase.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the prompt for analyzing the project
|
||||
*/
|
||||
buildAnalysisPrompt() {
|
||||
return `Analyze this project and generate a list of suggested features that are missing or would improve the application.
|
||||
|
||||
**Your Task:**
|
||||
|
||||
1. First, explore the project structure:
|
||||
- Read README.md, package.json, or similar config files
|
||||
- Scan the source code directory structure
|
||||
- Identify the tech stack and frameworks used
|
||||
- Look at existing features and how they're implemented
|
||||
|
||||
2. Identify what the application does:
|
||||
- What is the main purpose?
|
||||
- What features are already implemented?
|
||||
- What patterns and conventions are used?
|
||||
|
||||
3. Generate feature suggestions:
|
||||
- Think about what's missing compared to similar applications
|
||||
- Consider user experience improvements
|
||||
- Consider developer experience improvements
|
||||
- Think about performance, security, and reliability
|
||||
- Consider testing and documentation improvements
|
||||
|
||||
4. **CRITICAL: Output your suggestions as a JSON array** at the end of your response, formatted like this:
|
||||
|
||||
\`\`\`json
|
||||
[
|
||||
{
|
||||
"category": "User Experience",
|
||||
"description": "Add dark mode support with system preference detection",
|
||||
"steps": [
|
||||
"Create a ThemeProvider context to manage theme state",
|
||||
"Add a toggle component in the settings or header",
|
||||
"Implement CSS variables for theme colors",
|
||||
"Add localStorage persistence for user preference"
|
||||
],
|
||||
"priority": 1,
|
||||
"reasoning": "Dark mode is a standard feature that improves accessibility and user comfort"
|
||||
},
|
||||
{
|
||||
"category": "Performance",
|
||||
"description": "Implement lazy loading for heavy components",
|
||||
"steps": [
|
||||
"Identify components that are heavy or rarely used",
|
||||
"Use React.lazy() and Suspense for code splitting",
|
||||
"Add loading states for lazy-loaded components"
|
||||
],
|
||||
"priority": 2,
|
||||
"reasoning": "Improves initial load time and reduces bundle size"
|
||||
}
|
||||
]
|
||||
\`\`\`
|
||||
|
||||
**Important Guidelines:**
|
||||
- Generate at least 10-20 feature suggestions
|
||||
- Order them by priority (1 = highest priority)
|
||||
- Each feature should have clear, actionable steps
|
||||
- Categories should be meaningful (e.g., "User Experience", "Performance", "Security", "Testing", "Documentation", "Developer Experience", "Accessibility", etc.)
|
||||
- Be specific about what files might need to be created or modified
|
||||
- Consider the existing tech stack and patterns when suggesting implementation steps
|
||||
|
||||
Begin by exploring the project structure.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop the current analysis
|
||||
*/
|
||||
stop() {
|
||||
if (this.runningAnalysis && this.runningAnalysis.abortController) {
|
||||
this.runningAnalysis.abortController.abort();
|
||||
}
|
||||
this.runningAnalysis = null;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new FeatureSuggestionsService();
|
||||
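A small sketch of what parseSuggestions extracts from a model response containing a fenced JSON block; the module path and the sample response text are assumptions.

```js
// Illustrative only: parseSuggestions() pulls the JSON array out of a
// ```json fenced block and normalizes each entry.
const featureSuggestions = require("./feature-suggestions"); // assumed path

const sampleResponse = [
  "Here are my suggestions:",
  "```json",
  '[{"category":"Performance","description":"Lazy-load heavy views","steps":["Use React.lazy()"],"priority":1,"reasoning":"Faster startup"}]',
  "```",
].join("\n");

const suggestions = featureSuggestions.parseSuggestions(sampleResponse);
// Each entry comes back with an id, category, description, steps,
// priority and reasoning, sorted by priority.
console.log(suggestions);
```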
@@ -1,185 +0,0 @@
|
||||
const { query, AbortError } = require("@anthropic-ai/claude-agent-sdk");
|
||||
const promptBuilder = require("./prompt-builder");
|
||||
const contextManager = require("./context-manager");
|
||||
const featureLoader = require("./feature-loader");
|
||||
const mcpServerFactory = require("./mcp-server-factory");
|
||||
|
||||
/**
|
||||
* Feature Verifier - Handles feature verification by running tests
|
||||
*/
|
||||
class FeatureVerifier {
|
||||
/**
|
||||
* Verify feature tests (runs tests and checks if they pass)
|
||||
*/
|
||||
async verifyFeatureTests(feature, projectPath, sendToRenderer, execution) {
|
||||
console.log(
|
||||
`[FeatureVerifier] Verifying tests for: ${feature.description}`
|
||||
);
|
||||
|
||||
try {
|
||||
const verifyMsg = `\n✅ Verifying tests for: ${feature.description}\n`;
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
verifyMsg
|
||||
);
|
||||
|
||||
sendToRenderer({
|
||||
type: "auto_mode_phase",
|
||||
featureId: feature.id,
|
||||
phase: "verification",
|
||||
message: `Verifying tests for: ${feature.description}`,
|
||||
});
|
||||
|
||||
const abortController = new AbortController();
|
||||
execution.abortController = abortController;
|
||||
|
||||
// Create custom MCP server with UpdateFeatureStatus tool
|
||||
const featureToolsServer = mcpServerFactory.createFeatureToolsServer(
|
||||
featureLoader.updateFeatureStatus.bind(featureLoader),
|
||||
projectPath
|
||||
);
|
||||
|
||||
const options = {
|
||||
model: "claude-opus-4-5-20251101",
|
||||
systemPrompt: await promptBuilder.getVerificationPrompt(projectPath),
|
||||
maxTurns: 1000,
|
||||
cwd: projectPath,
|
||||
mcpServers: {
|
||||
"automaker-tools": featureToolsServer,
|
||||
},
|
||||
allowedTools: [
|
||||
"Read",
|
||||
"Write",
|
||||
"Edit",
|
||||
"Glob",
|
||||
"Grep",
|
||||
"Bash",
|
||||
"mcp__automaker-tools__UpdateFeatureStatus",
|
||||
],
|
||||
permissionMode: "acceptEdits",
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
abortController: abortController,
|
||||
};
|
||||
|
||||
const prompt = await promptBuilder.buildVerificationPrompt(
|
||||
feature,
|
||||
projectPath
|
||||
);
|
||||
|
||||
const runningTestsMsg =
|
||||
"Running Playwright tests to verify feature implementation...\n";
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
runningTestsMsg
|
||||
);
|
||||
|
||||
sendToRenderer({
|
||||
type: "auto_mode_progress",
|
||||
featureId: feature.id,
|
||||
content: runningTestsMsg,
|
||||
});
|
||||
|
||||
const currentQuery = query({ prompt, options });
|
||||
execution.query = currentQuery;
|
||||
|
||||
let responseText = "";
|
||||
for await (const msg of currentQuery) {
|
||||
// Check if this specific feature was aborted
|
||||
if (!execution.isActive()) break;
|
||||
|
||||
if (msg.type === "assistant" && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === "text") {
|
||||
responseText += block.text;
|
||||
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
block.text
|
||||
);
|
||||
|
||||
sendToRenderer({
|
||||
type: "auto_mode_progress",
|
||||
featureId: feature.id,
|
||||
content: block.text,
|
||||
});
|
||||
} else if (block.type === "tool_use") {
|
||||
const toolMsg = `\n🔧 Tool: ${block.name}\n`;
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
toolMsg
|
||||
);
|
||||
|
||||
sendToRenderer({
|
||||
type: "auto_mode_tool",
|
||||
featureId: feature.id,
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
execution.query = null;
|
||||
execution.abortController = null;
|
||||
|
||||
// Re-load features to check if it was marked as verified or waiting_approval (for skipTests)
|
||||
const updatedFeatures = await featureLoader.loadFeatures(projectPath);
|
||||
const updatedFeature = updatedFeatures.find((f) => f.id === feature.id);
|
||||
// For skipTests features, waiting_approval is also considered a success
|
||||
const passes =
|
||||
updatedFeature?.status === "verified" ||
|
||||
(updatedFeature?.skipTests &&
|
||||
updatedFeature?.status === "waiting_approval");
|
||||
|
||||
const finalMsg = passes
|
||||
? "✓ Verification successful: All tests passed\n"
|
||||
: "✗ Tests failed or not all passing - feature remains in progress\n";
|
||||
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
finalMsg
|
||||
);
|
||||
|
||||
sendToRenderer({
|
||||
type: "auto_mode_progress",
|
||||
featureId: feature.id,
|
||||
content: finalMsg,
|
||||
});
|
||||
|
||||
return {
|
||||
passes,
|
||||
message: responseText.substring(0, 500),
|
||||
};
|
||||
} catch (error) {
|
||||
if (error instanceof AbortError || error?.name === "AbortError") {
|
||||
console.log("[FeatureVerifier] Verification aborted");
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
return {
|
||||
passes: false,
|
||||
message: "Verification aborted",
|
||||
};
|
||||
}
|
||||
|
||||
console.error("[FeatureVerifier] Error verifying feature:", error);
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new FeatureVerifier();
|
||||
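A usage sketch for verifyFeatureTests; the require path and the shape of the execution object (abortController, query, isActive) are inferred from how the method uses them above and should be treated as assumptions.

```js
// Usage sketch, assuming the module path "./feature-verifier".
const featureVerifier = require("./feature-verifier");

async function demo(feature, projectPath) {
  // Minimal execution handle matching what verifyFeatureTests expects
  const execution = {
    abortController: null,
    query: null,
    active: true,
    isActive() {
      return this.active;
    },
  };

  // Forward progress events to the UI (here: just log the event type)
  const sendToRenderer = (event) => console.log("[renderer]", event.type);

  const { passes, message } = await featureVerifier.verifyFeatureTests(
    feature,
    projectPath,
    sendToRenderer,
    execution
  );
  console.log("verification passed:", passes, message);
}
```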
@@ -1,76 +0,0 @@
const { createSdkMcpServer, tool } = require("@anthropic-ai/claude-agent-sdk");
const { z } = require("zod");
const featureLoader = require("./feature-loader");

/**
 * MCP Server Factory - Creates custom MCP servers with tools
 */
class McpServerFactory {
  /**
   * Create a custom MCP server with the UpdateFeatureStatus tool
   * This tool allows Claude Code to safely update feature status without
   * directly modifying feature files, preventing race conditions
   * and accidental state corruption.
   */
  createFeatureToolsServer(updateFeatureStatusCallback, projectPath) {
    return createSdkMcpServer({
      name: "automaker-tools",
      version: "1.0.0",
      tools: [
        tool(
          "UpdateFeatureStatus",
          "Update the status of a feature. Use this tool instead of directly modifying feature files to safely update feature status. IMPORTANT: If the feature has skipTests=true, you should NOT mark it as verified - instead it will automatically go to waiting_approval status for manual review. Always include a summary of what was done.",
          {
            featureId: z.string().describe("The ID of the feature to update"),
            status: z.enum(["backlog", "in_progress", "verified"]).describe("The new status for the feature. Note: If skipTests=true, verified will be converted to waiting_approval automatically."),
            summary: z.string().optional().describe("A brief summary of what was implemented/changed. This will be displayed on the Kanban card. Example: 'Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx'")
          },
          async (args) => {
            try {
              console.log(`[McpServerFactory] UpdateFeatureStatus tool called: featureId=${args.featureId}, status=${args.status}, summary=${args.summary || "(none)"}`);

              // Load the feature to check skipTests flag
              const features = await featureLoader.loadFeatures(projectPath);
              const feature = features.find((f) => f.id === args.featureId);

              if (!feature) {
                throw new Error(`Feature ${args.featureId} not found`);
              }

              // If agent tries to mark as verified but feature has skipTests=true, convert to waiting_approval
              let finalStatus = args.status;
              if (args.status === "verified" && feature.skipTests === true) {
                console.log(`[McpServerFactory] Feature ${args.featureId} has skipTests=true, converting verified -> waiting_approval`);
                finalStatus = "waiting_approval";
              }

              // Call the provided callback to update feature status with summary
              await updateFeatureStatusCallback(args.featureId, finalStatus, projectPath, args.summary);

              const statusMessage = finalStatus !== args.status
                ? `Successfully updated feature ${args.featureId} to status "${finalStatus}" (converted from "${args.status}" because skipTests=true)${args.summary ? ` with summary: "${args.summary}"` : ""}`
                : `Successfully updated feature ${args.featureId} to status "${finalStatus}"${args.summary ? ` with summary: "${args.summary}"` : ""}`;

              return {
                content: [{
                  type: "text",
                  text: statusMessage
                }]
              };
            } catch (error) {
              console.error("[McpServerFactory] UpdateFeatureStatus tool error:", error);
              return {
                content: [{
                  type: "text",
                  text: `Failed to update feature status: ${error.message}`
                }]
              };
            }
          }
        )
      ]
    });
  }
}

module.exports = new McpServerFactory();
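For context, this mirrors how feature-verifier wires the factory into an Agent SDK query above; only the require paths are assumed.

```js
// Wiring sketch, following the pattern used by the verifier (paths assumed).
const mcpServerFactory = require("./mcp-server-factory");
const featureLoader = require("./feature-loader");

function buildOptions(projectPath, abortController) {
  const featureToolsServer = mcpServerFactory.createFeatureToolsServer(
    featureLoader.updateFeatureStatus.bind(featureLoader),
    projectPath
  );

  return {
    cwd: projectPath,
    mcpServers: { "automaker-tools": featureToolsServer },
    // MCP tools must be allow-listed with the mcp__<server>__<tool> prefix
    allowedTools: ["Read", "Bash", "mcp__automaker-tools__UpdateFeatureStatus"],
    permissionMode: "acceptEdits",
    abortController,
  };
}
```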
@@ -1,349 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Standalone STDIO MCP Server for Automaker Tools
|
||||
*
|
||||
* This script runs as a standalone process and communicates via JSON-RPC 2.0
|
||||
* over stdin/stdout. It implements the MCP protocol to expose the UpdateFeatureStatus
|
||||
* tool to Codex CLI.
|
||||
*
|
||||
* Environment variables:
|
||||
* - AUTOMAKER_PROJECT_PATH: Path to the project directory
|
||||
* - AUTOMAKER_IPC_CHANNEL: IPC channel name for callback communication (optional, uses default)
|
||||
*/
|
||||
|
||||
const readline = require('readline');
|
||||
const path = require('path');
|
||||
|
||||
// Redirect all console.log output to stderr to avoid polluting MCP stdout
|
||||
const originalConsoleLog = console.log;
|
||||
console.log = (...args) => {
|
||||
console.error(...args);
|
||||
};
|
||||
|
||||
// Set up readline interface for line-by-line JSON-RPC input
|
||||
// IMPORTANT: Use a separate output stream for readline to avoid interfering with JSON-RPC stdout
|
||||
// We'll write JSON-RPC responses directly to stdout, not through readline
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: null, // Don't use stdout for readline output
|
||||
terminal: false
|
||||
});
|
||||
|
||||
let initialized = false;
|
||||
let projectPath = null;
|
||||
let ipcChannel = null;
|
||||
|
||||
// Get configuration from environment
|
||||
projectPath = process.env.AUTOMAKER_PROJECT_PATH || process.cwd();
|
||||
ipcChannel = process.env.AUTOMAKER_IPC_CHANNEL || 'mcp:update-feature-status';
|
||||
|
||||
// Load dependencies (these will be available in the Electron app context)
|
||||
let featureLoader;
|
||||
let electron;
|
||||
|
||||
// Try to load Electron IPC if available (when running from Electron app)
|
||||
try {
|
||||
// In Electron, we can use IPC directly
|
||||
if (typeof require !== 'undefined') {
|
||||
// Check if we're in Electron context
|
||||
const electronModule = require('electron');
|
||||
if (electronModule && electronModule.ipcMain) {
|
||||
electron = electronModule;
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
// Not in Electron context, will use alternative method
|
||||
}
|
||||
|
||||
// Load feature loader
|
||||
// Try multiple paths since this script might be run from different contexts
|
||||
try {
|
||||
// First try relative path (when run from electron/services/)
|
||||
featureLoader = require('./feature-loader');
|
||||
} catch (e) {
|
||||
try {
|
||||
// Try absolute path resolution
|
||||
const featureLoaderPath = path.resolve(__dirname, 'feature-loader.js');
|
||||
delete require.cache[require.resolve(featureLoaderPath)];
|
||||
featureLoader = require(featureLoaderPath);
|
||||
} catch (e2) {
|
||||
// If still fails, try from parent directory
|
||||
try {
|
||||
featureLoader = require(path.join(__dirname, '..', 'services', 'feature-loader'));
|
||||
} catch (e3) {
|
||||
console.error('[McpServerStdio] Error loading feature-loader:', e3.message);
|
||||
console.error('[McpServerStdio] Tried paths:', [
|
||||
'./feature-loader',
|
||||
path.resolve(__dirname, 'feature-loader.js'),
|
||||
path.join(__dirname, '..', 'services', 'feature-loader')
|
||||
]);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Send JSON-RPC response
|
||||
* CRITICAL: Must write directly to stdout, not via console.log
|
||||
* MCP protocol requires ONLY JSON-RPC messages on stdout
|
||||
*/
|
||||
function sendResponse(id, result, error = null) {
|
||||
const response = {
|
||||
jsonrpc: '2.0',
|
||||
id
|
||||
};
|
||||
|
||||
if (error) {
|
||||
response.error = error;
|
||||
} else {
|
||||
response.result = result;
|
||||
}
|
||||
|
||||
// Write directly to stdout with newline (MCP uses line-delimited JSON)
|
||||
process.stdout.write(JSON.stringify(response) + '\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* Send JSON-RPC notification
|
||||
* CRITICAL: Must write directly to stdout, not via console.log
|
||||
*/
|
||||
function sendNotification(method, params) {
|
||||
const notification = {
|
||||
jsonrpc: '2.0',
|
||||
method,
|
||||
params
|
||||
};
|
||||
|
||||
// Write directly to stdout with newline (MCP uses line-delimited JSON)
|
||||
process.stdout.write(JSON.stringify(notification) + '\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle MCP initialize request
|
||||
*/
|
||||
async function handleInitialize(params, id) {
|
||||
initialized = true;
|
||||
|
||||
sendResponse(id, {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: {
|
||||
tools: {}
|
||||
},
|
||||
serverInfo: {
|
||||
name: 'automaker-tools',
|
||||
version: '1.0.0'
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle tools/list request
|
||||
*/
|
||||
async function handleToolsList(params, id) {
|
||||
sendResponse(id, {
|
||||
tools: [
|
||||
{
|
||||
name: 'UpdateFeatureStatus',
|
||||
description: 'Update the status of a feature. Use this tool instead of directly modifying feature files to safely update feature status. IMPORTANT: If the feature has skipTests=true, you should NOT mark it as verified - instead it will automatically go to waiting_approval status for manual review. Always include a summary of what was done.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
featureId: {
|
||||
type: 'string',
|
||||
description: 'The ID of the feature to update'
|
||||
},
|
||||
status: {
|
||||
type: 'string',
|
||||
enum: ['backlog', 'in_progress', 'verified'],
|
||||
description: 'The new status for the feature. Note: If skipTests=true, verified will be converted to waiting_approval automatically.'
|
||||
},
|
||||
summary: {
|
||||
type: 'string',
|
||||
description: 'A brief summary of what was implemented/changed. This will be displayed on the Kanban card. Example: "Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx"'
|
||||
}
|
||||
},
|
||||
required: ['featureId', 'status']
|
||||
}
|
||||
}
|
||||
]
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
 * Handle tools/call request
 */
async function handleToolsCall(params, id) {
  const { name, arguments: args } = params;

  if (name !== 'UpdateFeatureStatus') {
    sendResponse(id, null, {
      code: -32601,
      message: `Unknown tool: ${name}`
    });
    return;
  }

  try {
    const { featureId, status, summary } = args;

    if (!featureId || !status) {
      sendResponse(id, null, {
        code: -32602,
        message: 'Missing required parameters: featureId and status are required'
      });
      return;
    }

    // Load the feature to check skipTests flag
    const features = await featureLoader.loadFeatures(projectPath);
    const feature = features.find((f) => f.id === featureId);

    if (!feature) {
      sendResponse(id, null, {
        code: -32602,
        message: `Feature ${featureId} not found`
      });
      return;
    }

    // If agent tries to mark as verified but feature has skipTests=true, convert to waiting_approval
    let finalStatus = status;
    if (status === 'verified' && feature.skipTests === true) {
      finalStatus = 'waiting_approval';
    }

    // Call the update callback via IPC or direct call
    // Since we're in a separate process, we need to use IPC to communicate back
    // For now, we'll call the feature loader directly since it has the update method
    await featureLoader.updateFeatureStatus(featureId, finalStatus, projectPath, summary);

    const statusMessage = finalStatus !== status
      ? `Successfully updated feature ${featureId} to status "${finalStatus}" (converted from "${status}" because skipTests=true)${summary ? ` with summary: "${summary}"` : ''}`
      : `Successfully updated feature ${featureId} to status "${finalStatus}"${summary ? ` with summary: "${summary}"` : ''}`;

    sendResponse(id, {
      content: [
        {
          type: 'text',
          text: statusMessage
        }
      ]
    });
  } catch (error) {
    console.error('[McpServerStdio] UpdateFeatureStatus error:', error);
    sendResponse(id, null, {
      code: -32603,
      message: `Failed to update feature status: ${error.message}`
    });
  }
}

/**
 * Handle JSON-RPC request
 */
async function handleRequest(line) {
  let request;

  try {
    request = JSON.parse(line);
  } catch (e) {
    sendResponse(null, null, {
      code: -32700,
      message: 'Parse error'
    });
    return;
  }

  // Validate JSON-RPC 2.0 structure
  if (request.jsonrpc !== '2.0') {
    sendResponse(request.id || null, null, {
      code: -32600,
      message: 'Invalid Request'
    });
    return;
  }

  const { method, params, id } = request;

  // Handle notifications (no id)
  if (id === undefined) {
    // Handle notifications if needed
    return;
  }

  // Handle requests
  try {
    switch (method) {
      case 'initialize':
        await handleInitialize(params, id);
        break;

      case 'tools/list':
        if (!initialized) {
          sendResponse(id, null, {
            code: -32002,
            message: 'Server not initialized'
          });
          return;
        }
        await handleToolsList(params, id);
        break;

      case 'tools/call':
        if (!initialized) {
          sendResponse(id, null, {
            code: -32002,
            message: 'Server not initialized'
          });
          return;
        }
        await handleToolsCall(params, id);
        break;

      default:
        sendResponse(id, null, {
          code: -32601,
          message: `Method not found: ${method}`
        });
    }
  } catch (error) {
    console.error('[McpServerStdio] Error handling request:', error);
    sendResponse(id, null, {
      code: -32603,
      message: `Internal error: ${error.message}`
    });
  }
}

// Process stdin line by line
rl.on('line', async (line) => {
  if (!line.trim()) {
    return;
  }

  await handleRequest(line);
});

// Handle errors
rl.on('error', (error) => {
  console.error('[McpServerStdio] Readline error:', error);
  process.exit(1);
});

// Handle process termination
process.on('SIGTERM', () => {
  rl.close();
  process.exit(0);
});

process.on('SIGINT', () => {
  rl.close();
  process.exit(0);
});

// Log startup
console.error('[McpServerStdio] Starting MCP server for automaker-tools');
console.error(`[McpServerStdio] Project path: ${projectPath}`);
console.error(`[McpServerStdio] IPC channel: ${ipcChannel}`);
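// Usage sketch (hypothetical values, not from the original source): the server above
// consumes newline-delimited JSON-RPC 2.0 on stdin. An agent marking a feature verified
// would send a single line such as:
//
//   {"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"UpdateFeatureStatus","arguments":{"featureId":"feature-123","status":"verified","summary":"Added dark mode toggle"}}}
//
// and, on success, receive a result shaped roughly like (assuming sendResponse emits the
// standard { jsonrpc, id, result } envelope):
//
//   {"jsonrpc":"2.0","id":3,"result":{"content":[{"type":"text","text":"Successfully updated feature feature-123 to status \"verified\" with summary: \"Added dark mode toggle\""}]}}
//
// Unknown tools come back as JSON-RPC errors with code -32601, missing parameters or
// unknown features as -32602, and internal failures as -32603, matching handleToolsCall above.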
@@ -1,524 +0,0 @@
/**
 * Model Provider Abstraction Layer
 *
 * This module provides an abstract interface for model providers (Claude, Codex, etc.)
 * allowing the application to use different AI models through a unified API.
 */

/**
 * Base class for model providers
 * Concrete implementations should extend this class
 */
class ModelProvider {
  constructor(config = {}) {
    this.config = config;
    this.name = 'base';
  }

  /**
   * Get provider name
   * @returns {string} Provider name
   */
  getName() {
    return this.name;
  }

  /**
   * Execute a query with the model provider
   * @param {Object} options Query options
   * @param {string} options.prompt The prompt to send
   * @param {string} options.model The model to use
   * @param {string} options.systemPrompt System prompt
   * @param {string} options.cwd Working directory
   * @param {number} options.maxTurns Maximum turns
   * @param {string[]} options.allowedTools Allowed tools
   * @param {Object} options.mcpServers MCP servers configuration
   * @param {AbortController} options.abortController Abort controller
   * @param {Object} options.thinking Thinking configuration
   * @returns {AsyncGenerator} Async generator yielding messages
   */
  async *executeQuery(options) {
    throw new Error('executeQuery must be implemented by subclass');
  }

  /**
   * Detect if this provider's CLI/SDK is installed
   * @returns {Promise<Object>} Installation status
   */
  async detectInstallation() {
    throw new Error('detectInstallation must be implemented by subclass');
  }

  /**
   * Get list of available models for this provider
   * @returns {Array<Object>} Array of model definitions
   */
  getAvailableModels() {
    throw new Error('getAvailableModels must be implemented by subclass');
  }

  /**
   * Validate provider configuration
   * @returns {Object} Validation result { valid: boolean, errors: string[] }
   */
  validateConfig() {
    throw new Error('validateConfig must be implemented by subclass');
  }

  /**
   * Get the full model string for a model key
   * @param {string} modelKey Short model key (e.g., 'opus', 'gpt-5.1-codex')
   * @returns {string} Full model string
   */
  getModelString(modelKey) {
    throw new Error('getModelString must be implemented by subclass');
  }

  /**
   * Check if provider supports a specific feature
   * @param {string} feature Feature name (e.g., 'thinking', 'tools', 'streaming')
   * @returns {boolean} Whether the feature is supported
   */
  supportsFeature(feature) {
    return false;
  }
}

/**
 * Claude Provider - Uses Anthropic Claude Agent SDK
 */
class ClaudeProvider extends ModelProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'claude';
    this.sdk = null;
  }

  /**
   * Try to load credentials from the app's own credentials.json file.
   * This is where we store OAuth tokens and API keys that users enter in the setup wizard.
   * Returns { oauthToken, apiKey } or null values if not found.
   */
  loadTokenFromAppCredentials() {
    try {
      const fs = require('fs');
      const path = require('path');
      const { app } = require('electron');
      const credentialsPath = path.join(app.getPath('userData'), 'credentials.json');

      if (!fs.existsSync(credentialsPath)) {
        console.log('[ClaudeProvider] App credentials file does not exist:', credentialsPath);
        return { oauthToken: null, apiKey: null };
      }

      const raw = fs.readFileSync(credentialsPath, 'utf-8');
      const parsed = JSON.parse(raw);

      // Check for OAuth token first (from claude setup-token), then API key
      const oauthToken = parsed.anthropic_oauth_token || null;
      const apiKey = parsed.anthropic || parsed.anthropic_api_key || null;

      console.log('[ClaudeProvider] App credentials check - OAuth token:', !!oauthToken, ', API key:', !!apiKey);
      return { oauthToken, apiKey };
    } catch (err) {
      console.warn('[ClaudeProvider] Failed to read app credentials:', err?.message);
      return { oauthToken: null, apiKey: null };
    }
  }

  /**
   * Try to load a Claude OAuth token from the local CLI config (~/.claude/config.json).
   * Returns the token string or null if not found.
   * NOTE: Claude's credentials.json is encrypted, so we only try config.json
   */
  loadTokenFromCliConfig() {
    try {
      const fs = require('fs');
      const path = require('path');
      const configPath = path.join(require('os').homedir(), '.claude', 'config.json');
      if (!fs.existsSync(configPath)) {
        return null;
      }
      const raw = fs.readFileSync(configPath, 'utf-8');
      const parsed = JSON.parse(raw);
      // CLI config stores token as oauth_token (newer) or token (older)
      return parsed.oauth_token || parsed.token || null;
    } catch (err) {
      console.warn('[ClaudeProvider] Failed to read CLI config token:', err?.message);
      return null;
    }
  }

  ensureAuthEnv() {
    // If API key or token already present in environment, keep as-is.
    if (process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_CODE_OAUTH_TOKEN) {
      console.log('[ClaudeProvider] Auth already present in environment');
      return true;
    }

    // Priority 1: Try to load from app's own credentials (setup wizard)
    const appCredentials = this.loadTokenFromAppCredentials();
    if (appCredentials.oauthToken) {
      process.env.CLAUDE_CODE_OAUTH_TOKEN = appCredentials.oauthToken;
      console.log('[ClaudeProvider] Loaded CLAUDE_CODE_OAUTH_TOKEN from app credentials');
      return true;
    }
    if (appCredentials.apiKey) {
      process.env.ANTHROPIC_API_KEY = appCredentials.apiKey;
      console.log('[ClaudeProvider] Loaded ANTHROPIC_API_KEY from app credentials');
      return true;
    }

    // Priority 2: Try to hydrate from CLI login config (legacy)
    const token = this.loadTokenFromCliConfig();
    if (token) {
      process.env.CLAUDE_CODE_OAUTH_TOKEN = token;
      console.log('[ClaudeProvider] Loaded CLAUDE_CODE_OAUTH_TOKEN from ~/.claude/config.json');
      return true;
    }

    // Check if CLI is installed but not logged in
    try {
      const claudeCliDetector = require('./claude-cli-detector');
      const detection = claudeCliDetector.detectClaudeInstallation();
      if (detection.installed && detection.method === 'cli') {
        console.error('[ClaudeProvider] Claude CLI is installed but not authenticated. Use the setup wizard or set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable.');
      } else {
        console.error('[ClaudeProvider] No Anthropic auth found. Use the setup wizard or set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN.');
      }
    } catch (err) {
      console.error('[ClaudeProvider] No Anthropic auth found. Use the setup wizard or set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN.');
    }
    return false;
  }

  /**
   * Lazily load the Claude SDK
   */
  loadSdk() {
    if (!this.sdk) {
      this.sdk = require('@anthropic-ai/claude-agent-sdk');
    }
    return this.sdk;
  }

  async *executeQuery(options) {
    // Ensure we have auth; fall back to app credentials or CLI login token if available.
    if (!this.ensureAuthEnv()) {
      // Check if CLI is installed to provide better error message
      let msg = 'Missing Anthropic auth. Go to Settings > Setup to configure your Claude authentication.';
      try {
        const claudeCliDetector = require('./claude-cli-detector');
        const detection = claudeCliDetector.detectClaudeInstallation();
        if (detection.installed && detection.method === 'cli') {
          msg = 'Claude CLI is installed but not authenticated. Go to Settings > Setup to provide your subscription token (from `claude setup-token`) or API key.';
        } else {
          msg = 'Missing Anthropic auth. Go to Settings > Setup to configure your Claude authentication, or set ANTHROPIC_API_KEY environment variable.';
        }
      } catch (err) {
        // Fallback to default message
      }
      console.error(`[ClaudeProvider] ${msg}`);
      yield { type: 'error', error: msg };
      return;
    }

    const { query } = this.loadSdk();

    const sdkOptions = {
      model: options.model,
      systemPrompt: options.systemPrompt,
      maxTurns: options.maxTurns || 1000,
      cwd: options.cwd,
      mcpServers: options.mcpServers,
      allowedTools: options.allowedTools,
      permissionMode: options.permissionMode || 'acceptEdits',
      sandbox: options.sandbox,
      abortController: options.abortController,
    };

    // Add thinking configuration if enabled
    if (options.thinking) {
      sdkOptions.thinking = options.thinking;
    }

    const currentQuery = query({ prompt: options.prompt, options: sdkOptions });

    for await (const msg of currentQuery) {
      yield msg;
    }
  }

  async detectInstallation() {
    const claudeCliDetector = require('./claude-cli-detector');
    return claudeCliDetector.getInstallationInfo();
  }

  getAvailableModels() {
    return [
      {
        id: 'haiku',
        name: 'Claude Haiku',
        modelString: 'claude-haiku-4-5',
        provider: 'claude',
        description: 'Fast and efficient for simple tasks',
        tier: 'basic'
      },
      {
        id: 'sonnet',
        name: 'Claude Sonnet',
        modelString: 'claude-sonnet-4-20250514',
        provider: 'claude',
        description: 'Balanced performance and capabilities',
        tier: 'standard'
      },
      {
        id: 'opus',
        name: 'Claude Opus 4.5',
        modelString: 'claude-opus-4-5-20251101',
        provider: 'claude',
        description: 'Most capable model for complex tasks',
        tier: 'premium'
      }
    ];
  }

  validateConfig() {
    const errors = [];

    // Ensure auth is available (try to auto-load from app credentials or CLI config)
    this.ensureAuthEnv();

    if (!process.env.CLAUDE_CODE_OAUTH_TOKEN && !process.env.ANTHROPIC_API_KEY) {
      errors.push('No Claude authentication found. Go to Settings > Setup to configure your subscription token or API key.');
    }

    return {
      valid: errors.length === 0,
      errors
    };
  }

  getModelString(modelKey) {
    const modelMap = {
      haiku: 'claude-haiku-4-5',
      sonnet: 'claude-sonnet-4-20250514',
      opus: 'claude-opus-4-5-20251101'
    };
    return modelMap[modelKey] || modelMap.opus;
  }

  supportsFeature(feature) {
    const supportedFeatures = ['thinking', 'tools', 'streaming', 'mcp'];
    return supportedFeatures.includes(feature);
  }
}

/**
 * Codex Provider - Uses OpenAI Codex CLI
 */
class CodexProvider extends ModelProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'codex';
  }

  async *executeQuery(options) {
    const codexExecutor = require('./codex-executor');

    // Validate that we're not receiving a Claude model string
    if (options.model && options.model.startsWith('claude-')) {
      const errorMsg = `Codex provider cannot use Claude model '${options.model}'. Codex only supports OpenAI models (gpt-5.1-codex-max, gpt-5.1-codex, gpt-5.1-codex-mini, gpt-5.1).`;
      console.error(`[CodexProvider] ${errorMsg}`);
      yield {
        type: 'error',
        error: errorMsg
      };
      return;
    }

    const executeOptions = {
      prompt: options.prompt,
      model: options.model,
      cwd: options.cwd,
      systemPrompt: options.systemPrompt,
      maxTurns: options.maxTurns || 20,
      allowedTools: options.allowedTools,
      mcpServers: options.mcpServers, // Pass MCP servers config to executor
      env: {
        ...process.env,
        OPENAI_API_KEY: process.env.OPENAI_API_KEY
      }
    };

    // Execute and yield results
    const generator = codexExecutor.execute(executeOptions);
    for await (const msg of generator) {
      yield msg;
    }
  }

  async detectInstallation() {
    const codexCliDetector = require('./codex-cli-detector');
    return codexCliDetector.getInstallationInfo();
  }

  getAvailableModels() {
    return [
      {
        id: 'gpt-5.1-codex-max',
        name: 'GPT-5.1 Codex Max',
        modelString: 'gpt-5.1-codex-max',
        provider: 'codex',
        description: 'Latest flagship - deep and fast reasoning for coding',
        tier: 'premium',
        default: true
      },
      {
        id: 'gpt-5.1-codex',
        name: 'GPT-5.1 Codex',
        modelString: 'gpt-5.1-codex',
        provider: 'codex',
        description: 'Optimized for code generation',
        tier: 'standard'
      },
      {
        id: 'gpt-5.1-codex-mini',
        name: 'GPT-5.1 Codex Mini',
        modelString: 'gpt-5.1-codex-mini',
        provider: 'codex',
        description: 'Faster and cheaper option',
        tier: 'basic'
      },
      {
        id: 'gpt-5.1',
        name: 'GPT-5.1',
        modelString: 'gpt-5.1',
        provider: 'codex',
        description: 'Broad world knowledge with strong reasoning',
        tier: 'standard'
      }
    ];
  }

  validateConfig() {
    const errors = [];
    const codexCliDetector = require('./codex-cli-detector');
    const installation = codexCliDetector.detectCodexInstallation();

    if (!installation.installed && !process.env.OPENAI_API_KEY) {
      errors.push('Codex CLI not installed and no OPENAI_API_KEY found.');
    }

    return {
      valid: errors.length === 0,
      errors
    };
  }

  getModelString(modelKey) {
    // Codex models use the key directly as the model string
    const modelMap = {
      'gpt-5.1-codex-max': 'gpt-5.1-codex-max',
      'gpt-5.1-codex': 'gpt-5.1-codex',
      'gpt-5.1-codex-mini': 'gpt-5.1-codex-mini',
      'gpt-5.1': 'gpt-5.1'
    };
    return modelMap[modelKey] || 'gpt-5.1-codex-max';
  }

  supportsFeature(feature) {
    const supportedFeatures = ['tools', 'streaming'];
    return supportedFeatures.includes(feature);
  }
}

/**
 * Model Provider Factory
 * Creates the appropriate provider based on model or provider name
 */
class ModelProviderFactory {
  static providers = {
    claude: ClaudeProvider,
    codex: CodexProvider
  };

  /**
   * Get provider for a specific model
   * @param {string} modelId Model ID (e.g., 'opus', 'gpt-5.1-codex')
   * @returns {ModelProvider} Provider instance
   */
  static getProviderForModel(modelId) {
    // Check if it's a Claude model
    const claudeModels = ['haiku', 'sonnet', 'opus'];
    if (claudeModels.includes(modelId)) {
      return new ClaudeProvider();
    }

    // Check if it's a Codex/OpenAI model
    const codexModels = [
      'gpt-5.1-codex-max', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5.1'
    ];
    if (codexModels.includes(modelId)) {
      return new CodexProvider();
    }

    // Default to Claude
    return new ClaudeProvider();
  }

  /**
   * Get provider by name
   * @param {string} providerName Provider name ('claude' or 'codex')
   * @returns {ModelProvider} Provider instance
   */
  static getProvider(providerName) {
    const ProviderClass = this.providers[providerName];
    if (!ProviderClass) {
      throw new Error(`Unknown provider: ${providerName}`);
    }
    return new ProviderClass();
  }

  /**
   * Get all available providers
   * @returns {string[]} List of provider names
   */
  static getAvailableProviders() {
    return Object.keys(this.providers);
  }

  /**
   * Get all available models across all providers
   * @returns {Array<Object>} All available models
   */
  static getAllModels() {
    const allModels = [];
    for (const providerName of this.getAvailableProviders()) {
      const provider = this.getProvider(providerName);
      const models = provider.getAvailableModels();
      allModels.push(...models);
    }
    return allModels;
  }

  /**
   * Check installation status for all providers
   * @returns {Promise<Object>} Installation status for each provider
   */
  static async checkAllProviders() {
    const status = {};
    for (const providerName of this.getAvailableProviders()) {
      const provider = this.getProvider(providerName);
      status[providerName] = await provider.detectInstallation();
    }
    return status;
  }
}

module.exports = {
  ModelProvider,
  ClaudeProvider,
  CodexProvider,
  ModelProviderFactory
};
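// Usage sketch (hypothetical caller; assumes the module above is saved as model-provider.js
// and that auth for the chosen provider is already configured):
const { ModelProviderFactory } = require('./model-provider');

async function runPrompt() {
  // 'opus' resolves to a ClaudeProvider; a Codex ID like 'gpt-5.1-codex' would resolve to CodexProvider.
  const provider = ModelProviderFactory.getProviderForModel('opus');
  const abortController = new AbortController();

  // executeQuery is an async generator; stream its messages until completion or error.
  for await (const msg of provider.executeQuery({
    prompt: 'List the files in this project',
    model: provider.getModelString('opus'),
    cwd: process.cwd(),
    maxTurns: 10,
    abortController,
  })) {
    if (msg.type === 'error') {
      throw new Error(msg.error);
    }
    console.log(msg);
  }
}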
@@ -1,320 +0,0 @@
/**
 * Model Registry - Centralized model definitions and metadata
 *
 * This module provides a central registry of all available models
 * across different providers (Claude, Codex/OpenAI).
 */

/**
 * Model Categories
 */
const MODEL_CATEGORIES = {
  CLAUDE: 'claude',
  OPENAI: 'openai',
  CODEX: 'codex'
};

/**
 * Model Tiers (capability levels)
 */
const MODEL_TIERS = {
  BASIC: 'basic',       // Fast, cheap, simple tasks
  STANDARD: 'standard', // Balanced performance
  PREMIUM: 'premium'    // Most capable, complex tasks
};

const CODEX_MODEL_IDS = [
  'gpt-5.1-codex-max',
  'gpt-5.1-codex',
  'gpt-5.1-codex-mini',
  'gpt-5.1'
];

/**
 * All available models with full metadata
 */
const MODELS = {
  // Claude Models
  haiku: {
    id: 'haiku',
    name: 'Claude Haiku',
    modelString: 'claude-haiku-4-5',
    provider: 'claude',
    category: MODEL_CATEGORIES.CLAUDE,
    tier: MODEL_TIERS.BASIC,
    description: 'Fast and efficient for simple tasks',
    capabilities: ['code', 'text', 'tools'],
    maxTokens: 8192,
    contextWindow: 200000,
    supportsThinking: true,
    requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN'
  },
  sonnet: {
    id: 'sonnet',
    name: 'Claude Sonnet',
    modelString: 'claude-sonnet-4-20250514',
    provider: 'claude',
    category: MODEL_CATEGORIES.CLAUDE,
    tier: MODEL_TIERS.STANDARD,
    description: 'Balanced performance and capabilities',
    capabilities: ['code', 'text', 'tools', 'analysis'],
    maxTokens: 8192,
    contextWindow: 200000,
    supportsThinking: true,
    requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN'
  },
  opus: {
    id: 'opus',
    name: 'Claude Opus 4.5',
    modelString: 'claude-opus-4-5-20251101',
    provider: 'claude',
    category: MODEL_CATEGORIES.CLAUDE,
    tier: MODEL_TIERS.PREMIUM,
    description: 'Most capable model for complex tasks',
    capabilities: ['code', 'text', 'tools', 'analysis', 'reasoning'],
    maxTokens: 8192,
    contextWindow: 200000,
    supportsThinking: true,
    requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN',
    default: true
  },

  // OpenAI GPT-5.1 Codex Models
  'gpt-5.1-codex-max': {
    id: 'gpt-5.1-codex-max',
    name: 'GPT-5.1 Codex Max',
    modelString: 'gpt-5.1-codex-max',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.PREMIUM,
    description: 'Latest flagship - deep and fast reasoning for coding',
    capabilities: ['code', 'text', 'tools', 'reasoning'],
    maxTokens: 32768,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY',
    codexDefault: true
  },
  'gpt-5.1-codex': {
    id: 'gpt-5.1-codex',
    name: 'GPT-5.1 Codex',
    modelString: 'gpt-5.1-codex',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.STANDARD,
    description: 'Optimized for code generation',
    capabilities: ['code', 'text', 'tools'],
    maxTokens: 32768,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  },
  'gpt-5.1-codex-mini': {
    id: 'gpt-5.1-codex-mini',
    name: 'GPT-5.1 Codex Mini',
    modelString: 'gpt-5.1-codex-mini',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.BASIC,
    description: 'Faster and cheaper option',
    capabilities: ['code', 'text'],
    maxTokens: 16384,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  },
  'gpt-5.1': {
    id: 'gpt-5.1',
    name: 'GPT-5.1',
    modelString: 'gpt-5.1',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.STANDARD,
    description: 'Broad world knowledge with strong reasoning',
    capabilities: ['code', 'text', 'reasoning'],
    maxTokens: 32768,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  }
};

/**
 * Model Registry class for querying and managing models
 */
class ModelRegistry {
  /**
   * Get all registered models
   * @returns {Object} All models
   */
  static getAllModels() {
    return MODELS;
  }

  /**
   * Get model by ID
   * @param {string} modelId Model ID
   * @returns {Object|null} Model definition or null
   */
  static getModel(modelId) {
    return MODELS[modelId] || null;
  }

  /**
   * Get models by provider
   * @param {string} provider Provider name ('claude' or 'codex')
   * @returns {Object[]} Array of models for the provider
   */
  static getModelsByProvider(provider) {
    return Object.values(MODELS).filter(m => m.provider === provider);
  }

  /**
   * Get models by category
   * @param {string} category Category name
   * @returns {Object[]} Array of models in the category
   */
  static getModelsByCategory(category) {
    return Object.values(MODELS).filter(m => m.category === category);
  }

  /**
   * Get models by tier
   * @param {string} tier Tier name
   * @returns {Object[]} Array of models in the tier
   */
  static getModelsByTier(tier) {
    return Object.values(MODELS).filter(m => m.tier === tier);
  }

  /**
   * Get default model for a provider
   * @param {string} provider Provider name
   * @returns {Object|null} Default model or null
   */
  static getDefaultModel(provider = 'claude') {
    const models = this.getModelsByProvider(provider);
    if (provider === 'claude') {
      return models.find(m => m.default) || models[0];
    }
    if (provider === 'codex') {
      return models.find(m => m.codexDefault) || models[0];
    }
    return models[0];
  }

  /**
   * Get model string (full model name) for a model ID
   * @param {string} modelId Model ID
   * @returns {string} Full model string
   */
  static getModelString(modelId) {
    const model = this.getModel(modelId);
    return model ? model.modelString : modelId;
  }

  /**
   * Determine provider for a model ID
   * @param {string} modelId Model ID
   * @returns {string} Provider name ('claude' or 'codex')
   */
  static getProviderForModel(modelId) {
    const model = this.getModel(modelId);
    if (model) {
      return model.provider;
    }

    // Fallback detection for models not explicitly registered (keeps legacy Codex IDs working)
    if (CODEX_MODEL_IDS.includes(modelId)) {
      return 'codex';
    }

    return 'claude';
  }

  /**
   * Check if a model is a Claude model
   * @param {string} modelId Model ID
   * @returns {boolean} Whether it's a Claude model
   */
  static isClaudeModel(modelId) {
    return this.getProviderForModel(modelId) === 'claude';
  }

  /**
   * Check if a model is a Codex/OpenAI model
   * @param {string} modelId Model ID
   * @returns {boolean} Whether it's a Codex model
   */
  static isCodexModel(modelId) {
    return this.getProviderForModel(modelId) === 'codex';
  }

  /**
   * Get models grouped by provider for UI display
   * @returns {Object} Models grouped by provider
   */
  static getModelsGroupedByProvider() {
    return {
      claude: this.getModelsByProvider('claude'),
      codex: this.getModelsByProvider('codex')
    };
  }

  /**
   * Get all model IDs as an array
   * @returns {string[]} Array of model IDs
   */
  static getAllModelIds() {
    return Object.keys(MODELS);
  }

  /**
   * Check if model supports a specific capability
   * @param {string} modelId Model ID
   * @param {string} capability Capability name
   * @returns {boolean} Whether the model supports the capability
   */
  static modelSupportsCapability(modelId, capability) {
    const model = this.getModel(modelId);
    return model ? model.capabilities.includes(capability) : false;
  }

  /**
   * Check if model supports extended thinking
   * @param {string} modelId Model ID
   * @returns {boolean} Whether the model supports thinking
   */
  static modelSupportsThinking(modelId) {
    const model = this.getModel(modelId);
    return model ? model.supportsThinking : false;
  }

  /**
   * Get required authentication for a model
   * @param {string} modelId Model ID
   * @returns {string|null} Required auth env variable name
   */
  static getRequiredAuth(modelId) {
    const model = this.getModel(modelId);
    return model ? model.requiresAuth : null;
  }

  /**
   * Check if authentication is available for a model
   * @param {string} modelId Model ID
   * @returns {boolean} Whether auth is available
   */
  static hasAuthForModel(modelId) {
    const authVar = this.getRequiredAuth(modelId);
    if (!authVar) return false;
    return !!process.env[authVar];
  }
}

module.exports = {
  MODEL_CATEGORIES,
  MODEL_TIERS,
  MODELS,
  ModelRegistry
};
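// Usage sketch (hypothetical caller; assumes the module above is saved as model-registry.js):
const { ModelRegistry } = require('./model-registry');

function describeModel(modelId) {
  // Look up static metadata for an ID such as 'opus' or 'gpt-5.1-codex'.
  const model = ModelRegistry.getModel(modelId);
  if (!model) {
    return `Unknown model: ${modelId}`;
  }
  const provider = ModelRegistry.getProviderForModel(modelId); // 'claude' or 'codex'
  const authOk = ModelRegistry.hasAuthForModel(modelId);       // checks the env var named in requiresAuth
  return `${model.name} (${model.modelString}) via ${provider}, auth ${authOk ? 'available' : 'missing'}`;
}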
@@ -1,112 +0,0 @@
const { query, AbortError } = require("@anthropic-ai/claude-agent-sdk");
const promptBuilder = require("./prompt-builder");

/**
 * Project Analyzer - Scans codebase and updates app_spec.txt
 */
class ProjectAnalyzer {
  /**
   * Run the project analysis using Claude Agent SDK
   */
  async runProjectAnalysis(projectPath, analysisId, sendToRenderer, execution) {
    console.log(`[ProjectAnalyzer] Running project analysis for: ${projectPath}`);

    try {
      sendToRenderer({
        type: "auto_mode_phase",
        featureId: analysisId,
        phase: "planning",
        message: "Scanning project structure...",
      });

      const abortController = new AbortController();
      execution.abortController = abortController;

      const options = {
        model: "claude-sonnet-4-20250514",
        systemPrompt: promptBuilder.getProjectAnalysisSystemPrompt(),
        maxTurns: 50,
        cwd: projectPath,
        allowedTools: ["Read", "Write", "Edit", "Glob", "Grep", "Bash"],
        permissionMode: "acceptEdits",
        sandbox: {
          enabled: true,
          autoAllowBashIfSandboxed: true,
        },
        abortController: abortController,
      };

      const prompt = promptBuilder.buildProjectAnalysisPrompt(projectPath);

      sendToRenderer({
        type: "auto_mode_progress",
        featureId: analysisId,
        content: "Starting project analysis...\n",
      });

      const currentQuery = query({ prompt, options });
      execution.query = currentQuery;

      let responseText = "";
      for await (const msg of currentQuery) {
        if (!execution.isActive()) break;

        if (msg.type === "assistant" && msg.message?.content) {
          for (const block of msg.message.content) {
            if (block.type === "text") {
              responseText += block.text;
              sendToRenderer({
                type: "auto_mode_progress",
                featureId: analysisId,
                content: block.text,
              });
            } else if (block.type === "tool_use") {
              sendToRenderer({
                type: "auto_mode_tool",
                featureId: analysisId,
                tool: block.name,
                input: block.input,
              });
            }
          }
        }
      }

      execution.query = null;
      execution.abortController = null;

      sendToRenderer({
        type: "auto_mode_phase",
        featureId: analysisId,
        phase: "verification",
        message: "Project analysis complete",
      });

      return {
        success: true,
        message: "Project analyzed successfully",
      };
    } catch (error) {
      if (error instanceof AbortError || error?.name === "AbortError") {
        console.log("[ProjectAnalyzer] Project analysis aborted");
        if (execution) {
          execution.abortController = null;
          execution.query = null;
        }
        return {
          success: false,
          message: "Analysis aborted",
        };
      }

      console.error("[ProjectAnalyzer] Error in project analysis:", error);
      if (execution) {
        execution.abortController = null;
        execution.query = null;
      }
      throw error;
    }
  }
}

module.exports = new ProjectAnalyzer();
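// Usage sketch (hypothetical caller; the execution and sendToRenderer shapes below are
// inferred from how runProjectAnalysis uses them above, not taken from the original source):
const projectAnalyzer = require("./project-analyzer");

async function analyzeProject(projectPath) {
  // Minimal execution handle: runProjectAnalysis stores the active query and
  // AbortController on it and polls isActive() to decide whether to keep streaming.
  const execution = {
    abortController: null,
    query: null,
    active: true,
    isActive() {
      return this.active;
    },
  };

  // Forward progress events (auto_mode_phase / auto_mode_progress / auto_mode_tool)
  // to the UI; here we just log them.
  const sendToRenderer = (event) => {
    console.log("[analysis event]", event.type, event.message || event.content || event.tool || "");
  };

  return projectAnalyzer.runProjectAnalysis(projectPath, "analysis-1", sendToRenderer, execution);
}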
@@ -1,787 +0,0 @@
|
||||
const contextManager = require("./context-manager");
|
||||
|
||||
/**
|
||||
* Prompt Builder - Generates prompts for different agent tasks
|
||||
*/
|
||||
class PromptBuilder {
|
||||
/**
|
||||
* Build the prompt for implementing a specific feature
|
||||
*/
|
||||
async buildFeaturePrompt(feature, projectPath) {
|
||||
const skipTestsNote = feature.skipTests
|
||||
? `\n**⚠️ IMPORTANT - Manual Testing Mode:**\nThis feature has skipTests=true, which means:\n- DO NOT commit changes automatically\n- DO NOT mark as verified - it will automatically go to "waiting_approval" status\n- The user will manually review and commit the changes\n- Just implement the feature and mark it as verified (it will be converted to waiting_approval)\n`
|
||||
: "";
|
||||
|
||||
let imagesNote = "";
|
||||
if (feature.imagePaths && feature.imagePaths.length > 0) {
|
||||
const imagesList = feature.imagePaths
|
||||
.map(
|
||||
(img, idx) =>
|
||||
` ${idx + 1}. ${img.filename} (${img.mimeType})\n Path: ${
|
||||
img.path
|
||||
}`
|
||||
)
|
||||
.join("\n");
|
||||
|
||||
imagesNote = `\n**📎 Context Images Attached:**\nThe user has attached ${feature.imagePaths.length} image(s) for context. These images are provided both visually (in the initial message) and as files you can read:
|
||||
|
||||
${imagesList}
|
||||
|
||||
You can use the Read tool to view these images at any time during implementation. Review them carefully before implementing.\n`;
|
||||
}
|
||||
|
||||
// Get context files preview
|
||||
const contextFilesPreview = await contextManager.getContextFilesPreview(
|
||||
projectPath
|
||||
);
|
||||
|
||||
// Get memory content (lessons learned from previous runs)
|
||||
const memoryContent = await contextManager.getMemoryContent(projectPath);
|
||||
|
||||
// Build mode header for this feature
|
||||
const modeHeader = feature.skipTests
|
||||
? `**🔨 MODE: Manual Review (No Automated Tests)**
|
||||
This feature is set for manual review - focus on clean implementation without automated tests.`
|
||||
: `**🧪 MODE: Test-Driven Development (TDD)**
|
||||
This feature requires automated Playwright tests to verify the implementation.`;
|
||||
|
||||
return `You are working on a feature implementation task.
|
||||
|
||||
${modeHeader}
|
||||
${memoryContent}
|
||||
**Current Feature to Implement:**
|
||||
|
||||
ID: ${feature.id}
|
||||
Category: ${feature.category}
|
||||
Description: ${feature.description}
|
||||
${skipTestsNote}${imagesNote}${contextFilesPreview}
|
||||
**Steps to Complete:**
|
||||
${feature.steps.map((step, i) => `${i + 1}. ${step}`).join("\n")}
|
||||
|
||||
**Your Task:**
|
||||
|
||||
1. Read the project files to understand the current codebase structure
|
||||
2. Implement the feature according to the description and steps
|
||||
${
|
||||
feature.skipTests
|
||||
? "3. Test the implementation manually (no automated tests needed for skipTests features)"
|
||||
: "3. Write Playwright tests to verify the feature works correctly\n4. Run the tests and ensure they pass\n5. **DELETE the test file(s) you created** - tests are only for immediate verification"
|
||||
}
|
||||
${
|
||||
feature.skipTests ? "4" : "6"
|
||||
}. **CRITICAL: Use the UpdateFeatureStatus tool to mark this feature as verified**
|
||||
${
|
||||
feature.skipTests
|
||||
? "5. **DO NOT commit changes** - the user will review and commit manually"
|
||||
: "7. Commit your changes with git"
|
||||
}
|
||||
|
||||
**IMPORTANT - Updating Feature Status:**
|
||||
|
||||
When you have completed the feature${
|
||||
feature.skipTests ? "" : " and all tests pass"
|
||||
}, you MUST use the \`mcp__automaker-tools__UpdateFeatureStatus\` tool to update the feature status:
|
||||
- Call the tool with: featureId="${feature.id}" and status="verified"
|
||||
- **You can also include a summary parameter** to describe what was done: summary="Brief summary of changes"
|
||||
- **DO NOT manually edit feature files** - this can cause race conditions
|
||||
- The UpdateFeatureStatus tool safely updates the feature status without risk of corrupting other data
|
||||
- **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct behavior
|
||||
|
||||
**IMPORTANT - Feature Summary (REQUIRED):**
|
||||
|
||||
When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
|
||||
- What files were modified/created
|
||||
- What functionality was added or changed
|
||||
- Any notable implementation decisions
|
||||
|
||||
Example:
|
||||
\`\`\`
|
||||
UpdateFeatureStatus(featureId="${
|
||||
feature.id
|
||||
}", status="verified", summary="Added dark mode toggle to settings. Modified: settings.tsx, theme-provider.tsx. Created new useTheme hook.")
|
||||
\`\`\`
|
||||
|
||||
The summary will be displayed on the Kanban card so the user can see what was done without checking the code.
|
||||
|
||||
**Important Guidelines:**
|
||||
|
||||
- Focus ONLY on implementing this specific feature
|
||||
- Write clean, production-quality code
|
||||
- Add proper error handling
|
||||
${
|
||||
feature.skipTests
|
||||
? "- Skip automated testing (skipTests=true) - user will manually verify"
|
||||
: "- Write comprehensive Playwright tests\n- Ensure all existing tests still pass\n- Mark the feature as passing only when all tests are green\n- **CRITICAL: Delete test files after verification** - tests accumulate and become brittle"
|
||||
}
|
||||
- **CRITICAL: Use UpdateFeatureStatus tool instead of editing feature files directly**
|
||||
- **CRITICAL: Always include a summary when marking feature as verified**
|
||||
${
|
||||
feature.skipTests
|
||||
? "- **DO NOT commit changes** - user will review and commit manually"
|
||||
: "- Make a git commit when complete"
|
||||
}
|
||||
|
||||
**Testing Utilities (CRITICAL):**
|
||||
|
||||
1. **Create/maintain tests/utils.ts** - Add helper functions for finding elements and common test operations
|
||||
2. **Use utilities in tests** - Import and use helper functions instead of repeating selectors
|
||||
3. **Add utilities as needed** - When you write a test, if you need a new helper, add it to utils.ts
|
||||
4. **Update utilities when functionality changes** - If you modify components, update corresponding utilities
|
||||
|
||||
Example utilities to add:
|
||||
- getByTestId(page, testId) - Find elements by data-testid
|
||||
- getButtonByText(page, text) - Find buttons by text
|
||||
- clickElement(page, testId) - Click an element by test ID
|
||||
- fillForm(page, formData) - Fill form fields
|
||||
- waitForElement(page, testId) - Wait for element to appear
|
||||
|
||||
This makes future tests easier to write and maintain!
|
||||
|
||||
**Test Deletion Policy:**
|
||||
After tests pass, delete them immediately:
|
||||
\`\`\`bash
|
||||
rm tests/[feature-name].spec.ts
|
||||
\`\`\`
|
||||
|
||||
Begin by reading the project structure and then implementing the feature.`;
|
||||
}
|
||||
|
||||
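// Usage sketch (hypothetical feature object; the fields mirror what buildFeaturePrompt
// reads above - id, category, description, steps, skipTests, and optional imagePaths):
//
//   const feature = {
//     id: "feature-123",
//     category: "ui",
//     description: "Add a dark mode toggle to the settings page",
//     steps: ["Add toggle component", "Persist preference", "Apply theme on load"],
//     skipTests: false,
//     imagePaths: [],
//   };
//   const prompt = await promptBuilder.buildFeaturePrompt(feature, projectPath);
//
// With skipTests=false the prompt uses the TDD header and Playwright instructions;
// with skipTests=true it switches to the manual-review wording shown above.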
/**
|
||||
* Build the prompt for verifying a specific feature
|
||||
*/
|
||||
async buildVerificationPrompt(feature, projectPath) {
|
||||
const skipTestsNote = feature.skipTests
|
||||
? `\n**⚠️ IMPORTANT - Manual Testing Mode:**\nThis feature has skipTests=true, which means:\n- DO NOT commit changes automatically\n- DO NOT mark as verified - it will automatically go to "waiting_approval" status\n- The user will manually review and commit the changes\n- Just implement the feature and mark it as verified (it will be converted to waiting_approval)\n`
|
||||
: "";
|
||||
|
||||
let imagesNote = "";
|
||||
if (feature.imagePaths && feature.imagePaths.length > 0) {
|
||||
const imagesList = feature.imagePaths
|
||||
.map(
|
||||
(img, idx) =>
|
||||
` ${idx + 1}. ${img.filename} (${img.mimeType})\n Path: ${
|
||||
img.path
|
||||
}`
|
||||
)
|
||||
.join("\n");
|
||||
|
||||
imagesNote = `\n**📎 Context Images Attached:**\nThe user has attached ${feature.imagePaths.length} image(s) for context. These images are provided both visually (in the initial message) and as files you can read:
|
||||
|
||||
${imagesList}
|
||||
|
||||
You can use the Read tool to view these images at any time during implementation. Review them carefully before implementing.\n`;
|
||||
}
|
||||
|
||||
// Get context files preview
|
||||
const contextFilesPreview = await contextManager.getContextFilesPreview(
|
||||
projectPath
|
||||
);
|
||||
|
||||
// Get memory content (lessons learned from previous runs)
|
||||
const memoryContent = await contextManager.getMemoryContent(projectPath);
|
||||
|
||||
// Build mode header for this feature
|
||||
const modeHeader = feature.skipTests
|
||||
? `**🔨 MODE: Manual Review (No Automated Tests)**
|
||||
This feature is set for manual review - focus on completing implementation without automated tests.`
|
||||
: `**🧪 MODE: Test-Driven Development (TDD)**
|
||||
This feature requires automated Playwright tests to verify the implementation.`;
|
||||
|
||||
return `You are implementing and verifying a feature until it is complete and working correctly.
|
||||
|
||||
${modeHeader}
|
||||
${memoryContent}
|
||||
|
||||
**Feature to Implement/Verify:**
|
||||
|
||||
ID: ${feature.id}
|
||||
Category: ${feature.category}
|
||||
Description: ${feature.description}
|
||||
Current Status: ${feature.status}
|
||||
${skipTestsNote}${imagesNote}${contextFilesPreview}
|
||||
**Steps that should be implemented:**
|
||||
${feature.steps.map((step, i) => `${i + 1}. ${step}`).join("\n")}
|
||||
|
||||
**Your Task:**
|
||||
|
||||
1. Read the project files to understand the current implementation
|
||||
2. If the feature is not fully implemented, continue implementing it
|
||||
${
|
||||
feature.skipTests
|
||||
? "3. Test the implementation manually (no automated tests needed for skipTests features)"
|
||||
: `3. Write or update Playwright tests to verify the feature works correctly
|
||||
4. Run the Playwright tests: npx playwright test tests/[feature-name].spec.ts
|
||||
5. Check if all tests pass
|
||||
6. **If ANY tests fail:**
|
||||
- Analyze the test failures and error messages
|
||||
- Fix the implementation code to make the tests pass
|
||||
- Update test utilities in tests/utils.ts if needed
|
||||
- Re-run the tests to verify the fixes
|
||||
- **REPEAT this process until ALL tests pass**
|
||||
7. **If ALL tests pass:**
|
||||
- **DELETE the test file(s) for this feature** - tests are only for immediate verification`
|
||||
}
|
||||
${
|
||||
feature.skipTests ? "4" : "8"
|
||||
}. **CRITICAL: Use the UpdateFeatureStatus tool to mark this feature as verified**
|
||||
${
|
||||
feature.skipTests
|
||||
? "5. **DO NOT commit changes** - the user will review and commit manually"
|
||||
: "9. Explain what was implemented/fixed and that all tests passed\n10. Commit your changes with git"
|
||||
}
|
||||
|
||||
**IMPORTANT - Updating Feature Status:**
|
||||
|
||||
When you have completed the feature${
|
||||
feature.skipTests ? "" : " and all tests pass"
|
||||
}, you MUST use the \`mcp__automaker-tools__UpdateFeatureStatus\` tool to update the feature status:
|
||||
- Call the tool with: featureId="${feature.id}" and status="verified"
|
||||
- **You can also include a summary parameter** to describe what was done: summary="Brief summary of changes"
|
||||
- **DO NOT manually edit feature files** - this can cause race conditions
|
||||
- The UpdateFeatureStatus tool safely updates the feature status without risk of corrupting other data
|
||||
- **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct behavior
|
||||
|
||||
**IMPORTANT - Feature Summary (REQUIRED):**
|
||||
|
||||
When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
|
||||
- What files were modified/created
|
||||
- What functionality was added or changed
|
||||
- Any notable implementation decisions
|
||||
|
||||
Example:
|
||||
\`\`\`
|
||||
UpdateFeatureStatus(featureId="${
|
||||
feature.id
|
||||
}", status="verified", summary="Added dark mode toggle to settings. Modified: settings.tsx, theme-provider.tsx. Created new useTheme hook.")
|
||||
\`\`\`
|
||||
|
||||
The summary will be displayed on the Kanban card so the user can see what was done without checking the code.
|
||||
|
||||
**Testing Utilities:**
|
||||
- Check if tests/utils.ts exists and is being used
|
||||
- If utilities are outdated due to functionality changes, update them
|
||||
- Add new utilities as needed for this feature's tests
|
||||
- Ensure test utilities stay in sync with code changes
|
||||
|
||||
**Test Deletion Policy:**
|
||||
After tests pass, delete them immediately:
|
||||
\`\`\`bash
|
||||
rm tests/[feature-name].spec.ts
|
||||
\`\`\`
|
||||
|
||||
**Important:**
|
||||
${
|
||||
feature.skipTests
|
||||
? "- Skip automated testing (skipTests=true) - user will manually verify\n- **DO NOT commit changes** - user will review and commit manually"
|
||||
: "- **CONTINUE IMPLEMENTING until all tests pass** - don't stop at the first failure\n- Only mark as verified if Playwright tests pass\n- **CRITICAL: Delete test files after they pass** - tests should not accumulate\n- Update test utilities if functionality changed\n- Make a git commit when the feature is complete\n- Be thorough and persistent in fixing issues"
|
||||
}
|
||||
- **CRITICAL: Use UpdateFeatureStatus tool instead of editing feature files directly**
|
||||
- **CRITICAL: Always include a summary when marking feature as verified**
|
||||
|
||||
Begin by reading the project structure and understanding what needs to be implemented or fixed.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build prompt for resuming feature with previous context
|
||||
*/
|
||||
async buildResumePrompt(feature, previousContext, projectPath) {
|
||||
const skipTestsNote = feature.skipTests
|
||||
? `\n**⚠️ IMPORTANT - Manual Testing Mode:**\nThis feature has skipTests=true, which means:\n- DO NOT commit changes automatically\n- DO NOT mark as verified - it will automatically go to "waiting_approval" status\n- The user will manually review and commit the changes\n- Just implement the feature and mark it as verified (it will be converted to waiting_approval)\n`
|
||||
: "";
|
||||
|
||||
// For resume, check both followUpImages and imagePaths
|
||||
const imagePaths = feature.followUpImages || feature.imagePaths;
|
||||
let imagesNote = "";
|
||||
if (imagePaths && imagePaths.length > 0) {
|
||||
const imagesList = imagePaths
|
||||
.map((img, idx) => {
|
||||
// Handle both FeatureImagePath objects and simple path strings
|
||||
const path = typeof img === "string" ? img : img.path;
|
||||
const filename =
|
||||
typeof img === "string" ? path.split("/").pop() : img.filename;
|
||||
const mimeType = typeof img === "string" ? "image/*" : img.mimeType;
|
||||
return ` ${
|
||||
idx + 1
|
||||
}. ${filename} (${mimeType})\n Path: ${path}`;
|
||||
})
|
||||
.join("\n");
|
||||
|
||||
imagesNote = `\n**📎 Context Images Attached:**\nThe user has attached ${imagePaths.length} image(s) for context. These images are provided both visually (in the initial message) and as files you can read:
|
||||
|
||||
${imagesList}
|
||||
|
||||
You can use the Read tool to view these images at any time. Review them carefully.\n`;
|
||||
}
|
||||
|
||||
// Get context files preview
|
||||
const contextFilesPreview = await contextManager.getContextFilesPreview(
|
||||
projectPath
|
||||
);
|
||||
|
||||
// Get memory content (lessons learned from previous runs)
|
||||
const memoryContent = await contextManager.getMemoryContent(projectPath);
|
||||
|
||||
// Build mode header for this feature
|
||||
const modeHeader = feature.skipTests
|
||||
? `**🔨 MODE: Manual Review (No Automated Tests)**
|
||||
This feature is set for manual review - focus on clean implementation without automated tests.`
|
||||
: `**🧪 MODE: Test-Driven Development (TDD)**
|
||||
This feature requires automated Playwright tests to verify the implementation.`;
|
||||
|
||||
return `You are resuming work on a feature implementation that was previously started.
|
||||
|
||||
${modeHeader}
|
||||
${memoryContent}
|
||||
**Current Feature:**
|
||||
|
||||
ID: ${feature.id}
|
||||
Category: ${feature.category}
|
||||
Description: ${feature.description}
|
||||
${skipTestsNote}${imagesNote}${contextFilesPreview}
|
||||
**Steps to Complete:**
|
||||
${feature.steps.map((step, i) => `${i + 1}. ${step}`).join("\n")}
|
||||
|
||||
**Previous Work Context:**
|
||||
|
||||
${previousContext || "No previous context available - this is a fresh start."}
|
||||
|
||||
**Your Task:**
|
||||
|
||||
Continue where you left off and complete the feature implementation:
|
||||
|
||||
1. Review the previous work context above to understand what has been done
|
||||
2. Continue implementing the feature according to the description and steps
|
||||
${
|
||||
feature.skipTests
|
||||
? "3. Test the implementation manually (no automated tests needed for skipTests features)"
|
||||
: "3. Write Playwright tests to verify the feature works correctly (if not already done)\n4. Run the tests and ensure they pass\n5. **DELETE the test file(s) you created** - tests are only for immediate verification"
|
||||
}
|
||||
${
|
||||
feature.skipTests ? "4" : "6"
|
||||
}. **CRITICAL: Use the UpdateFeatureStatus tool to mark this feature as verified**
|
||||
${
|
||||
feature.skipTests
|
||||
? "5. **DO NOT commit changes** - the user will review and commit manually"
|
||||
: "7. Commit your changes with git"
|
||||
}
|
||||
|
||||
**IMPORTANT - Updating Feature Status:**
|
||||
|
||||
When you have completed the feature${
|
||||
feature.skipTests ? "" : " and all tests pass"
|
||||
}, you MUST use the \`mcp__automaker-tools__UpdateFeatureStatus\` tool to update the feature status:
|
||||
- Call the tool with: featureId="${feature.id}" and status="verified"
|
||||
- **You can also include a summary parameter** to describe what was done: summary="Brief summary of changes"
|
||||
- **DO NOT manually edit feature files** - this can cause race conditions
|
||||
- The UpdateFeatureStatus tool safely updates the feature status without risk of corrupting other data
|
||||
- **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct behavior
|
||||
|
||||
**IMPORTANT - Feature Summary (REQUIRED):**
|
||||
|
||||
When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
|
||||
- What files were modified/created
|
||||
- What functionality was added or changed
|
||||
- Any notable implementation decisions
|
||||
|
||||
Example:
|
||||
\`\`\`
|
||||
UpdateFeatureStatus(featureId="${
|
||||
feature.id
|
||||
}", status="verified", summary="Added dark mode toggle to settings. Modified: settings.tsx, theme-provider.tsx. Created new useTheme hook.")
|
||||
\`\`\`
|
||||
|
||||
The summary will be displayed on the Kanban card so the user can see what was done without checking the code.
|
||||
|
||||
**Important Guidelines:**
|
||||
|
||||
- Review what was already done in the previous context
|
||||
- Don't redo work that's already complete - continue from where it left off
|
||||
- Focus on completing any remaining tasks
|
||||
${
|
||||
feature.skipTests
|
||||
? "- Skip automated testing (skipTests=true) - user will manually verify"
|
||||
: "- Write comprehensive Playwright tests if not already done\n- Ensure all tests pass before marking as verified\n- **CRITICAL: Delete test files after verification**"
|
||||
}
|
||||
- **CRITICAL: Use UpdateFeatureStatus tool instead of editing feature files directly**
|
||||
- **CRITICAL: Always include a summary when marking feature as verified**
|
||||
${
|
||||
feature.skipTests
|
||||
? "- **DO NOT commit changes** - user will review and commit manually"
|
||||
: "- Make a git commit when complete"
|
||||
}
|
||||
|
||||
Begin by assessing what's been done and what remains to be completed.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the prompt for project analysis
|
||||
*/
|
||||
buildProjectAnalysisPrompt(projectPath) {
|
||||
return `You are analyzing a new project that was just opened in Automaker, an autonomous AI development studio.
|
||||
|
||||
**Your Task:**
|
||||
|
||||
Analyze this project's codebase and update the .automaker/app_spec.txt file with accurate information about:
|
||||
|
||||
1. **Project Name** - Detect the name from package.json, README, or directory name
|
||||
2. **Overview** - Brief description of what the project does
|
||||
3. **Technology Stack** - Languages, frameworks, libraries detected
|
||||
4. **Core Capabilities** - Main features and functionality
|
||||
5. **Implemented Features** - What features are already built
|
||||
6. **Implementation Roadmap** - Break down remaining work into phases with individual features
|
||||
|
||||
**Steps to Follow:**
|
||||
|
||||
1. First, explore the project structure:
|
||||
- Look at package.json, cargo.toml, go.mod, requirements.txt, etc. for tech stack
|
||||
- Check README.md for project description
|
||||
- List key directories (src, lib, components, etc.)
|
||||
|
||||
2. Identify the tech stack:
|
||||
- Frontend framework (React, Vue, Next.js, etc.)
|
||||
- Backend framework (Express, FastAPI, etc.)
|
||||
- Database (if any config files exist)
|
||||
- Testing framework
|
||||
- Build tools
|
||||
|
||||
3. Update .automaker/app_spec.txt with your findings in this format:
|
||||
\`\`\`xml
|
||||
<project_specification>
|
||||
<project_name>Detected Name</project_name>
|
||||
|
||||
<overview>
|
||||
Clear description of what this project does based on your analysis.
|
||||
</overview>
|
||||
|
||||
<technology_stack>
|
||||
<frontend>
|
||||
<framework>Framework Name</framework>
|
||||
<!-- Add detected technologies -->
|
||||
</frontend>
|
||||
<backend>
|
||||
<!-- If applicable -->
|
||||
</backend>
|
||||
<database>
|
||||
<!-- If applicable -->
|
||||
</database>
|
||||
<testing>
|
||||
<!-- Testing frameworks detected -->
|
||||
</testing>
|
||||
</technology_stack>
|
||||
|
||||
<core_capabilities>
|
||||
<!-- List main features/capabilities you found -->
|
||||
</core_capabilities>
|
||||
|
||||
<implemented_features>
|
||||
<!-- List specific features that appear to be implemented -->
|
||||
</implemented_features>
|
||||
|
||||
<implementation_roadmap>
|
||||
<phase_1_foundation>
|
||||
<!-- List foundational features to build first -->
|
||||
</phase_1_foundation>
|
||||
<phase_2_core_logic>
|
||||
<!-- List core logic features -->
|
||||
</phase_2_core_logic>
|
||||
<phase_3_polish>
|
||||
<!-- List polish and enhancement features -->
|
||||
</phase_3_polish>
|
||||
</implementation_roadmap>
|
||||
</project_specification>
|
||||
\`\`\`
|
||||
|
||||
4. Ensure .automaker/context/ directory exists
|
||||
|
||||
5. Ensure .automaker/features/ directory exists
|
||||
|
||||
**Important:**
|
||||
- Be concise but accurate
|
||||
- Only include information you can verify from the codebase
|
||||
- If unsure about something, note it as "to be determined"
|
||||
- Don't make up features that don't exist
|
||||
- Features are stored in .automaker/features/{id}/feature.json - each feature gets its own folder
|
||||
|
||||
Begin by exploring the project structure.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the system prompt for coding agent
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {boolean} isTDD - Whether this is Test-Driven Development mode (skipTests=false)
|
||||
*/
|
||||
async getCodingPrompt(projectPath, isTDD = true) {
|
||||
// Get context files preview
|
||||
const contextFilesPreview = projectPath
|
||||
? await contextManager.getContextFilesPreview(projectPath)
|
||||
: "";
|
||||
|
||||
// Get memory content (lessons learned from previous runs)
|
||||
const memoryContent = projectPath
|
||||
? await contextManager.getMemoryContent(projectPath)
|
||||
: "";
|
||||
|
||||
// Build mode-specific instructions
|
||||
const modeHeader = isTDD
|
||||
? `**🧪 MODE: Test-Driven Development (TDD)**
|
||||
You are implementing features using TDD methodology. This means:
|
||||
- Write Playwright tests BEFORE or alongside implementation
|
||||
- Run tests frequently to verify your work
|
||||
- Tests are your validation mechanism
|
||||
- Delete tests after they pass (they're for immediate verification only)`
|
||||
: `**🔨 MODE: Manual Review (No Automated Tests)**
|
||||
You are implementing features for manual user review. This means:
|
||||
- Focus on clean, working implementation
|
||||
- NO automated test writing required
|
||||
- User will manually verify the implementation
|
||||
- DO NOT commit changes - user will review and commit`;
|
||||
|
||||
return `You are an AI coding agent working autonomously to implement features.
|
||||
|
||||
${modeHeader}
|
||||
${memoryContent}
|
||||
|
||||
**Feature Storage:**
|
||||
Features are stored in .automaker/features/{id}/feature.json - each feature has its own folder.
|
||||
|
||||
**THE ONLY WAY to update features:**
|
||||
Use the mcp__automaker-tools__UpdateFeatureStatus tool with featureId, status, and summary parameters.
|
||||
Do NOT manually edit feature.json files directly.
|
||||
|
||||
${contextFilesPreview}
|
||||
|
||||
Your role is to:
|
||||
- Implement features exactly as specified
|
||||
- Write production-quality code
|
||||
- Check if feature.skipTests is true - if so, skip automated testing and don't commit
|
||||
- Create comprehensive Playwright tests using testing utilities (only if skipTests is false)
|
||||
- Ensure all tests pass before marking features complete (only if skipTests is false)
|
||||
- **DELETE test files after successful verification** - tests are only for immediate feature verification (only if skipTests is false)
|
||||
- **Use the UpdateFeatureStatus tool to mark features as verified** - NEVER manually edit feature files
|
||||
- **Always include a summary parameter when calling UpdateFeatureStatus** - describe what was done
|
||||
- Commit working code to git (only if skipTests is false - skipTests features require manual review)
|
||||
- Be thorough and detail-oriented
|
||||
|
||||
**IMPORTANT - Manual Testing Mode (skipTests=true):**
|
||||
If a feature has skipTests=true:
|
||||
- DO NOT write automated tests
|
||||
- DO NOT commit changes - the user will review and commit manually
|
||||
- Still mark the feature as verified using UpdateFeatureStatus - it will automatically convert to "waiting_approval" for manual review
|
||||
- The user will manually verify and commit the changes
|
||||
|
||||
**IMPORTANT - UpdateFeatureStatus Tool:**
|
||||
You have access to the \`mcp__automaker-tools__UpdateFeatureStatus\` tool. When the feature is complete (and all tests pass if skipTests is false), use this tool to update the feature status:
|
||||
- Call with featureId, status="verified", and summary="Description of what was done"
|
||||
- **DO NOT manually edit feature files** - this can cause race conditions and restore old state
|
||||
- The tool safely updates the status without corrupting other feature data
|
||||
- **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct
|
||||
|
||||
**IMPORTANT - Feature Summary (REQUIRED):**
|
||||
When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
|
||||
- What files were modified/created
|
||||
- What functionality was added or changed
|
||||
- Any notable implementation decisions
|
||||
|
||||
Example: summary="Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx. Created useTheme hook."
|
||||
|
||||
The summary will be displayed on the Kanban card so the user can quickly see what was done.
|
||||
|
||||
**Testing Utilities (CRITICAL):**
|
||||
- **Create and maintain tests/utils.ts** with helper functions for finding elements and common operations
|
||||
- **Always use utilities in tests** instead of repeating selectors
|
||||
- **Add new utilities as you write tests** - if you need a helper, add it to utils.ts
|
||||
- **Update utilities when functionality changes** - keep helpers in sync with code changes
|
||||
|
||||
This makes future tests easier to write and more maintainable!
|
||||
|
||||
**Test Deletion Policy:**
|
||||
Tests should NOT accumulate. After a feature is verified:
|
||||
1. Run the tests to ensure they pass
|
||||
2. Delete the test file for that feature
|
||||
3. Use UpdateFeatureStatus tool to mark the feature as "verified"
|
||||
|
||||
This prevents test brittleness as the app changes rapidly.
|
||||
|
||||
You have full access to:
|
||||
- Read and write files
|
||||
- Run bash commands
|
||||
- Execute tests
|
||||
- Delete files (rm command)
|
||||
- Make git commits
|
||||
- Search and analyze the codebase
|
||||
- **UpdateFeatureStatus tool** (mcp__automaker-tools__UpdateFeatureStatus) - Use this to update feature status
|
||||
|
||||
**🧠 Learning from Errors - Memory System:**
|
||||
|
||||
If you encounter an error or issue that:
|
||||
- Took multiple attempts to debug
|
||||
- Was caused by a non-obvious codebase quirk
|
||||
- Required understanding something specific about this project
|
||||
- Could trip up future agent runs
|
||||
|
||||
**ADD IT TO MEMORY** by appending to \`.automaker/memory.md\`:
|
||||
|
||||
\`\`\`markdown
|
||||
### Issue: [Brief Title]
|
||||
**Problem:** [1-2 sentence description of the issue]
|
||||
**Fix:** [Concise explanation of the solution]
|
||||
\`\`\`
|
||||
|
||||
Keep entries concise - focus on the essential information needed to avoid the issue in the future. This helps both you and other agents learn from mistakes.
|
||||
|
||||
Focus on one feature at a time and complete it fully before finishing. Always delete tests after they pass and use the UpdateFeatureStatus tool.`;
|
||||
}
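// A hedged usage sketch (the caller shape is an assumption, not shown in this diff):
// the agent runner would await this method and pass the result as the coding agent's
// system prompt, with isTDD=false whenever the feature has skipTests=true.
//
//   const promptBuilder = require("./prompt-builder");        // path assumed
//   const systemPrompt = await promptBuilder.getCodingPrompt(
//     "/path/to/project",
//     !feature.skipTests                                       // TDD unless skipTests
//   );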
|
||||
|
||||
/**
|
||||
* Get the system prompt for verification agent
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {boolean} isTDD - Whether this is Test-Driven Development mode (skipTests=false)
|
||||
*/
|
||||
async getVerificationPrompt(projectPath, isTDD = true) {
|
||||
// Get context files preview
|
||||
const contextFilesPreview = projectPath
|
||||
? await contextManager.getContextFilesPreview(projectPath)
|
||||
: "";
|
||||
|
||||
// Get memory content (lessons learned from previous runs)
|
||||
const memoryContent = projectPath
|
||||
? await contextManager.getMemoryContent(projectPath)
|
||||
: "";
|
||||
|
||||
// Build mode-specific instructions
|
||||
const modeHeader = isTDD
|
||||
? `**🧪 MODE: Test-Driven Development (TDD)**
|
||||
You are verifying/completing features using TDD methodology. This means:
|
||||
- Run Playwright tests to verify implementation
|
||||
- Fix failing tests by updating code
|
||||
- Tests are your validation mechanism
|
||||
- Delete tests after they pass (they're for immediate verification only)`
|
||||
: `**🔨 MODE: Manual Review (No Automated Tests)**
|
||||
You are completing features for manual user review. This means:
|
||||
- Focus on clean, working implementation
|
||||
- NO automated test writing required
|
||||
- User will manually verify the implementation
|
||||
- DO NOT commit changes - user will review and commit`;
|
||||
|
||||
return `You are an AI implementation and verification agent focused on completing features and ensuring they work.
|
||||
|
||||
${modeHeader}
|
||||
${memoryContent}
|
||||
**Feature Storage:**
|
||||
Features are stored in .automaker/features/{id}/feature.json - each feature has its own folder.
|
||||
|
||||
**THE ONLY WAY to update features:**
|
||||
Use the mcp__automaker-tools__UpdateFeatureStatus tool with featureId, status, and summary parameters.
|
||||
Do NOT manually edit feature.json files directly.
|
||||
|
||||
${contextFilesPreview}
|
||||
|
||||
Your role is to:
|
||||
- **Continue implementing features until they are complete** - don't stop at the first failure
|
||||
- Check if feature.skipTests is true - if so, skip automated testing and don't commit
|
||||
- Write or update code to fix failing tests (only if skipTests is false)
|
||||
- Run Playwright tests to verify feature implementations (only if skipTests is false)
|
||||
- If tests fail, analyze errors and fix the implementation (only if skipTests is false)
|
||||
- If other tests fail, check whether those tests are still accurate and update or delete them as needed (only if skipTests is false)
|
||||
- Continue rerunning tests and fixing issues until ALL tests pass (only if skipTests is false)
|
||||
- **DELETE test files after successful verification** - tests are only for immediate feature verification (only if skipTests is false)
|
||||
- **Use the UpdateFeatureStatus tool to mark features as verified** - NEVER manually edit feature files
|
||||
- **Always include a summary parameter when calling UpdateFeatureStatus** - describe what was done
|
||||
- **Update test utilities (tests/utils.ts) if functionality changed** - keep helpers in sync with code (only if skipTests is false)
|
||||
- Commit working code to git (only if skipTests is false - skipTests features require manual review)
|
||||
|
||||
**IMPORTANT - Manual Testing Mode (skipTests=true):**
|
||||
If a feature has skipTests=true:
|
||||
- DO NOT write automated tests
|
||||
- DO NOT commit changes - the user will review and commit manually
|
||||
- Still mark the feature as verified using UpdateFeatureStatus - it will automatically convert to "waiting_approval" for manual review
|
||||
- The user will manually verify and commit the changes
|
||||
|
||||
**IMPORTANT - UpdateFeatureStatus Tool:**
|
||||
You have access to the \`mcp__automaker-tools__UpdateFeatureStatus\` tool. When the feature is complete (and all tests pass if skipTests is false), use this tool to update the feature status:
|
||||
- Call with featureId, status="verified", and summary="Description of what was done"
|
||||
- **DO NOT manually edit feature files** - this can cause race conditions and restore old state
|
||||
- The tool safely updates the status without corrupting other feature data
|
||||
- **If skipTests=true, the tool will automatically convert "verified" to "waiting_approval"** - this is correct
|
||||
|
||||
**IMPORTANT - Feature Summary (REQUIRED):**
|
||||
When calling UpdateFeatureStatus, you MUST include a summary parameter that describes:
|
||||
- What files were modified/created
|
||||
- What functionality was added or changed
|
||||
- Any notable implementation decisions
|
||||
|
||||
Example: summary="Fixed login validation. Modified: auth.ts, login-form.tsx. Added password strength check."
|
||||
|
||||
The summary will be displayed on the Kanban card so the user can quickly see what was done.
|
||||
|
||||
**Testing Utilities:**
|
||||
- Check if tests/utils.ts needs updates based on code changes
|
||||
- If a component's selectors or behavior changed, update the corresponding utility functions
|
||||
- Add new utilities as needed for the feature's tests
|
||||
- Ensure utilities remain accurate and helpful for future tests
|
||||
|
||||
**Test Deletion Policy:**
|
||||
Tests should NOT accumulate. After a feature is verified:
|
||||
1. Delete the test file for that feature
|
||||
2. Use UpdateFeatureStatus tool to mark the feature as "verified"
|
||||
|
||||
This prevents test brittleness as the app changes rapidly.
|
||||
|
||||
You have access to:
|
||||
- Read and edit files
|
||||
- Write new code or modify existing code
|
||||
- Run bash commands (especially Playwright tests)
|
||||
- Delete files (rm command)
|
||||
- Analyze test output
|
||||
- Make git commits
|
||||
- **UpdateFeatureStatus tool** (mcp__automaker-tools__UpdateFeatureStatus) - Use this to update feature status
|
||||
|
||||
**🧠 Learning from Errors - Memory System:**
|
||||
|
||||
If you encounter an error or issue that:
|
||||
- Took multiple attempts to debug
|
||||
- Was caused by a non-obvious codebase quirk
|
||||
- Required understanding something specific about this project
|
||||
- Could trip up future agent runs
|
||||
|
||||
**ADD IT TO MEMORY** by appending to \`.automaker/memory.md\`:
|
||||
|
||||
\`\`\`markdown
|
||||
### Issue: [Brief Title]
|
||||
**Problem:** [1-2 sentence description of the issue]
|
||||
**Fix:** [Concise explanation of the solution]
|
||||
\`\`\`
|
||||
|
||||
Keep entries concise - focus on the essential information needed to avoid the issue in the future. This helps both you and other agents learn from mistakes.
|
||||
|
||||
**CRITICAL:** Be persistent and thorough - keep iterating on the implementation until all tests pass. Don't give up after the first failure. Always delete tests after they pass, use the UpdateFeatureStatus tool with a summary, and commit your work.`;
|
||||
}
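// Sketch of the tool call both prompts ask the agent to make. The parameter names
// (featureId, status, summary) come from the prompt text above; the exact invocation
// syntax depends on the MCP client and is assumed here:
//
//   mcp__automaker-tools__UpdateFeatureStatus({
//     featureId: "feature-1a2b3c4d5e6f",
//     status: "verified",            // converted to "waiting_approval" when skipTests=true
//     summary: "Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx."
//   });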
|
||||
|
||||
/**
|
||||
* Get system prompt for project analysis agent
|
||||
*/
|
||||
getProjectAnalysisSystemPrompt() {
|
||||
return `You are a project analysis agent that examines codebases to understand their structure, tech stack, and implemented features.
|
||||
|
||||
Your goal is to:
|
||||
- Quickly scan and understand project structure
|
||||
- Identify programming languages, frameworks, and libraries
|
||||
- Detect existing features and capabilities
|
||||
- Update the .automaker/app_spec.txt with accurate information
|
||||
- Ensure all required .automaker files and directories exist
|
||||
|
||||
Be efficient - don't read every file, focus on:
|
||||
- Configuration files (package.json, tsconfig.json, etc.)
|
||||
- Main entry points
|
||||
- Directory structure
|
||||
- README and documentation
|
||||
|
||||
**Feature Storage:**
|
||||
Features are stored in .automaker/features/{id}/feature.json - each feature has its own folder.
|
||||
Use the UpdateFeatureStatus tool to manage features, not direct file edits.
|
||||
|
||||
You have access to Read, Write, Edit, Glob, Grep, and Bash tools. Use them to explore the structure and write the necessary files.`;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new PromptBuilder();
|
||||
@@ -1,467 +0,0 @@
|
||||
const { query, AbortError } = require("@anthropic-ai/claude-agent-sdk");
|
||||
const fs = require("fs/promises");
|
||||
const path = require("path");
|
||||
|
||||
/**
|
||||
* XML template for app_spec.txt
|
||||
*/
|
||||
const APP_SPEC_XML_TEMPLATE = `<project_specification>
|
||||
<project_name></project_name>
|
||||
|
||||
<overview>
|
||||
</overview>
|
||||
|
||||
<technology_stack>
|
||||
<frontend>
|
||||
<framework></framework>
|
||||
<ui_library></ui_library>
|
||||
<styling></styling>
|
||||
<state_management></state_management>
|
||||
<drag_drop></drag_drop>
|
||||
<icons></icons>
|
||||
</frontend>
|
||||
<desktop_shell>
|
||||
<framework></framework>
|
||||
<language></language>
|
||||
<inter_process_communication></inter_process_communication>
|
||||
<file_system></file_system>
|
||||
</desktop_shell>
|
||||
<ai_engine>
|
||||
<logic_model></logic_model>
|
||||
<design_model></design_model>
|
||||
<orchestration></orchestration>
|
||||
</ai_engine>
|
||||
<testing>
|
||||
<framework></framework>
|
||||
<unit></unit>
|
||||
</testing>
|
||||
</technology_stack>
|
||||
|
||||
<core_capabilities>
|
||||
<project_management>
|
||||
</project_management>
|
||||
|
||||
<intelligent_analysis>
|
||||
</intelligent_analysis>
|
||||
|
||||
<kanban_workflow>
|
||||
</kanban_workflow>
|
||||
|
||||
<autonomous_agent_engine>
|
||||
</autonomous_agent_engine>
|
||||
|
||||
<extensibility>
|
||||
</extensibility>
|
||||
</core_capabilities>
|
||||
|
||||
<ui_layout>
|
||||
<window_structure>
|
||||
</window_structure>
|
||||
<theme>
|
||||
</theme>
|
||||
</ui_layout>
|
||||
|
||||
<development_workflow>
|
||||
<local_testing>
|
||||
</local_testing>
|
||||
</development_workflow>
|
||||
|
||||
<implementation_roadmap>
|
||||
<phase_1_foundation>
|
||||
</phase_1_foundation>
|
||||
<phase_2_core_logic>
|
||||
</phase_2_core_logic>
|
||||
<phase_3_kanban_and_interaction>
|
||||
</phase_3_kanban_and_interaction>
|
||||
<phase_4_polish>
|
||||
</phase_4_polish>
|
||||
</implementation_roadmap>
|
||||
</project_specification>`;
|
||||
|
||||
/**
|
||||
* Spec Regeneration Service - Regenerates app spec based on project description and tech stack
|
||||
*/
|
||||
class SpecRegenerationService {
|
||||
constructor() {
|
||||
this.runningRegeneration = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create initial app spec for a new project
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string} projectOverview - User's project description
|
||||
* @param {Function} sendToRenderer - Function to send events to renderer
|
||||
* @param {Object} execution - Execution context with abort controller
|
||||
* @param {boolean} generateFeatures - Whether to generate feature entries in features folder
|
||||
*/
|
||||
async createInitialSpec(projectPath, projectOverview, sendToRenderer, execution, generateFeatures = true) {
|
||||
console.log(`[SpecRegeneration] Creating initial spec for: ${projectPath}, generateFeatures: ${generateFeatures}`);
|
||||
|
||||
try {
|
||||
const abortController = new AbortController();
|
||||
execution.abortController = abortController;
|
||||
|
||||
const options = {
|
||||
model: "claude-sonnet-4-20250514",
|
||||
systemPrompt: this.getInitialCreationSystemPrompt(generateFeatures),
|
||||
maxTurns: 50,
|
||||
cwd: projectPath,
|
||||
allowedTools: ["Read", "Write", "Edit", "Glob", "Grep", "Bash"],
|
||||
permissionMode: "acceptEdits",
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
abortController: abortController,
|
||||
};
|
||||
|
||||
const prompt = this.buildInitialCreationPrompt(projectOverview, generateFeatures);
|
||||
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_progress",
|
||||
content: "Starting project analysis and spec creation...\n",
|
||||
});
|
||||
|
||||
const currentQuery = query({ prompt, options });
|
||||
execution.query = currentQuery;
|
||||
|
||||
let fullResponse = "";
|
||||
for await (const msg of currentQuery) {
|
||||
if (!execution.isActive()) break;
|
||||
|
||||
if (msg.type === "assistant" && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === "text") {
|
||||
fullResponse += block.text;
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_progress",
|
||||
content: block.text,
|
||||
});
|
||||
} else if (block.type === "tool_use") {
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_tool",
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
execution.query = null;
|
||||
execution.abortController = null;
|
||||
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_complete",
|
||||
message: "Initial spec creation complete!",
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
message: "Initial spec creation complete",
|
||||
};
|
||||
} catch (error) {
|
||||
if (error instanceof AbortError || error?.name === "AbortError") {
|
||||
console.log("[SpecRegeneration] Creation aborted");
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
return {
|
||||
success: false,
|
||||
message: "Creation aborted",
|
||||
};
|
||||
}
|
||||
|
||||
console.error("[SpecRegeneration] Error creating initial spec:", error);
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
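// Minimal sketch of the execution context this method expects; the shape is inferred
// from the usage above (isActive, abortController, query) and the real object is
// created by the caller, which is not part of this file:
//
//   const execution = {
//     abortController: null,   // set here, used by stop() to cancel the run
//     query: null,             // the in-flight SDK query stream
//     isActive: () => true,    // returning false breaks the streaming loop
//   };
//   await specRegenerationService.createInitialSpec(projectPath, overview, sendToRenderer, execution);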
|
||||
|
||||
/**
|
||||
* Get the system prompt for initial spec creation
|
||||
* @param {boolean} generateFeatures - Whether features should be generated
|
||||
*/
|
||||
getInitialCreationSystemPrompt(generateFeatures = true) {
|
||||
return `You are an expert software architect and product manager. Your job is to analyze an existing codebase and generate a comprehensive application specification based on a user's project overview.
|
||||
|
||||
You should:
|
||||
1. First, thoroughly analyze the project structure to understand the existing tech stack
|
||||
2. Read key configuration files (package.json, tsconfig.json, Cargo.toml, requirements.txt, etc.) to understand dependencies and frameworks
|
||||
3. Understand the current architecture and patterns used
|
||||
4. Based on the user's project overview, create a comprehensive app specification
|
||||
5. Be liberal and comprehensive when defining features - include everything needed for a complete, polished application
|
||||
6. Use the XML template format provided
|
||||
7. Write the specification to .automaker/app_spec.txt
|
||||
|
||||
When analyzing, look at:
|
||||
- package.json, cargo.toml, requirements.txt or similar config files for tech stack
|
||||
- Source code structure and organization
|
||||
- Framework-specific patterns (Next.js, React, Django, etc.)
|
||||
- Database configurations and schemas
|
||||
- API structures and patterns
|
||||
|
||||
**Feature Storage:**
|
||||
Features are stored in .automaker/features/{id}/feature.json - each feature has its own folder.
|
||||
Do NOT manually create feature files. Use the UpdateFeatureStatus tool to manage features.
|
||||
|
||||
You CAN and SHOULD modify:
|
||||
- .automaker/app_spec.txt (this is your primary target)
|
||||
|
||||
You have access to file reading, writing, and search tools. Use them to understand the codebase and write the new spec.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the prompt for initial spec creation
|
||||
* @param {string} projectOverview - User's project description
|
||||
* @param {boolean} generateFeatures - Whether to generate feature entries in features folder
|
||||
*/
|
||||
buildInitialCreationPrompt(projectOverview, generateFeatures = true) {
|
||||
return `I need you to create an initial application specification for my project. I haven't set up an app_spec.txt yet, so this will be the first one.
|
||||
|
||||
**My Project Overview:**
|
||||
${projectOverview}
|
||||
|
||||
**Your Task:**
|
||||
|
||||
1. First, explore the project to understand the existing tech stack:
|
||||
- Read package.json, Cargo.toml, requirements.txt, or similar config files
|
||||
- Identify all frameworks and libraries being used
|
||||
- Understand the current project structure and architecture
|
||||
- Note any database, authentication, or other infrastructure in use
|
||||
|
||||
2. Based on my project overview and the existing tech stack, create a comprehensive app specification using this XML template:
|
||||
|
||||
\`\`\`xml
|
||||
${APP_SPEC_XML_TEMPLATE}
|
||||
\`\`\`
|
||||
|
||||
3. Fill out the template with:
|
||||
- **project_name**: Extract from the project or derive from overview
|
||||
- **overview**: A clear description based on my project overview
|
||||
- **technology_stack**: All technologies you discover in the project (fill out the relevant sections, remove irrelevant ones)
|
||||
- **core_capabilities**: List all the major capabilities the app should have based on my overview
|
||||
- **ui_layout**: Describe the UI structure if relevant
|
||||
- **development_workflow**: Note any testing or development patterns
|
||||
- **implementation_roadmap**: Break down the features into phases - be VERY detailed here, listing every feature that needs to be built
|
||||
|
||||
4. **IMPORTANT**: Write the complete specification to the file \`.automaker/app_spec.txt\`
|
||||
|
||||
**Guidelines:**
|
||||
- Be comprehensive! Include ALL features needed for a complete application
|
||||
- Only include technology_stack sections that are relevant (e.g., skip desktop_shell if it's a web-only app)
|
||||
- Add new sections to core_capabilities as needed for the specific project
|
||||
- The implementation_roadmap should reflect logical phases for building out the app - list EVERY feature individually
|
||||
- Consider user flows, error states, and edge cases when defining features
|
||||
- Each phase should have multiple specific, actionable features
|
||||
|
||||
Begin by exploring the project structure.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Regenerate the app spec based on user's project definition
|
||||
*/
|
||||
async regenerateSpec(projectPath, projectDefinition, sendToRenderer, execution) {
|
||||
console.log(`[SpecRegeneration] Regenerating spec for: ${projectPath}`);
|
||||
|
||||
try {
|
||||
const abortController = new AbortController();
|
||||
execution.abortController = abortController;
|
||||
|
||||
const options = {
|
||||
model: "claude-sonnet-4-20250514",
|
||||
systemPrompt: this.getSystemPrompt(),
|
||||
maxTurns: 50,
|
||||
cwd: projectPath,
|
||||
allowedTools: ["Read", "Write", "Edit", "Glob", "Grep", "Bash"],
|
||||
permissionMode: "acceptEdits",
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
abortController: abortController,
|
||||
};
|
||||
|
||||
const prompt = this.buildRegenerationPrompt(projectDefinition);
|
||||
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_progress",
|
||||
content: "Starting spec regeneration...\n",
|
||||
});
|
||||
|
||||
const currentQuery = query({ prompt, options });
|
||||
execution.query = currentQuery;
|
||||
|
||||
let fullResponse = "";
|
||||
for await (const msg of currentQuery) {
|
||||
if (!execution.isActive()) break;
|
||||
|
||||
if (msg.type === "assistant" && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === "text") {
|
||||
fullResponse += block.text;
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_progress",
|
||||
content: block.text,
|
||||
});
|
||||
} else if (block.type === "tool_use") {
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_tool",
|
||||
tool: block.name,
|
||||
input: block.input,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
execution.query = null;
|
||||
execution.abortController = null;
|
||||
|
||||
sendToRenderer({
|
||||
type: "spec_regeneration_complete",
|
||||
message: "Spec regeneration complete!",
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
message: "Spec regeneration complete",
|
||||
};
|
||||
} catch (error) {
|
||||
if (error instanceof AbortError || error?.name === "AbortError") {
|
||||
console.log("[SpecRegeneration] Regeneration aborted");
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
return {
|
||||
success: false,
|
||||
message: "Regeneration aborted",
|
||||
};
|
||||
}
|
||||
|
||||
console.error("[SpecRegeneration] Error regenerating spec:", error);
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
execution.query = null;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the system prompt for spec regeneration
|
||||
*/
|
||||
getSystemPrompt() {
|
||||
return `You are an expert software architect and product manager. Your job is to analyze an existing codebase and generate a comprehensive application specification based on a user's project definition.
|
||||
|
||||
You should:
|
||||
1. First, thoroughly analyze the project structure to understand the existing tech stack
|
||||
2. Read key configuration files (package.json, tsconfig.json, etc.) to understand dependencies and frameworks
|
||||
3. Understand the current architecture and patterns used
|
||||
4. Based on the user's project definition, create a comprehensive app specification that includes ALL features needed to realize their vision
|
||||
5. Be liberal and comprehensive when defining features - include everything needed for a complete, polished application
|
||||
6. Write the specification to .automaker/app_spec.txt
|
||||
|
||||
When analyzing, look at:
|
||||
- package.json, cargo.toml, or similar config files for tech stack
|
||||
- Source code structure and organization
|
||||
- Framework-specific patterns (Next.js, React, etc.)
|
||||
- Database configurations and schemas
|
||||
- API structures and patterns
|
||||
|
||||
**Feature Storage:**
|
||||
Features are stored in .automaker/features/{id}/feature.json - each feature has its own folder.
|
||||
Do NOT manually create feature files. Use the UpdateFeatureStatus tool to manage features.
|
||||
|
||||
You CAN and SHOULD modify:
|
||||
- .automaker/app_spec.txt (this is your primary target)
|
||||
|
||||
You have access to file reading, writing, and search tools. Use them to understand the codebase and write the new spec.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the prompt for regenerating the spec
|
||||
*/
|
||||
buildRegenerationPrompt(projectDefinition) {
|
||||
return `I need you to regenerate my application specification based on the following project definition. Be very comprehensive and liberal when defining features - I want a complete, polished application.
|
||||
|
||||
**My Project Definition:**
|
||||
${projectDefinition}
|
||||
|
||||
**Your Task:**
|
||||
|
||||
1. First, explore the project to understand the existing tech stack:
|
||||
- Read package.json or similar config files
|
||||
- Identify all frameworks and libraries being used
|
||||
- Understand the current project structure and architecture
|
||||
- Note any database, authentication, or other infrastructure in use
|
||||
|
||||
2. Based on my project definition and the existing tech stack, create a comprehensive app specification that includes:
|
||||
- Product Overview: A clear description of what the app does
|
||||
- Tech Stack: All technologies currently in use
|
||||
- Features: A COMPREHENSIVE list of all features needed to realize my vision
|
||||
- Be liberal! Include all features that would make this a complete, production-ready application
|
||||
- Include core features, supporting features, and nice-to-have features
|
||||
- Think about user experience, error handling, edge cases, etc.
|
||||
- Architecture Notes: Any important architectural decisions or patterns
|
||||
|
||||
3. **IMPORTANT**: Write the complete specification to the file \`.automaker/app_spec.txt\`
|
||||
|
||||
**Format Guidelines for the Spec:**
|
||||
|
||||
Use this general structure:
|
||||
|
||||
\`\`\`
|
||||
# [App Name] - Application Specification
|
||||
|
||||
## Product Overview
|
||||
[Description of what the app does and its purpose]
|
||||
|
||||
## Tech Stack
|
||||
- Frontend: [frameworks, libraries]
|
||||
- Backend: [frameworks, APIs]
|
||||
- Database: [if applicable]
|
||||
- Other: [other relevant tech]
|
||||
|
||||
## Features
|
||||
|
||||
### [Category 1]
|
||||
- **[Feature Name]**: [Detailed description of the feature]
|
||||
- **[Feature Name]**: [Detailed description]
|
||||
...
|
||||
|
||||
### [Category 2]
|
||||
- **[Feature Name]**: [Detailed description]
|
||||
...
|
||||
|
||||
## Architecture Notes
|
||||
[Any important architectural notes, patterns, or conventions]
|
||||
\`\`\`
|
||||
|
||||
**Remember:**
|
||||
- Be comprehensive! Include ALL features needed for a complete application
|
||||
- Consider user flows, error states, loading states, etc.
|
||||
- Include authentication, authorization if relevant
|
||||
- Think about what would make this a polished, production-ready app
|
||||
- The more detailed and complete the spec, the better
|
||||
|
||||
Begin by exploring the project structure.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop the current regeneration
|
||||
*/
|
||||
stop() {
|
||||
if (this.runningRegeneration && this.runningRegeneration.abortController) {
|
||||
this.runningRegeneration.abortController.abort();
|
||||
}
|
||||
this.runningRegeneration = null;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new SpecRegenerationService();
|
||||
@@ -1,569 +0,0 @@
|
||||
const path = require("path");
|
||||
const fs = require("fs/promises");
|
||||
const { exec, spawn } = require("child_process");
|
||||
const { promisify } = require("util");
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
/**
|
||||
* Worktree Manager - Handles git worktrees for feature isolation
|
||||
*
|
||||
* This service creates isolated git worktrees for each feature, allowing:
|
||||
* - Features to be worked on in isolation without affecting the main branch
|
||||
* - Easy rollback/revert by simply deleting the worktree
|
||||
* - Checkpointing - user can see changes in the worktree before merging
|
||||
*/
|
||||
class WorktreeManager {
|
||||
constructor() {
|
||||
// Cache for worktree info
|
||||
this.worktreeCache = new Map();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the base worktree directory path
|
||||
*/
|
||||
getWorktreeBasePath(projectPath) {
|
||||
return path.join(projectPath, ".automaker", "worktrees");
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a safe branch name from feature description
|
||||
*/
|
||||
generateBranchName(feature) {
|
||||
// Create a slug from the description
|
||||
const slug = feature.description
|
||||
.toLowerCase()
|
||||
.replace(/[^a-z0-9\s-]/g, "") // Remove special chars
|
||||
.replace(/\s+/g, "-") // Replace spaces with hyphens
|
||||
.substring(0, 40); // Limit length
|
||||
|
||||
// Add feature ID for uniqueness
|
||||
const shortId = feature.id.replace("feature-", "").substring(0, 12);
|
||||
return `feature/${shortId}-${slug}`;
|
||||
}
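// Worked example (hypothetical feature, not taken from the repo):
//   generateBranchName({ id: "feature-1a2b3c4d5e6f7890", description: "Add Dark Mode Toggle!" })
//   // -> "feature/1a2b3c4d5e6f-add-dark-mode-toggle"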
|
||||
|
||||
/**
|
||||
* Check if the project is a git repository
|
||||
*/
|
||||
async isGitRepo(projectPath) {
|
||||
try {
|
||||
await execAsync("git rev-parse --is-inside-work-tree", { cwd: projectPath });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current branch name
|
||||
*/
|
||||
async getCurrentBranch(projectPath) {
|
||||
try {
|
||||
const { stdout } = await execAsync("git rev-parse --abbrev-ref HEAD", { cwd: projectPath });
|
||||
return stdout.trim();
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to get current branch:", error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a branch exists (local or remote)
|
||||
*/
|
||||
async branchExists(projectPath, branchName) {
|
||||
try {
|
||||
await execAsync(`git rev-parse --verify ${branchName}`, { cwd: projectPath });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List all existing worktrees
|
||||
*/
|
||||
async listWorktrees(projectPath) {
|
||||
try {
|
||||
const { stdout } = await execAsync("git worktree list --porcelain", { cwd: projectPath });
|
||||
const worktrees = [];
|
||||
const lines = stdout.split("\n");
|
||||
|
||||
let currentWorktree = null;
|
||||
for (const line of lines) {
|
||||
if (line.startsWith("worktree ")) {
|
||||
if (currentWorktree) {
|
||||
worktrees.push(currentWorktree);
|
||||
}
|
||||
currentWorktree = { path: line.replace("worktree ", "") };
|
||||
} else if (line.startsWith("branch ") && currentWorktree) {
|
||||
currentWorktree.branch = line.replace("branch refs/heads/", "");
|
||||
} else if (line.startsWith("HEAD ") && currentWorktree) {
|
||||
currentWorktree.head = line.replace("HEAD ", "");
|
||||
}
|
||||
}
|
||||
if (currentWorktree) {
|
||||
worktrees.push(currentWorktree);
|
||||
}
|
||||
|
||||
return worktrees;
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to list worktrees:", error);
|
||||
return [];
|
||||
}
|
||||
}
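// Example of the porcelain output parsed above (format per `git worktree list --porcelain`),
// with placeholder paths and SHAs:
//
//   worktree /repo
//   HEAD <sha>
//   branch refs/heads/main
//
//   worktree /repo/.automaker/worktrees/<shortId>-<slug>
//   HEAD <sha>
//   branch refs/heads/feature/<shortId>-<slug>
//
// which parses to objects like { path, head, branch } with the refs/heads/ prefix stripped.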
|
||||
|
||||
/**
|
||||
* Create a worktree for a feature
|
||||
* @param {string} projectPath - Path to the main project
|
||||
* @param {object} feature - Feature object with id and description
|
||||
* @returns {object} - { success, worktreePath, branchName, error }
|
||||
*/
|
||||
async createWorktree(projectPath, feature) {
|
||||
console.log(`[WorktreeManager] Creating worktree for feature: ${feature.id}`);
|
||||
|
||||
// Check if project is a git repo
|
||||
if (!await this.isGitRepo(projectPath)) {
|
||||
return { success: false, error: "Project is not a git repository" };
|
||||
}
|
||||
|
||||
const branchName = this.generateBranchName(feature);
|
||||
const worktreeBasePath = this.getWorktreeBasePath(projectPath);
|
||||
const worktreePath = path.join(worktreeBasePath, branchName.replace("feature/", ""));
|
||||
|
||||
try {
|
||||
// Ensure worktree directory exists
|
||||
await fs.mkdir(worktreeBasePath, { recursive: true });
|
||||
|
||||
// Check if worktree already exists
|
||||
const worktrees = await this.listWorktrees(projectPath);
|
||||
const existingWorktree = worktrees.find(
|
||||
w => w.path === worktreePath || w.branch === branchName
|
||||
);
|
||||
|
||||
if (existingWorktree) {
|
||||
console.log(`[WorktreeManager] Worktree already exists for feature: ${feature.id}`);
|
||||
return {
|
||||
success: true,
|
||||
worktreePath: existingWorktree.path,
|
||||
branchName: existingWorktree.branch,
|
||||
existed: true,
|
||||
};
|
||||
}
|
||||
|
||||
// Get current branch to base the new branch on
|
||||
const baseBranch = await this.getCurrentBranch(projectPath);
|
||||
if (!baseBranch) {
|
||||
return { success: false, error: "Could not determine current branch" };
|
||||
}
|
||||
|
||||
// Check if branch already exists
|
||||
const branchExists = await this.branchExists(projectPath, branchName);
|
||||
|
||||
if (branchExists) {
|
||||
// Use existing branch
|
||||
console.log(`[WorktreeManager] Using existing branch: ${branchName}`);
|
||||
await execAsync(`git worktree add "${worktreePath}" ${branchName}`, { cwd: projectPath });
|
||||
} else {
|
||||
// Create new worktree with new branch
|
||||
console.log(`[WorktreeManager] Creating new branch: ${branchName} based on ${baseBranch}`);
|
||||
await execAsync(`git worktree add -b ${branchName} "${worktreePath}" ${baseBranch}`, { cwd: projectPath });
|
||||
}
|
||||
|
||||
// Copy selected .automaker files into the worktree (the worktrees directory itself and feature data are not copied, to avoid recursion)
|
||||
const automakerSrc = path.join(projectPath, ".automaker");
|
||||
const automakerDst = path.join(worktreePath, ".automaker");
|
||||
|
||||
try {
|
||||
await fs.mkdir(automakerDst, { recursive: true });
|
||||
|
||||
// Note: Features are stored in .automaker/features/{id}/feature.json
|
||||
// These are managed by the main project, not copied to worktrees
|
||||
|
||||
// Copy app_spec.txt if it exists
|
||||
const appSpecSrc = path.join(automakerSrc, "app_spec.txt");
|
||||
const appSpecDst = path.join(automakerDst, "app_spec.txt");
|
||||
try {
|
||||
const content = await fs.readFile(appSpecSrc, "utf-8");
|
||||
await fs.writeFile(appSpecDst, content, "utf-8");
|
||||
} catch {
|
||||
// App spec might not exist yet
|
||||
}
|
||||
|
||||
// Copy categories.json if it exists
|
||||
const categoriesSrc = path.join(automakerSrc, "categories.json");
|
||||
const categoriesDst = path.join(automakerDst, "categories.json");
|
||||
try {
|
||||
const content = await fs.readFile(categoriesSrc, "utf-8");
|
||||
await fs.writeFile(categoriesDst, content, "utf-8");
|
||||
} catch {
|
||||
// Categories might not exist yet
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn("[WorktreeManager] Failed to copy .automaker directory:", error);
|
||||
}
|
||||
|
||||
// Store worktree info in cache
|
||||
this.worktreeCache.set(feature.id, {
|
||||
worktreePath,
|
||||
branchName,
|
||||
createdAt: new Date().toISOString(),
|
||||
baseBranch,
|
||||
});
|
||||
|
||||
console.log(`[WorktreeManager] Worktree created at: ${worktreePath}`);
|
||||
return {
|
||||
success: true,
|
||||
worktreePath,
|
||||
branchName,
|
||||
baseBranch,
|
||||
existed: false,
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to create worktree:", error);
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
}
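// Roughly the git commands the method above runs (paths illustrative):
//
//   mkdir -p .automaker/worktrees
//   # branch already exists:
//   git worktree add ".automaker/worktrees/<shortId>-<slug>" feature/<shortId>-<slug>
//   # otherwise, create it from the current branch:
//   git worktree add -b feature/<shortId>-<slug> ".automaker/worktrees/<shortId>-<slug>" <baseBranch>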
|
||||
|
||||
/**
|
||||
* Get worktree info for a feature
|
||||
*/
|
||||
async getWorktreeInfo(projectPath, featureId) {
|
||||
// Check cache first
|
||||
if (this.worktreeCache.has(featureId)) {
|
||||
return { success: true, ...this.worktreeCache.get(featureId) };
|
||||
}
|
||||
|
||||
// Scan worktrees to find matching one
|
||||
const worktrees = await this.listWorktrees(projectPath);
|
||||
const worktreeBasePath = this.getWorktreeBasePath(projectPath);
|
||||
|
||||
for (const worktree of worktrees) {
|
||||
// Check if this worktree is in our worktree directory
|
||||
if (worktree.path.startsWith(worktreeBasePath)) {
|
||||
// Check if the feature ID is in the branch name
|
||||
const shortId = featureId.replace("feature-", "").substring(0, 12);
|
||||
if (worktree.branch && worktree.branch.includes(shortId)) {
|
||||
const info = {
|
||||
worktreePath: worktree.path,
|
||||
branchName: worktree.branch,
|
||||
head: worktree.head,
|
||||
};
|
||||
this.worktreeCache.set(featureId, info);
|
||||
return { success: true, ...info };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { success: false, error: "Worktree not found" };
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a worktree for a feature
|
||||
* This effectively reverts all changes made by the agent
|
||||
*/
|
||||
async removeWorktree(projectPath, featureId, deleteBranch = false) {
|
||||
console.log(`[WorktreeManager] Removing worktree for feature: ${featureId}`);
|
||||
|
||||
const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId);
|
||||
if (!worktreeInfo.success) {
|
||||
console.log(`[WorktreeManager] No worktree found for feature: ${featureId}`);
|
||||
return { success: true, message: "No worktree to remove" };
|
||||
}
|
||||
|
||||
const { worktreePath, branchName } = worktreeInfo;
|
||||
|
||||
try {
|
||||
// Remove the worktree
|
||||
await execAsync(`git worktree remove "${worktreePath}" --force`, { cwd: projectPath });
|
||||
console.log(`[WorktreeManager] Worktree removed: ${worktreePath}`);
|
||||
|
||||
// Optionally delete the branch too
|
||||
if (deleteBranch && branchName) {
|
||||
try {
|
||||
await execAsync(`git branch -D ${branchName}`, { cwd: projectPath });
|
||||
console.log(`[WorktreeManager] Branch deleted: ${branchName}`);
|
||||
} catch (error) {
|
||||
console.warn(`[WorktreeManager] Could not delete branch ${branchName}:`, error.message);
|
||||
}
|
||||
}
|
||||
|
||||
// Remove from cache
|
||||
this.worktreeCache.delete(featureId);
|
||||
|
||||
return { success: true, removedPath: worktreePath, removedBranch: deleteBranch ? branchName : null };
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to remove worktree:", error);
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get status of changes in a worktree
|
||||
*/
|
||||
async getWorktreeStatus(worktreePath) {
|
||||
try {
|
||||
const { stdout: statusOutput } = await execAsync("git status --porcelain", { cwd: worktreePath });
|
||||
const { stdout: diffStat } = await execAsync("git diff --stat", { cwd: worktreePath });
|
||||
const { stdout: commitLog } = await execAsync("git log --oneline -10", { cwd: worktreePath });
|
||||
|
||||
const files = statusOutput.trim().split("\n").filter(Boolean);
|
||||
const commits = commitLog.trim().split("\n").filter(Boolean);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
modifiedFiles: files.length,
|
||||
files: files.slice(0, 20), // Limit to 20 files
|
||||
diffStat: diffStat.trim(),
|
||||
recentCommits: commits.slice(0, 5), // Last 5 commits
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to get worktree status:", error);
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get detailed file diff content for a worktree
|
||||
* Returns unified diff format for all changes
|
||||
*/
|
||||
async getFileDiffs(worktreePath) {
|
||||
try {
|
||||
// Get both staged and unstaged diffs
|
||||
const { stdout: unstagedDiff } = await execAsync("git diff --no-color", {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 10 * 1024 * 1024 // 10MB buffer for large diffs
|
||||
});
|
||||
const { stdout: stagedDiff } = await execAsync("git diff --cached --no-color", {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 10 * 1024 * 1024
|
||||
});
|
||||
|
||||
// Get list of files with their status
|
||||
const { stdout: statusOutput } = await execAsync("git status --porcelain", { cwd: worktreePath });
|
||||
const files = statusOutput.trim().split("\n").filter(Boolean);
|
||||
|
||||
// Parse file statuses
|
||||
const fileStatuses = files.map(line => {
|
||||
const status = line.substring(0, 2);
|
||||
const filePath = line.substring(3);
|
||||
return {
|
||||
status: status.trim() || 'M',
|
||||
path: filePath,
|
||||
statusText: this.getStatusText(status)
|
||||
};
|
||||
});
|
||||
|
||||
// Combine diffs
|
||||
const combinedDiff = [stagedDiff, unstagedDiff].filter(Boolean).join("\n");
|
||||
|
||||
return {
|
||||
success: true,
|
||||
diff: combinedDiff,
|
||||
files: fileStatuses,
|
||||
hasChanges: files.length > 0
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to get file diffs:", error);
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
}
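// Example of the `git status --porcelain` lines parsed above: columns 1-2 are the XY
// status code, column 3 is a space, the path follows.
//
//   " M app/page.tsx"    -> { status: "M",  path: "app/page.tsx",   statusText: "Modified" }
//   "?? tests/utils.ts"  -> { status: "??", path: "tests/utils.ts", statusText: "Untracked" }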
|
||||
|
||||
/**
|
||||
* Get human-readable status text from git status code
|
||||
*/
|
||||
getStatusText(status) {
|
||||
const statusMap = {
|
||||
'M': 'Modified',
|
||||
'A': 'Added',
|
||||
'D': 'Deleted',
|
||||
'R': 'Renamed',
|
||||
'C': 'Copied',
|
||||
'U': 'Updated',
|
||||
'?': 'Untracked',
|
||||
'!': 'Ignored'
|
||||
};
|
||||
const firstChar = status.charAt(0);
|
||||
const secondChar = status.charAt(1);
|
||||
return statusMap[firstChar] || statusMap[secondChar] || 'Changed';
|
||||
}
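// e.g. getStatusText(" M") -> "Modified", getStatusText("??") -> "Untracked",
//      getStatusText("R ") -> "Renamed"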
|
||||
|
||||
/**
|
||||
* Get diff for a specific file in a worktree
|
||||
*/
|
||||
async getFileDiff(worktreePath, filePath) {
|
||||
try {
|
||||
// Try to get unstaged diff first, then staged if no unstaged changes
|
||||
let diff = '';
|
||||
try {
|
||||
const { stdout } = await execAsync(`git diff --no-color -- "${filePath}"`, {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 5 * 1024 * 1024
|
||||
});
|
||||
diff = stdout;
|
||||
} catch {
|
||||
// File might be staged
|
||||
}
|
||||
|
||||
if (!diff) {
|
||||
try {
|
||||
const { stdout } = await execAsync(`git diff --cached --no-color -- "${filePath}"`, {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 5 * 1024 * 1024
|
||||
});
|
||||
diff = stdout;
|
||||
} catch {
|
||||
// File might be untracked, show the content
|
||||
}
|
||||
}
|
||||
|
||||
// If still no diff, might be an untracked file - show the content
|
||||
if (!diff) {
|
||||
try {
|
||||
const fullPath = path.join(worktreePath, filePath);
|
||||
const content = await fs.readFile(fullPath, 'utf-8');
|
||||
diff = `+++ ${filePath} (new file)\n${content.split('\n').map(l => '+' + l).join('\n')}`;
|
||||
} catch {
|
||||
diff = '(Unable to read file content)';
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
diff,
|
||||
filePath
|
||||
};
|
||||
} catch (error) {
|
||||
console.error(`[WorktreeManager] Failed to get diff for ${filePath}:`, error);
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge worktree changes back to the main branch
|
||||
*/
|
||||
async mergeWorktree(projectPath, featureId, options = {}) {
|
||||
console.log(`[WorktreeManager] Merging worktree for feature: ${featureId}`);
|
||||
|
||||
const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId);
|
||||
if (!worktreeInfo.success) {
|
||||
return { success: false, error: "Worktree not found" };
|
||||
}
|
||||
|
||||
const { branchName, worktreePath } = worktreeInfo;
|
||||
const baseBranch = await this.getCurrentBranch(projectPath);
|
||||
|
||||
try {
|
||||
// First commit any uncommitted changes in the worktree
|
||||
const { stdout: status } = await execAsync("git status --porcelain", { cwd: worktreePath });
|
||||
if (status.trim()) {
|
||||
// There are uncommitted changes - commit them
|
||||
await execAsync("git add -A", { cwd: worktreePath });
|
||||
const commitMsg = options.commitMessage || `feat: complete ${featureId}`;
|
||||
await execAsync(`git commit -m "${commitMsg}"`, { cwd: worktreePath });
|
||||
}
|
||||
|
||||
// Merge the feature branch into the current branch in the main repo
|
||||
if (options.squash) {
|
||||
await execAsync(`git merge --squash ${branchName}`, { cwd: projectPath });
|
||||
const squashMsg = options.squashMessage || `feat: ${featureId} - squashed merge`;
|
||||
await execAsync(`git commit -m "${squashMsg}"`, { cwd: projectPath });
|
||||
} else {
|
||||
await execAsync(`git merge ${branchName} --no-ff -m "Merge ${branchName}"`, { cwd: projectPath });
|
||||
}
|
||||
|
||||
console.log(`[WorktreeManager] Successfully merged ${branchName} into ${baseBranch}`);
|
||||
|
||||
// Optionally cleanup worktree after merge
|
||||
if (options.cleanup) {
|
||||
await this.removeWorktree(projectPath, featureId, true);
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
mergedBranch: branchName,
|
||||
intoBranch: baseBranch,
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to merge worktree:", error);
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
}
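// The two merge paths above, as plain git run from the main checkout:
//
//   # options.squash:
//   git merge --squash feature/<shortId>-<slug> && git commit -m "<squashMessage>"
//   # default:
//   git merge feature/<shortId>-<slug> --no-ff -m "Merge feature/<shortId>-<slug>"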
|
||||
|
||||
/**
|
||||
* Sync changes from main branch to worktree (rebase or merge)
|
||||
*/
|
||||
async syncWorktree(projectPath, featureId, method = "rebase") {
|
||||
console.log(`[WorktreeManager] Syncing worktree for feature: ${featureId}`);
|
||||
|
||||
const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId);
|
||||
if (!worktreeInfo.success) {
|
||||
return { success: false, error: "Worktree not found" };
|
||||
}
|
||||
|
||||
const { worktreePath } = worktreeInfo;
// Fall back to the current branch: worktrees found by scanning (rather than created
// in this session) have no cached baseBranch.
const baseBranch = worktreeInfo.baseBranch || await this.getCurrentBranch(projectPath);
|
||||
|
||||
try {
|
||||
if (method === "rebase") {
|
||||
await execAsync(`git rebase ${baseBranch}`, { cwd: worktreePath });
|
||||
} else {
|
||||
await execAsync(`git merge ${baseBranch}`, { cwd: worktreePath });
|
||||
}
|
||||
|
||||
return { success: true, method };
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to sync worktree:", error);
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get list of all feature worktrees
|
||||
*/
|
||||
async getAllFeatureWorktrees(projectPath) {
|
||||
const worktrees = await this.listWorktrees(projectPath);
|
||||
const worktreeBasePath = this.getWorktreeBasePath(projectPath);
|
||||
|
||||
return worktrees.filter(w =>
|
||||
w.path.startsWith(worktreeBasePath) &&
|
||||
w.branch &&
|
||||
w.branch.startsWith("feature/")
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup orphaned worktrees (worktrees without matching features)
|
||||
*/
|
||||
async cleanupOrphanedWorktrees(projectPath, activeFeatureIds) {
|
||||
console.log("[WorktreeManager] Cleaning up orphaned worktrees...");
|
||||
|
||||
const worktrees = await this.getAllFeatureWorktrees(projectPath);
|
||||
const cleaned = [];
|
||||
|
||||
for (const worktree of worktrees) {
|
||||
// Extract feature ID from branch name
|
||||
const branchParts = worktree.branch.replace("feature/", "").split("-");
|
||||
const shortId = branchParts[0];
|
||||
|
||||
// Check if any active feature has this short ID
|
||||
const hasMatchingFeature = activeFeatureIds.some(id => {
|
||||
const featureShortId = id.replace("feature-", "").substring(0, 12);
|
||||
return featureShortId === shortId;
|
||||
});
|
||||
|
||||
if (!hasMatchingFeature) {
|
||||
console.log(`[WorktreeManager] Removing orphaned worktree: ${worktree.path}`);
|
||||
try {
|
||||
await execAsync(`git worktree remove "${worktree.path}" --force`, { cwd: projectPath });
|
||||
await execAsync(`git branch -D ${worktree.branch}`, { cwd: projectPath });
|
||||
cleaned.push(worktree.path);
|
||||
} catch (error) {
|
||||
console.warn(`[WorktreeManager] Failed to cleanup worktree ${worktree.path}:`, error.message);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { success: true, cleaned };
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new WorktreeManager();
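// Hedged end-to-end sketch; the caller code and require path are assumptions, not part
// of this file:
//
//   const worktreeManager = require("./worktree-manager");
//   const wt = await worktreeManager.createWorktree(projectPath, feature);
//   // ... agent works inside wt.worktreePath, committing as it goes ...
//   const status = await worktreeManager.getWorktreeStatus(wt.worktreePath);
//   await worktreeManager.mergeWorktree(projectPath, feature.id, { squash: true, cleanup: true });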
|
||||
@@ -1,20 +0,0 @@
|
||||
import { defineConfig, globalIgnores } from "eslint/config";
|
||||
import nextVitals from "eslint-config-next/core-web-vitals";
|
||||
import nextTs from "eslint-config-next/typescript";
|
||||
|
||||
const eslintConfig = defineConfig([
|
||||
...nextVitals,
|
||||
...nextTs,
|
||||
// Override default ignores of eslint-config-next.
|
||||
globalIgnores([
|
||||
// Default ignores of eslint-config-next:
|
||||
".next/**",
|
||||
"out/**",
|
||||
"build/**",
|
||||
"next-env.d.ts",
|
||||
// Electron files use CommonJS
|
||||
"electron/**",
|
||||
]),
|
||||
]);
|
||||
|
||||
export default eslintConfig;
|
||||
@@ -1,9 +0,0 @@
|
||||
import type { NextConfig } from "next";
|
||||
|
||||
const nextConfig: NextConfig = {
|
||||
env: {
|
||||
CLAUDE_CODE_OAUTH_TOKEN: process.env.CLAUDE_CODE_OAUTH_TOKEN || "",
|
||||
},
|
||||
};
|
||||
|
||||
export default nextConfig;
|
||||
13896  app/package-lock.json  (generated; file diff suppressed because it is too large)
140  app/package.json
@@ -1,140 +0,0 @@
|
||||
{
|
||||
"name": "automaker",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"license": "Unlicense",
|
||||
"main": "electron/main.js",
|
||||
"scripts": {
|
||||
"dev": "next dev -p 3007",
|
||||
"dev:web": "next dev -p 3007",
|
||||
"dev:electron": "concurrently \"next dev -p 3007\" \"wait-on http://localhost:3007 && electron .\"",
|
||||
"build": "next build",
|
||||
"build:electron": "next build && electron-builder",
|
||||
"start": "next start",
|
||||
"lint": "eslint",
|
||||
"test": "playwright test",
|
||||
"test:headed": "playwright test --headed"
|
||||
},
|
||||
"dependencies": {
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.61",
|
||||
"@dnd-kit/core": "^6.3.1",
|
||||
"@dnd-kit/sortable": "^10.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@radix-ui/react-checkbox": "^1.3.3",
|
||||
"@radix-ui/react-dialog": "^1.1.15",
|
||||
"@radix-ui/react-dropdown-menu": "^2.1.16",
|
||||
"@radix-ui/react-label": "^2.1.8",
|
||||
"@radix-ui/react-popover": "^1.1.15",
|
||||
"@radix-ui/react-slider": "^1.3.6",
|
||||
"@radix-ui/react-slot": "^1.2.4",
|
||||
"@radix-ui/react-tabs": "^1.1.13",
|
||||
"@radix-ui/react-tooltip": "^1.2.8",
|
||||
"@tanstack/react-query": "^5.90.12",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "^1.1.1",
|
||||
"dotenv": "^17.2.3",
|
||||
"lucide-react": "^0.556.0",
|
||||
"next": "16.0.7",
|
||||
"react": "19.2.0",
|
||||
"react-dom": "19.2.0",
|
||||
"react-markdown": "^10.1.0",
|
||||
"sonner": "^2.0.7",
|
||||
"tailwind-merge": "^3.4.0",
|
||||
"zustand": "^5.0.9"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.57.0",
|
||||
"@tailwindcss/postcss": "^4",
|
||||
"@types/node": "^20",
|
||||
"@types/react": "^19",
|
||||
"@types/react-dom": "^19",
|
||||
"concurrently": "^9.2.1",
|
||||
"electron": "^39.2.6",
|
||||
"electron-builder": "^26.0.12",
|
||||
"eslint": "^9",
|
||||
"eslint-config-next": "16.0.7",
|
||||
"tailwindcss": "^4",
|
||||
"tw-animate-css": "^1.4.0",
|
||||
"typescript": "^5",
|
||||
"wait-on": "^9.0.3"
|
||||
},
|
||||
"build": {
|
||||
"appId": "com.automaker.app",
|
||||
"productName": "Automaker",
|
||||
"directories": {
|
||||
"output": "dist"
|
||||
},
|
||||
"files": [
|
||||
"electron/**/*",
|
||||
".next/**/*",
|
||||
"public/**/*",
|
||||
"!node_modules/**/*",
|
||||
"node_modules/@anthropic-ai/**/*"
|
||||
],
|
||||
"extraResources": [
|
||||
{
|
||||
"from": ".env",
|
||||
"to": ".env",
|
||||
"filter": [
|
||||
"**/*"
|
||||
]
|
||||
}
|
||||
],
|
||||
"mac": {
|
||||
"category": "public.app-category.developer-tools",
|
||||
"target": [
|
||||
{
|
||||
"target": "dmg",
|
||||
"arch": [
|
||||
"x64",
|
||||
"arm64"
|
||||
]
|
||||
},
|
||||
{
|
||||
"target": "zip",
|
||||
"arch": [
|
||||
"x64",
|
||||
"arm64"
|
||||
]
|
||||
}
|
||||
],
|
||||
"icon": "public/logo.png"
|
||||
},
|
||||
"win": {
|
||||
"target": [
|
||||
{
|
||||
"target": "nsis",
|
||||
"arch": [
|
||||
"x64"
|
||||
]
|
||||
}
|
||||
],
|
||||
"icon": "public/logo.png"
|
||||
},
|
||||
"linux": {
|
||||
"target": [
|
||||
{
|
||||
"target": "AppImage",
|
||||
"arch": [
|
||||
"x64"
|
||||
]
|
||||
},
|
||||
{
|
||||
"target": "deb",
|
||||
"arch": [
|
||||
"x64"
|
||||
]
|
||||
}
|
||||
],
|
||||
"category": "Development",
|
||||
"icon": "public/logo.png"
|
||||
},
|
||||
"nsis": {
|
||||
"oneClick": false,
|
||||
"allowToChangeInstallationDirectory": true,
|
||||
"createDesktopShortcut": true,
|
||||
"createStartMenuShortcut": true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,35 +0,0 @@
import { defineConfig, devices } from "@playwright/test";

const port = process.env.TEST_PORT || 3007;
const reuseServer = process.env.TEST_REUSE_SERVER === "true";

export default defineConfig({
  testDir: "./tests",
  fullyParallel: true,
  forbidOnly: !!process.env.CI,
  retries: process.env.CI ? 2 : 0,
  workers: process.env.CI ? 1 : undefined,
  reporter: "html",
  timeout: 30000,
  use: {
    baseURL: `http://localhost:${port}`,
    trace: "on-first-retry",
    screenshot: "only-on-failure",
  },
  projects: [
    {
      name: "chromium",
      use: { ...devices["Desktop Chrome"] },
    },
  ],
  ...(reuseServer
    ? {}
    : {
        webServer: {
          command: `npx next dev -p ${port}`,
          url: `http://localhost:${port}`,
          reuseExistingServer: !process.env.CI,
          timeout: 120000,
        },
      }),
});
@@ -1,30 +0,0 @@
import { defineConfig, devices } from "@playwright/test";

const port = process.env.TEST_PORT || 3007;

export default defineConfig({
  testDir: "./tests",
  fullyParallel: true,
  forbidOnly: !!process.env.CI,
  retries: process.env.CI ? 2 : 0,
  workers: process.env.CI ? 1 : undefined,
  reporter: "html",
  timeout: 10000,
  use: {
    baseURL: `http://localhost:${port}`,
    trace: "on-first-retry",
    screenshot: "only-on-failure",
  },
  projects: [
    {
      name: "chromium",
      use: { ...devices["Desktop Chrome"] },
    },
  ],
  webServer: {
    command: `npx next dev -p ${port}`,
    url: `http://localhost:${port}`,
    reuseExistingServer: true,
    timeout: 60000,
  },
});
@@ -1,7 +0,0 @@
const config = {
  plugins: {
    "@tailwindcss/postcss": {},
  },
};

export default config;
Binary file not shown.
Before: 142 KiB
Binary file not shown.
Before: 147 KiB
Binary file not shown.
Before: 108 KiB
Binary file not shown.
@@ -1,172 +0,0 @@
import {
  query,
  Options,
  SDKAssistantMessage,
} from "@anthropic-ai/claude-agent-sdk";
import { NextRequest, NextResponse } from "next/server";
import path from "path";

const systemPrompt = `You are an AI assistant helping users build software. You are part of the Automaker application,
which is designed to help developers plan, design, and implement software projects autonomously.

Your role is to:
- Help users define their project requirements and specifications
- Ask clarifying questions to better understand their needs
- Suggest technical approaches and architectures
- Guide them through the development process
- Be conversational and helpful
- Write, edit, and modify code files as requested
- Execute commands and tests
- Search and analyze the codebase

When discussing projects, help users think through:
- Core functionality and features
- Technical stack choices
- Data models and architecture
- User experience considerations
- Testing strategies

You have full access to the codebase and can:
- Read files to understand existing code
- Write new files
- Edit existing files
- Run bash commands
- Search for code patterns
- Execute tests and builds`;

export async function POST(request: NextRequest) {
  try {
    const { messages, workingDirectory } = await request.json();

    console.log(
      "[API] CLAUDE_CODE_OAUTH_TOKEN present:",
      !!process.env.CLAUDE_CODE_OAUTH_TOKEN
    );

    if (!process.env.CLAUDE_CODE_OAUTH_TOKEN) {
      return NextResponse.json(
        { error: "CLAUDE_CODE_OAUTH_TOKEN not configured" },
        { status: 500 }
      );
    }

    // Get the last user message
    const lastMessage = messages[messages.length - 1];

    // Determine working directory - default to parent of app directory
    const cwd = workingDirectory || path.resolve(process.cwd(), "..");

    console.log("[API] Working directory:", cwd);

    // Create query with options that enable code modification
    const options: Options = {
      // model: "claude-sonnet-4-20250514",
      model: "claude-opus-4-5-20251101",
      systemPrompt,
      maxTurns: 20,
      cwd,
      // Enable all core tools for code modification
      allowedTools: [
        "Read",
        "Write",
        "Edit",
        "Glob",
        "Grep",
        "Bash",
        "WebSearch",
        "WebFetch",
      ],
      // Auto-accept file edits within the working directory
      permissionMode: "acceptEdits",
      // Enable sandbox for safer bash execution
      sandbox: {
        enabled: true,
        autoAllowBashIfSandboxed: true,
      },
    };

    // Convert message history to SDK format to preserve conversation context
    // Include both user and assistant messages for full context
    const sessionId = `api-session-${Date.now()}`;
    const conversationMessages = messages.map(
      (msg: { role: string; content: string }) => {
        if (msg.role === "user") {
          return {
            type: "user" as const,
            message: {
              role: "user" as const,
              content: msg.content,
            },
            parent_tool_use_id: null,
            session_id: sessionId,
          };
        } else {
          // Assistant message
          return {
            type: "assistant" as const,
            message: {
              role: "assistant" as const,
              content: [
                {
                  type: "text" as const,
                  text: msg.content,
                },
              ],
            },
            session_id: sessionId,
          };
        }
      }
    );

    // Execute query with full conversation context
    const queryResult = query({
      prompt:
        conversationMessages.length > 0
          ? conversationMessages
          : lastMessage.content,
      options,
    });

    let responseText = "";
    const toolUses: Array<{ name: string; input: unknown }> = [];

    // Collect the response from the async generator
    for await (const msg of queryResult) {
      if (msg.type === "assistant") {
        const assistantMsg = msg as SDKAssistantMessage;
        if (assistantMsg.message.content) {
          for (const block of assistantMsg.message.content) {
            if (block.type === "text") {
              responseText += block.text;
            } else if (block.type === "tool_use") {
              // Track tool usage for transparency
              toolUses.push({
                name: block.name,
                input: block.input,
              });
            }
          }
        }
      } else if (msg.type === "result") {
        if (msg.subtype === "success") {
          if (msg.result) {
            responseText = msg.result;
          }
        }
      }
    }

    return NextResponse.json({
      content: responseText || "Sorry, I couldn't generate a response.",
      toolUses: toolUses.length > 0 ? toolUses : undefined,
    });
  } catch (error: unknown) {
    console.error("Claude API error:", error);
    const errorMessage =
      error instanceof Error
        ? error.message
        : "Failed to get response from Claude";
    return NextResponse.json({ error: errorMessage }, { status: 500 });
  }
}
@@ -1,97 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

interface AnthropicResponse {
  content?: Array<{ type: string; text?: string }>;
  model?: string;
  error?: { message?: string };
}

export async function POST(request: NextRequest) {
  try {
    const { apiKey } = await request.json();

    // Use provided API key or fall back to environment variable
    const effectiveApiKey = apiKey || process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_CODE_OAUTH_TOKEN;

    if (!effectiveApiKey) {
      return NextResponse.json(
        { success: false, error: "No API key provided or configured in environment" },
        { status: 400 }
      );
    }

    // Send a simple test prompt to the Anthropic API
    const response = await fetch("https://api.anthropic.com/v1/messages", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": effectiveApiKey,
        "anthropic-version": "2023-06-01",
      },
      body: JSON.stringify({
        model: "claude-sonnet-4-20250514",
        max_tokens: 100,
        messages: [
          {
            role: "user",
            content: "Respond with exactly: 'Claude API connection successful!' and nothing else.",
          },
        ],
      }),
    });

    if (!response.ok) {
      const errorData = (await response.json()) as AnthropicResponse;
      const errorMessage = errorData.error?.message || `HTTP ${response.status}`;

      if (response.status === 401) {
        return NextResponse.json(
          { success: false, error: "Invalid API key. Please check your Anthropic API key." },
          { status: 401 }
        );
      }

      if (response.status === 429) {
        return NextResponse.json(
          { success: false, error: "Rate limit exceeded. Please try again later." },
          { status: 429 }
        );
      }

      return NextResponse.json(
        { success: false, error: `API error: ${errorMessage}` },
        { status: response.status }
      );
    }

    const data = (await response.json()) as AnthropicResponse;

    // Check if we got a valid response
    if (data.content && data.content.length > 0) {
      const textContent = data.content.find((block) => block.type === "text");
      if (textContent && textContent.type === "text" && textContent.text) {
        return NextResponse.json({
          success: true,
          message: `Connection successful! Response: "${textContent.text}"`,
          model: data.model,
        });
      }
    }

    return NextResponse.json({
      success: true,
      message: "Connection successful! Claude responded.",
      model: data.model,
    });
  } catch (error: unknown) {
    console.error("Claude API test error:", error);

    const errorMessage =
      error instanceof Error ? error.message : "Failed to connect to Claude API";

    return NextResponse.json(
      { success: false, error: errorMessage },
      { status: 500 }
    );
  }
}
@@ -1,191 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

interface GeminiContent {
  parts: Array<{
    text?: string;
    inlineData?: {
      mimeType: string;
      data: string;
    };
  }>;
  role?: string;
}

interface GeminiRequest {
  contents: GeminiContent[];
  generationConfig?: {
    maxOutputTokens?: number;
    temperature?: number;
  };
}

interface GeminiResponse {
  candidates?: Array<{
    content: {
      parts: Array<{
        text: string;
      }>;
      role: string;
    };
    finishReason: string;
    safetyRatings?: Array<{
      category: string;
      probability: string;
    }>;
  }>;
  promptFeedback?: {
    safetyRatings?: Array<{
      category: string;
      probability: string;
    }>;
  };
  error?: {
    code: number;
    message: string;
    status: string;
  };
}

export async function POST(request: NextRequest) {
  try {
    const { apiKey, imageData, mimeType, prompt } = await request.json();

    // Use provided API key or fall back to environment variable
    const effectiveApiKey = apiKey || process.env.GOOGLE_API_KEY;

    if (!effectiveApiKey) {
      return NextResponse.json(
        { success: false, error: "No API key provided or configured in environment" },
        { status: 400 }
      );
    }

    // Build the request body
    const requestBody: GeminiRequest = {
      contents: [
        {
          parts: [],
        },
      ],
      generationConfig: {
        maxOutputTokens: 150,
        temperature: 0.4,
      },
    };

    // Add image if provided
    if (imageData && mimeType) {
      requestBody.contents[0].parts.push({
        inlineData: {
          mimeType: mimeType,
          data: imageData,
        },
      });
    }

    // Add text prompt
    const textPrompt = prompt || (imageData
      ? "Describe what you see in this image briefly."
      : "Respond with exactly: 'Gemini SDK connection successful!' and nothing else.");

    requestBody.contents[0].parts.push({
      text: textPrompt,
    });

    // Call Gemini API - using gemini-1.5-flash as it supports both text and vision
    const model = imageData ? "gemini-1.5-flash" : "gemini-1.5-flash";
    const geminiUrl = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${effectiveApiKey}`;

    const response = await fetch(geminiUrl, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(requestBody),
    });

    const data: GeminiResponse = await response.json();

    // Check for API errors
    if (data.error) {
      const errorMessage = data.error.message || "Unknown Gemini API error";
      const statusCode = data.error.code || 500;

      if (statusCode === 400 && errorMessage.includes("API key")) {
        return NextResponse.json(
          { success: false, error: "Invalid API key. Please check your Google API key." },
          { status: 401 }
        );
      }

      if (statusCode === 429) {
        return NextResponse.json(
          { success: false, error: "Rate limit exceeded. Please try again later." },
          { status: 429 }
        );
      }

      return NextResponse.json(
        { success: false, error: `API error: ${errorMessage}` },
        { status: statusCode }
      );
    }

    // Check for valid response
    if (!response.ok) {
      return NextResponse.json(
        { success: false, error: `HTTP error: ${response.status} ${response.statusText}` },
        { status: response.status }
      );
    }

    // Extract response text
    if (data.candidates && data.candidates.length > 0 && data.candidates[0].content?.parts?.length > 0) {
      const responseText = data.candidates[0].content.parts
        .filter((part) => part.text)
        .map((part) => part.text)
        .join("");

      return NextResponse.json({
        success: true,
        message: `Connection successful! Response: "${responseText.substring(0, 200)}${responseText.length > 200 ? '...' : ''}"`,
        model: model,
        hasImage: !!imageData,
      });
    }

    // Handle blocked responses
    if (data.promptFeedback?.safetyRatings) {
      return NextResponse.json({
        success: true,
        message: "Connection successful! Gemini responded (response may have been filtered).",
        model: model,
        hasImage: !!imageData,
      });
    }

    return NextResponse.json({
      success: true,
      message: "Connection successful! Gemini responded.",
      model: model,
      hasImage: !!imageData,
    });
  } catch (error: unknown) {
    console.error("Gemini API test error:", error);

    if (error instanceof TypeError && error.message.includes("fetch")) {
      return NextResponse.json(
        { success: false, error: "Network error. Unable to reach Gemini API." },
        { status: 503 }
      );
    }

    const errorMessage =
      error instanceof Error ? error.message : "Failed to connect to Gemini API";

    return NextResponse.json(
      { success: false, error: errorMessage },
      { status: 500 }
    );
  }
}
Binary file not shown.
Before: 25 KiB
File diff suppressed because it is too large.
@@ -1,36 +0,0 @@
import type { Metadata } from "next";
import { Geist, Geist_Mono } from "next/font/google";
import { Toaster } from "sonner";
import "./globals.css";

const geistSans = Geist({
  variable: "--font-geist-sans",
  subsets: ["latin"],
});

const geistMono = Geist_Mono({
  variable: "--font-geist-mono",
  subsets: ["latin"],
});

export const metadata: Metadata = {
  title: "Automaker - Autonomous AI Development Studio",
  description: "Build software autonomously with intelligent orchestration",
};

export default function RootLayout({
  children,
}: Readonly<{
  children: React.ReactNode;
}>) {
  return (
    <html lang="en" suppressHydrationWarning>
      <body
        className={`${geistSans.variable} ${geistMono.variable} antialiased`}
      >
        {children}
        <Toaster richColors position="bottom-right" />
      </body>
    </html>
  );
}
@@ -1,223 +0,0 @@
"use client";

import { useEffect, useState, useCallback } from "react";
import { Sidebar } from "@/components/layout/sidebar";
import { WelcomeView } from "@/components/views/welcome-view";
import { BoardView } from "@/components/views/board-view";
import { SpecView } from "@/components/views/spec-view";
import { AgentView } from "@/components/views/agent-view";
import { SettingsView } from "@/components/views/settings-view";
import { AgentToolsView } from "@/components/views/agent-tools-view";
import { InterviewView } from "@/components/views/interview-view";
import { ContextView } from "@/components/views/context-view";
import { ProfilesView } from "@/components/views/profiles-view";
import { SetupView } from "@/components/views/setup-view";
import { useAppStore } from "@/store/app-store";
import { useSetupStore } from "@/store/setup-store";
import { getElectronAPI, isElectron } from "@/lib/electron";

export default function Home() {
  const { currentView, setCurrentView, setIpcConnected, theme, currentProject } = useAppStore();
  const { isFirstRun, setupComplete } = useSetupStore();
  const [isMounted, setIsMounted] = useState(false);
  const [streamerPanelOpen, setStreamerPanelOpen] = useState(false);

  // Hidden streamer panel - opens with "\" key
  const handleStreamerPanelShortcut = useCallback((event: KeyboardEvent) => {
    // Don't trigger when typing in inputs
    const activeElement = document.activeElement;
    if (activeElement) {
      const tagName = activeElement.tagName.toLowerCase();
      if (tagName === "input" || tagName === "textarea" || tagName === "select") {
        return;
      }
      if (activeElement.getAttribute("contenteditable") === "true") {
        return;
      }
      const role = activeElement.getAttribute("role");
      if (role === "textbox" || role === "searchbox" || role === "combobox") {
        return;
      }
    }

    // Don't trigger with modifier keys
    if (event.ctrlKey || event.altKey || event.metaKey) {
      return;
    }

    // Check for "\" key (backslash)
    if (event.key === "\\") {
      event.preventDefault();
      setStreamerPanelOpen((prev) => !prev);
    }
  }, []);

  // Register the "\" shortcut for streamer panel
  useEffect(() => {
    window.addEventListener("keydown", handleStreamerPanelShortcut);
    return () => {
      window.removeEventListener("keydown", handleStreamerPanelShortcut);
    };
  }, [handleStreamerPanelShortcut]);

  // Compute the effective theme: project theme takes priority over global theme
  // This is reactive because it depends on currentProject and theme from the store
  const effectiveTheme = currentProject?.theme || theme;

  // Prevent hydration issues
  useEffect(() => {
    setIsMounted(true);
  }, []);

  // Check if this is first run and redirect to setup if needed
  useEffect(() => {
    console.log("[Setup Flow] Checking setup state:", {
      isMounted,
      isFirstRun,
      setupComplete,
      currentView,
      shouldShowSetup: isMounted && isFirstRun && !setupComplete,
    });

    if (isMounted && isFirstRun && !setupComplete) {
      console.log("[Setup Flow] Redirecting to setup wizard (first run, not complete)");
      setCurrentView("setup");
    } else if (isMounted && setupComplete) {
      console.log("[Setup Flow] Setup already complete, showing normal view");
    }
  }, [isMounted, isFirstRun, setupComplete, setCurrentView, currentView]);

  // Test IPC connection on mount
  useEffect(() => {
    const testConnection = async () => {
      try {
        const api = getElectronAPI();
        const result = await api.ping();
        setIpcConnected(result === "pong" || result === "pong (mock)");
      } catch (error) {
        console.error("IPC connection failed:", error);
        setIpcConnected(false);
      }
    };

    testConnection();
  }, [setIpcConnected]);

  // Apply theme class to document (uses effective theme - project-specific or global)
  useEffect(() => {
    const root = document.documentElement;
    root.classList.remove(
      "dark",
      "retro",
      "light",
      "dracula",
      "nord",
      "monokai",
      "tokyonight",
      "solarized",
      "gruvbox",
      "catppuccin",
      "onedark",
      "synthwave"
    );

    if (effectiveTheme === "dark") {
      root.classList.add("dark");
    } else if (effectiveTheme === "retro") {
      root.classList.add("retro");
    } else if (effectiveTheme === "dracula") {
      root.classList.add("dracula");
    } else if (effectiveTheme === "nord") {
      root.classList.add("nord");
    } else if (effectiveTheme === "monokai") {
      root.classList.add("monokai");
    } else if (effectiveTheme === "tokyonight") {
      root.classList.add("tokyonight");
    } else if (effectiveTheme === "solarized") {
      root.classList.add("solarized");
    } else if (effectiveTheme === "gruvbox") {
      root.classList.add("gruvbox");
    } else if (effectiveTheme === "catppuccin") {
      root.classList.add("catppuccin");
    } else if (effectiveTheme === "onedark") {
      root.classList.add("onedark");
    } else if (effectiveTheme === "synthwave") {
      root.classList.add("synthwave");
    } else if (effectiveTheme === "light") {
      root.classList.add("light");
    } else if (effectiveTheme === "system") {
      // System theme
      const isDark = window.matchMedia("(prefers-color-scheme: dark)").matches;
      if (isDark) {
        root.classList.add("dark");
      } else {
        root.classList.add("light");
      }
    }
  }, [effectiveTheme]);

  const renderView = () => {
    switch (currentView) {
      case "welcome":
        return <WelcomeView />;
      case "setup":
        return <SetupView />;
      case "board":
        return <BoardView />;
      case "spec":
        return <SpecView />;
      case "agent":
        return <AgentView />;
      case "settings":
        return <SettingsView />;
      case "tools":
        return <AgentToolsView />;
      case "interview":
        return <InterviewView />;
      case "context":
        return <ContextView />;
      case "profiles":
        return <ProfilesView />;
      default:
        return <WelcomeView />;
    }
  };

  // Setup view is full-screen without sidebar
  if (currentView === "setup") {
    return (
      <main className="h-screen overflow-hidden" data-testid="app-container">
        <SetupView />
        {/* Environment indicator */}
        {isMounted && !isElectron() && (
          <div className="fixed bottom-4 right-4 px-3 py-1.5 bg-yellow-500/10 text-yellow-500 text-xs rounded-full border border-yellow-500/20 pointer-events-none">
            Web Mode (Mock IPC)
          </div>
        )}
      </main>
    );
  }

  return (
    <main className="flex h-screen overflow-hidden" data-testid="app-container">
      <Sidebar />
      <div className="flex-1 flex flex-col overflow-hidden transition-all duration-300" style={{ marginRight: streamerPanelOpen ? '250px' : '0' }}>
        {renderView()}
      </div>

      {/* Environment indicator - only show after mount to prevent hydration issues */}
      {isMounted && !isElectron() && (
        <div className="fixed bottom-4 right-4 px-3 py-1.5 bg-yellow-500/10 text-yellow-500 text-xs rounded-full border border-yellow-500/20 pointer-events-none">
          Web Mode (Mock IPC)
        </div>
      )}

      {/* Hidden streamer panel - opens with "\" key, pushes content */}
      <div
        className={`fixed top-0 right-0 h-full w-[250px] bg-background border-l border-border transition-transform duration-300 ${
          streamerPanelOpen ? 'translate-x-0' : 'translate-x-full'
        }`}
      />
    </main>
  );
}
File diff suppressed because it is too large
@@ -1,36 +0,0 @@
|
||||
import * as React from "react";
|
||||
import { cva, type VariantProps } from "class-variance-authority";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
const badgeVariants = cva(
|
||||
"inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2",
|
||||
{
|
||||
variants: {
|
||||
variant: {
|
||||
default:
|
||||
"border-transparent bg-primary text-primary-foreground hover:bg-primary/80",
|
||||
secondary:
|
||||
"border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80",
|
||||
destructive:
|
||||
"border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80",
|
||||
outline: "text-foreground",
|
||||
},
|
||||
},
|
||||
defaultVariants: {
|
||||
variant: "default",
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
export interface BadgeProps
|
||||
extends React.HTMLAttributes<HTMLDivElement>,
|
||||
VariantProps<typeof badgeVariants> {}
|
||||
|
||||
function Badge({ className, variant, ...props }: BadgeProps) {
|
||||
return (
|
||||
<div className={cn(badgeVariants({ variant }), className)} {...props} />
|
||||
);
|
||||
}
|
||||
|
||||
export { Badge, badgeVariants };
|
||||
@@ -1,95 +0,0 @@
|
||||
import * as React from "react";
|
||||
import { Slot } from "@radix-ui/react-slot";
|
||||
import { cva, type VariantProps } from "class-variance-authority";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
const buttonVariants = cva(
|
||||
"inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all cursor-pointer disabled:pointer-events-none disabled:opacity-50 disabled:cursor-not-allowed [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
|
||||
{
|
||||
variants: {
|
||||
variant: {
|
||||
default: "bg-primary text-primary-foreground hover:bg-primary/90",
|
||||
destructive:
|
||||
"bg-destructive text-white hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60",
|
||||
outline:
|
||||
"border bg-background shadow-xs hover:bg-accent hover:text-accent-foreground dark:bg-input/30 dark:border-input dark:hover:bg-input/50",
|
||||
secondary:
|
||||
"bg-secondary text-secondary-foreground hover:bg-secondary/80",
|
||||
ghost:
|
||||
"hover:bg-accent hover:text-accent-foreground dark:hover:bg-accent/50",
|
||||
link: "text-primary underline-offset-4 hover:underline",
|
||||
"animated-outline":
|
||||
"relative overflow-hidden rounded-xl hover:bg-transparent shadow-none",
|
||||
},
|
||||
size: {
|
||||
default: "h-9 px-4 py-2 has-[>svg]:px-3",
|
||||
sm: "h-8 rounded-md gap-1.5 px-3 has-[>svg]:px-2.5",
|
||||
lg: "h-10 rounded-md px-6 has-[>svg]:px-4",
|
||||
icon: "size-9",
|
||||
"icon-sm": "size-8",
|
||||
"icon-lg": "size-10",
|
||||
},
|
||||
},
|
||||
defaultVariants: {
|
||||
variant: "default",
|
||||
size: "default",
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
function Button({
|
||||
className,
|
||||
variant,
|
||||
size,
|
||||
asChild = false,
|
||||
children,
|
||||
...props
|
||||
}: React.ComponentProps<"button"> &
|
||||
VariantProps<typeof buttonVariants> & {
|
||||
asChild?: boolean;
|
||||
}) {
|
||||
// Special handling for animated-outline variant
|
||||
if (variant === "animated-outline" && !asChild) {
|
||||
return (
|
||||
<button
|
||||
className={cn(
|
||||
buttonVariants({ variant, size }),
|
||||
"p-[1px]", // Force 1px padding for the gradient border
|
||||
className
|
||||
)}
|
||||
data-slot="button"
|
||||
{...props}
|
||||
>
|
||||
{/* Animated rotating gradient border */}
|
||||
<span className="absolute inset-[-1000%] animate-[spin_2s_linear_infinite] animated-outline-gradient" />
|
||||
|
||||
{/* Inner content container */}
|
||||
<span
|
||||
className={cn(
|
||||
"animated-outline-inner inline-flex h-full w-full cursor-pointer items-center justify-center gap-2 rounded-[10px] px-4 py-1 text-sm font-medium backdrop-blur-3xl transition-all",
|
||||
size === "sm" && "px-3 text-xs gap-1.5",
|
||||
size === "lg" && "px-8",
|
||||
size === "icon" && "p-0 gap-0"
|
||||
)}
|
||||
>
|
||||
{children}
|
||||
</span>
|
||||
</button>
|
||||
);
|
||||
}
|
||||
|
||||
const Comp = asChild ? Slot : "button";
|
||||
|
||||
return (
|
||||
<Comp
|
||||
data-slot="button"
|
||||
className={cn(buttonVariants({ variant, size, className }))}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
</Comp>
|
||||
);
|
||||
}
|
||||
|
||||
export { Button, buttonVariants };
|
||||
@@ -1,92 +0,0 @@
|
||||
import * as React from "react";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
function Card({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card"
|
||||
className={cn(
|
||||
"bg-card text-card-foreground flex flex-col gap-6 rounded-xl border border-white/10 backdrop-blur-sm py-6 shadow-sm",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function CardHeader({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-header"
|
||||
className={cn(
|
||||
"@container/card-header grid auto-rows-min grid-rows-[auto_auto] items-start gap-2 px-6 has-data-[slot=card-action]:grid-cols-[1fr_auto] [.border-b]:pb-6",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function CardTitle({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-title"
|
||||
className={cn("leading-none font-semibold", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function CardDescription({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-description"
|
||||
className={cn("text-muted-foreground text-sm", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function CardAction({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-action"
|
||||
className={cn(
|
||||
"col-start-2 row-span-2 row-start-1 self-start justify-self-end",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function CardContent({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-content"
|
||||
className={cn("px-6", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function CardFooter({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-footer"
|
||||
className={cn("flex items-center px-6 [.border-t]:pt-6", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export {
|
||||
Card,
|
||||
CardHeader,
|
||||
CardFooter,
|
||||
CardTitle,
|
||||
CardAction,
|
||||
CardDescription,
|
||||
CardContent,
|
||||
};
|
||||
@@ -1,91 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import * as React from "react";
|
||||
import { Check, ChevronsUpDown } from "lucide-react";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import {
|
||||
Command,
|
||||
CommandEmpty,
|
||||
CommandGroup,
|
||||
CommandInput,
|
||||
CommandItem,
|
||||
CommandList,
|
||||
} from "@/components/ui/command";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/ui/popover";
|
||||
|
||||
interface CategoryAutocompleteProps {
|
||||
value: string;
|
||||
onChange: (value: string) => void;
|
||||
suggestions: string[];
|
||||
placeholder?: string;
|
||||
className?: string;
|
||||
disabled?: boolean;
|
||||
"data-testid"?: string;
|
||||
}
|
||||
|
||||
export function CategoryAutocomplete({
|
||||
value,
|
||||
onChange,
|
||||
suggestions,
|
||||
placeholder = "Select or type a category...",
|
||||
className,
|
||||
disabled = false,
|
||||
"data-testid": testId,
|
||||
}: CategoryAutocompleteProps) {
|
||||
const [open, setOpen] = React.useState(false);
|
||||
|
||||
return (
|
||||
<Popover open={open} onOpenChange={setOpen}>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="outline"
|
||||
role="combobox"
|
||||
aria-expanded={open}
|
||||
disabled={disabled}
|
||||
className={cn("w-full justify-between", className)}
|
||||
data-testid={testId}
|
||||
>
|
||||
{value
|
||||
? suggestions.find((s) => s === value) ?? value
|
||||
: placeholder}
|
||||
<ChevronsUpDown className="opacity-50" />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent className="w-[200px] p-0">
|
||||
<Command>
|
||||
<CommandInput placeholder="Search category..." className="h-9" />
|
||||
<CommandList>
|
||||
<CommandEmpty>No category found.</CommandEmpty>
|
||||
<CommandGroup>
|
||||
{suggestions.map((suggestion) => (
|
||||
<CommandItem
|
||||
key={suggestion}
|
||||
value={suggestion}
|
||||
onSelect={(currentValue) => {
|
||||
onChange(currentValue === value ? "" : currentValue);
|
||||
setOpen(false);
|
||||
}}
|
||||
data-testid={`category-option-${suggestion.toLowerCase().replace(/\s+/g, "-")}`}
|
||||
>
|
||||
{suggestion}
|
||||
<Check
|
||||
className={cn(
|
||||
"ml-auto",
|
||||
value === suggestion ? "opacity-100" : "opacity-0"
|
||||
)}
|
||||
/>
|
||||
</CommandItem>
|
||||
))}
|
||||
</CommandGroup>
|
||||
</CommandList>
|
||||
</Command>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import * as React from "react";
|
||||
import * as CheckboxPrimitive from "@radix-ui/react-checkbox";
|
||||
import { Check } from "lucide-react";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
const Checkbox = React.forwardRef<
|
||||
React.ElementRef<typeof CheckboxPrimitive.Root>,
|
||||
React.ComponentPropsWithoutRef<typeof CheckboxPrimitive.Root>
|
||||
>(({ className, ...props }, ref) => (
|
||||
<CheckboxPrimitive.Root
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"peer h-4 w-4 shrink-0 rounded-sm border border-primary ring-offset-background cursor-pointer focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=checked]:text-primary-foreground hover:border-primary/80",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
<CheckboxPrimitive.Indicator
|
||||
className={cn("flex items-center justify-center text-current")}
|
||||
>
|
||||
<Check className="h-4 w-4" />
|
||||
</CheckboxPrimitive.Indicator>
|
||||
</CheckboxPrimitive.Root>
|
||||
));
|
||||
Checkbox.displayName = CheckboxPrimitive.Root.displayName;
|
||||
|
||||
export { Checkbox };
|
||||
@@ -1,407 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import React, { useState, useRef, useCallback, useEffect } from "react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { ImageIcon, X, Loader2 } from "lucide-react";
|
||||
import { Textarea } from "@/components/ui/textarea";
|
||||
import { getElectronAPI } from "@/lib/electron";
|
||||
import { useAppStore } from "@/store/app-store";
|
||||
|
||||
export interface FeatureImagePath {
|
||||
id: string;
|
||||
path: string; // Path to the temp file
|
||||
filename: string;
|
||||
mimeType: string;
|
||||
}
|
||||
|
||||
// Map to store preview data by image ID (persisted across component re-mounts)
|
||||
export type ImagePreviewMap = Map<string, string>;
|
||||
|
||||
interface DescriptionImageDropZoneProps {
|
||||
value: string;
|
||||
onChange: (value: string) => void;
|
||||
images: FeatureImagePath[];
|
||||
onImagesChange: (images: FeatureImagePath[]) => void;
|
||||
placeholder?: string;
|
||||
className?: string;
|
||||
disabled?: boolean;
|
||||
maxFiles?: number;
|
||||
maxFileSize?: number; // in bytes, default 10MB
|
||||
// Optional: pass preview map from parent to persist across tab switches
|
||||
previewMap?: ImagePreviewMap;
|
||||
onPreviewMapChange?: (map: ImagePreviewMap) => void;
|
||||
autoFocus?: boolean;
|
||||
error?: boolean; // Show error state with red border
|
||||
}
|
||||
|
||||
const ACCEPTED_IMAGE_TYPES = [
|
||||
"image/jpeg",
|
||||
"image/jpg",
|
||||
"image/png",
|
||||
"image/gif",
|
||||
"image/webp",
|
||||
];
|
||||
const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
|
||||
|
||||
export function DescriptionImageDropZone({
|
||||
value,
|
||||
onChange,
|
||||
images,
|
||||
onImagesChange,
|
||||
placeholder = "Describe the feature...",
|
||||
className,
|
||||
disabled = false,
|
||||
maxFiles = 5,
|
||||
maxFileSize = DEFAULT_MAX_FILE_SIZE,
|
||||
previewMap,
|
||||
onPreviewMapChange,
|
||||
autoFocus = false,
|
||||
error = false,
|
||||
}: DescriptionImageDropZoneProps) {
|
||||
const [isDragOver, setIsDragOver] = useState(false);
|
||||
const [isProcessing, setIsProcessing] = useState(false);
|
||||
// Use parent-provided preview map if available, otherwise use local state
|
||||
const [localPreviewImages, setLocalPreviewImages] = useState<Map<string, string>>(
|
||||
() => new Map()
|
||||
);
|
||||
|
||||
// Determine which preview map to use - prefer parent-controlled state
|
||||
const previewImages = previewMap !== undefined ? previewMap : localPreviewImages;
|
||||
const setPreviewImages = useCallback((updater: Map<string, string> | ((prev: Map<string, string>) => Map<string, string>)) => {
|
||||
if (onPreviewMapChange) {
|
||||
const currentMap = previewMap !== undefined ? previewMap : localPreviewImages;
|
||||
const newMap = typeof updater === 'function' ? updater(currentMap) : updater;
|
||||
onPreviewMapChange(newMap);
|
||||
} else {
|
||||
setLocalPreviewImages((prev) => {
|
||||
const newMap = typeof updater === 'function' ? updater(prev) : updater;
|
||||
return newMap;
|
||||
});
|
||||
}
|
||||
}, [onPreviewMapChange, previewMap, localPreviewImages]);
|
||||
|
||||
const fileInputRef = useRef<HTMLInputElement>(null);
|
||||
const currentProject = useAppStore((state) => state.currentProject);
|
||||
|
||||
const fileToBase64 = (file: File): Promise<string> => {
|
||||
return new Promise((resolve, reject) => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = () => {
|
||||
if (typeof reader.result === "string") {
|
||||
resolve(reader.result);
|
||||
} else {
|
||||
reject(new Error("Failed to read file as base64"));
|
||||
}
|
||||
};
|
||||
reader.onerror = () => reject(new Error("Failed to read file"));
|
||||
reader.readAsDataURL(file);
|
||||
});
|
||||
};
|
||||
|
||||
const saveImageToTemp = async (
|
||||
base64Data: string,
|
||||
filename: string,
|
||||
mimeType: string
|
||||
): Promise<string | null> => {
|
||||
try {
|
||||
const api = getElectronAPI();
|
||||
// Check if saveImageToTemp method exists
|
||||
if (!api.saveImageToTemp) {
|
||||
// Fallback for mock API - return a mock path in .automaker/images
|
||||
console.log("[DescriptionImageDropZone] Using mock path for image");
|
||||
return `.automaker/images/${Date.now()}_${filename}`;
|
||||
}
|
||||
|
||||
// Get projectPath from the store if available
|
||||
const projectPath = currentProject?.path;
|
||||
const result = await api.saveImageToTemp(base64Data, filename, mimeType, projectPath);
|
||||
if (result.success && result.path) {
|
||||
return result.path;
|
||||
}
|
||||
console.error("[DescriptionImageDropZone] Failed to save image:", result.error);
|
||||
return null;
|
||||
} catch (error) {
|
||||
console.error("[DescriptionImageDropZone] Error saving image:", error);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
const processFiles = useCallback(
|
||||
async (files: FileList) => {
|
||||
if (disabled || isProcessing) return;
|
||||
|
||||
setIsProcessing(true);
|
||||
const newImages: FeatureImagePath[] = [];
|
||||
const newPreviews = new Map(previewImages);
|
||||
const errors: string[] = [];
|
||||
|
||||
for (const file of Array.from(files)) {
|
||||
// Validate file type
|
||||
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
|
||||
errors.push(
|
||||
`${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate file size
|
||||
if (file.size > maxFileSize) {
|
||||
const maxSizeMB = maxFileSize / (1024 * 1024);
|
||||
errors.push(
|
||||
`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if we've reached max files
|
||||
if (newImages.length + images.length >= maxFiles) {
|
||||
errors.push(`Maximum ${maxFiles} images allowed.`);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const base64 = await fileToBase64(file);
|
||||
const tempPath = await saveImageToTemp(base64, file.name, file.type);
|
||||
|
||||
if (tempPath) {
|
||||
const imageId = `img-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
|
||||
const imagePathRef: FeatureImagePath = {
|
||||
id: imageId,
|
||||
path: tempPath,
|
||||
filename: file.name,
|
||||
mimeType: file.type,
|
||||
};
|
||||
newImages.push(imagePathRef);
|
||||
// Store preview for display
|
||||
newPreviews.set(imageId, base64);
|
||||
} else {
|
||||
errors.push(`${file.name}: Failed to save image.`);
|
||||
}
|
||||
} catch (error) {
|
||||
errors.push(`${file.name}: Failed to process image.`);
|
||||
}
|
||||
}
|
||||
|
||||
if (errors.length > 0) {
|
||||
console.warn("Image upload errors:", errors);
|
||||
}
|
||||
|
||||
if (newImages.length > 0) {
|
||||
onImagesChange([...images, ...newImages]);
|
||||
setPreviewImages(newPreviews);
|
||||
}
|
||||
|
||||
setIsProcessing(false);
|
||||
},
|
||||
[disabled, isProcessing, images, maxFiles, maxFileSize, onImagesChange, previewImages]
|
||||
);
|
||||
|
||||
const handleDrop = useCallback(
|
||||
(e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDragOver(false);
|
||||
|
||||
if (disabled) return;
|
||||
|
||||
const files = e.dataTransfer.files;
|
||||
if (files.length > 0) {
|
||||
processFiles(files);
|
||||
}
|
||||
},
|
||||
[disabled, processFiles]
|
||||
);
|
||||
|
||||
const handleDragOver = useCallback(
|
||||
(e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
if (!disabled) {
|
||||
setIsDragOver(true);
|
||||
}
|
||||
},
|
||||
[disabled]
|
||||
);
|
||||
|
||||
const handleDragLeave = useCallback((e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDragOver(false);
|
||||
}, []);
|
||||
|
||||
const handleFileSelect = useCallback(
|
||||
(e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const files = e.target.files;
|
||||
if (files && files.length > 0) {
|
||||
processFiles(files);
|
||||
}
|
||||
// Reset the input so the same file can be selected again
|
||||
if (fileInputRef.current) {
|
||||
fileInputRef.current.value = "";
|
||||
}
|
||||
},
|
||||
[processFiles]
|
||||
);
|
||||
|
||||
const handleBrowseClick = useCallback(() => {
|
||||
if (!disabled && fileInputRef.current) {
|
||||
fileInputRef.current.click();
|
||||
}
|
||||
}, [disabled]);
|
||||
|
||||
const removeImage = useCallback(
|
||||
(imageId: string) => {
|
||||
onImagesChange(images.filter((img) => img.id !== imageId));
|
||||
setPreviewImages((prev) => {
|
||||
const newMap = new Map(prev);
|
||||
newMap.delete(imageId);
|
||||
return newMap;
|
||||
});
|
||||
},
|
||||
[images, onImagesChange]
|
||||
);
|
||||
|
||||
return (
|
||||
<div className={cn("relative", className)}>
|
||||
{/* Hidden file input */}
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type="file"
|
||||
multiple
|
||||
accept={ACCEPTED_IMAGE_TYPES.join(",")}
|
||||
onChange={handleFileSelect}
|
||||
className="hidden"
|
||||
disabled={disabled}
|
||||
data-testid="description-image-input"
|
||||
/>
|
||||
|
||||
{/* Drop zone wrapper */}
|
||||
<div
|
||||
onDrop={handleDrop}
|
||||
onDragOver={handleDragOver}
|
||||
onDragLeave={handleDragLeave}
|
||||
className={cn(
|
||||
"relative rounded-md transition-all duration-200",
|
||||
{
|
||||
"ring-2 ring-blue-400 ring-offset-2 ring-offset-background":
|
||||
isDragOver && !disabled,
|
||||
}
|
||||
)}
|
||||
>
|
||||
{/* Drag overlay */}
|
||||
{isDragOver && !disabled && (
|
||||
<div
|
||||
className="absolute inset-0 z-10 flex items-center justify-center rounded-md bg-blue-500/20 border-2 border-dashed border-blue-400 pointer-events-none"
|
||||
data-testid="drop-overlay"
|
||||
>
|
||||
<div className="flex flex-col items-center gap-2 text-blue-400">
|
||||
<ImageIcon className="w-8 h-8" />
|
||||
<span className="text-sm font-medium">Drop images here</span>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Textarea */}
|
||||
<Textarea
|
||||
placeholder={placeholder}
|
||||
value={value}
|
||||
onChange={(e) => onChange(e.target.value)}
|
||||
disabled={disabled}
|
||||
autoFocus={autoFocus}
|
||||
aria-invalid={error}
|
||||
className={cn(
|
||||
"min-h-[120px]",
|
||||
isProcessing && "opacity-50 pointer-events-none"
|
||||
)}
|
||||
data-testid="feature-description-input"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Hint text */}
|
||||
<p className="text-xs text-muted-foreground mt-1">
|
||||
Drag and drop images here or{" "}
|
||||
<button
|
||||
type="button"
|
||||
onClick={handleBrowseClick}
|
||||
className="text-primary hover:text-primary/80 underline"
|
||||
disabled={disabled || isProcessing}
|
||||
>
|
||||
browse
|
||||
</button>{" "}
|
||||
to attach context images
|
||||
</p>
|
||||
|
||||
{/* Processing indicator */}
|
||||
{isProcessing && (
|
||||
<div className="flex items-center gap-2 mt-2 text-sm text-muted-foreground">
|
||||
<Loader2 className="w-4 h-4 animate-spin" />
|
||||
<span>Saving images...</span>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Image previews */}
|
||||
{images.length > 0 && (
|
||||
<div className="mt-3 space-y-2" data-testid="description-image-previews">
|
||||
<div className="flex items-center justify-between">
|
||||
<p className="text-xs font-medium text-foreground">
|
||||
{images.length} image{images.length > 1 ? "s" : ""} attached
|
||||
</p>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => {
|
||||
onImagesChange([]);
|
||||
setPreviewImages(new Map());
|
||||
}}
|
||||
className="text-xs text-muted-foreground hover:text-foreground"
|
||||
disabled={disabled}
|
||||
>
|
||||
Clear all
|
||||
</button>
|
||||
</div>
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{images.map((image) => (
|
||||
<div
|
||||
key={image.id}
|
||||
className="relative group rounded-md border border-muted bg-muted/50 overflow-hidden"
|
||||
data-testid={`description-image-preview-${image.id}`}
|
||||
>
|
||||
{/* Image thumbnail or placeholder */}
|
||||
<div className="w-16 h-16 flex items-center justify-center bg-zinc-800">
|
||||
{previewImages.has(image.id) ? (
|
||||
<img
|
||||
src={previewImages.get(image.id)}
|
||||
alt={image.filename}
|
||||
className="max-w-full max-h-full object-contain"
|
||||
/>
|
||||
) : (
|
||||
<ImageIcon className="w-6 h-6 text-muted-foreground" />
|
||||
)}
|
||||
</div>
|
||||
{/* Remove button */}
|
||||
{!disabled && (
|
||||
<button
|
||||
type="button"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
removeImage(image.id);
|
||||
}}
|
||||
className="absolute top-0.5 right-0.5 p-0.5 rounded-full bg-destructive text-destructive-foreground opacity-0 group-hover:opacity-100 transition-opacity"
|
||||
data-testid={`remove-description-image-${image.id}`}
|
||||
>
|
||||
<X className="h-3 w-3" />
|
||||
</button>
|
||||
)}
|
||||
{/* Filename tooltip on hover */}
|
||||
<div className="absolute bottom-0 left-0 right-0 bg-black/60 px-1 py-0.5 opacity-0 group-hover:opacity-100 transition-opacity">
|
||||
<p className="text-[10px] text-white truncate">
|
||||
{image.filename}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,157 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import * as React from "react";
|
||||
import * as DialogPrimitive from "@radix-ui/react-dialog";
|
||||
import { XIcon } from "lucide-react";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
function Dialog({
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Root>) {
|
||||
return <DialogPrimitive.Root data-slot="dialog" {...props} />;
|
||||
}
|
||||
|
||||
function DialogTrigger({
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Trigger>) {
|
||||
return <DialogPrimitive.Trigger data-slot="dialog-trigger" {...props} />;
|
||||
}
|
||||
|
||||
function DialogPortal({
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Portal>) {
|
||||
return <DialogPrimitive.Portal data-slot="dialog-portal" {...props} />;
|
||||
}
|
||||
|
||||
function DialogClose({
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Close>) {
|
||||
return <DialogPrimitive.Close data-slot="dialog-close" {...props} />;
|
||||
}
|
||||
|
||||
function DialogOverlay({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Overlay>) {
|
||||
return (
|
||||
<DialogPrimitive.Overlay
|
||||
data-slot="dialog-overlay"
|
||||
className={cn(
|
||||
"data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 fixed inset-0 z-50 bg-black/50",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function DialogContent({
|
||||
className,
|
||||
children,
|
||||
showCloseButton = true,
|
||||
compact = false,
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Content> & {
|
||||
showCloseButton?: boolean;
|
||||
compact?: boolean;
|
||||
}) {
|
||||
// Check if className contains a custom max-width
|
||||
const hasCustomMaxWidth =
|
||||
typeof className === "string" && className.includes("max-w-");
|
||||
|
||||
return (
|
||||
<DialogPortal data-slot="dialog-portal">
|
||||
<DialogOverlay />
|
||||
<DialogPrimitive.Content
|
||||
data-slot="dialog-content"
|
||||
className={cn(
|
||||
"bg-background data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 fixed top-[50%] left-[50%] z-50 flex flex-col w-full max-w-[calc(100%-2rem)] translate-x-[-50%] translate-y-[-50%] rounded-lg border shadow-lg duration-200 max-h-[calc(100vh-4rem)]",
|
||||
compact
|
||||
? "max-w-4xl p-4"
|
||||
: !hasCustomMaxWidth
|
||||
? "sm:max-w-2xl p-6"
|
||||
: "p-6",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
{showCloseButton && (
|
||||
<DialogPrimitive.Close
|
||||
data-slot="dialog-close"
|
||||
className={cn(
|
||||
"ring-offset-background focus:ring-ring data-[state=open]:bg-accent data-[state=open]:text-muted-foreground absolute rounded-xs opacity-70 transition-opacity cursor-pointer hover:opacity-100 focus:ring-2 focus:ring-offset-2 focus:outline-hidden disabled:pointer-events-none disabled:cursor-not-allowed [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
|
||||
compact ? "top-2 right-2" : "top-4 right-4"
|
||||
)}
|
||||
>
|
||||
<XIcon />
|
||||
<span className="sr-only">Close</span>
|
||||
</DialogPrimitive.Close>
|
||||
)}
|
||||
</DialogPrimitive.Content>
|
||||
</DialogPortal>
|
||||
);
|
||||
}
|
||||
|
||||
function DialogHeader({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="dialog-header"
|
||||
className={cn("flex flex-col gap-2 text-center sm:text-left", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function DialogFooter({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="dialog-footer"
|
||||
className={cn(
|
||||
"flex flex-col-reverse gap-2 sm:flex-row sm:justify-end",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function DialogTitle({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Title>) {
|
||||
return (
|
||||
<DialogPrimitive.Title
|
||||
data-slot="dialog-title"
|
||||
className={cn("text-lg leading-none font-semibold", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
function DialogDescription({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof DialogPrimitive.Description>) {
|
||||
return (
|
||||
<DialogPrimitive.Description
|
||||
data-slot="dialog-description"
|
||||
className={cn("text-muted-foreground text-sm", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export {
|
||||
Dialog,
|
||||
DialogClose,
|
||||
DialogContent,
|
||||
DialogDescription,
|
||||
DialogFooter,
|
||||
DialogHeader,
|
||||
DialogOverlay,
|
||||
DialogPortal,
|
||||
DialogTitle,
|
||||
DialogTrigger,
|
||||
};
|
||||
@@ -1,198 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as DropdownMenuPrimitive from "@radix-ui/react-dropdown-menu"
|
||||
import { Check, ChevronRight, Circle } from "lucide-react"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const DropdownMenu = DropdownMenuPrimitive.Root
|
||||
|
||||
const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger
|
||||
|
||||
const DropdownMenuGroup = DropdownMenuPrimitive.Group
|
||||
|
||||
const DropdownMenuPortal = DropdownMenuPrimitive.Portal
|
||||
|
||||
const DropdownMenuSub = DropdownMenuPrimitive.Sub
|
||||
|
||||
const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup
|
||||
|
||||
const DropdownMenuSubTrigger = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.SubTrigger>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubTrigger> & {
|
||||
inset?: boolean
|
||||
}
|
||||
>(({ className, inset, children, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.SubTrigger
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"flex cursor-pointer select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none focus:bg-accent data-[state=open]:bg-accent hover:bg-accent",
|
||||
inset && "pl-8",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
<ChevronRight className="ml-auto h-4 w-4" />
|
||||
</DropdownMenuPrimitive.SubTrigger>
|
||||
))
|
||||
DropdownMenuSubTrigger.displayName = DropdownMenuPrimitive.SubTrigger.displayName
|
||||
|
||||
const DropdownMenuSubContent = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.SubContent>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubContent>
|
||||
>(({ className, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.SubContent
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow-lg data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
))
|
||||
DropdownMenuSubContent.displayName = DropdownMenuPrimitive.SubContent.displayName
|
||||
|
||||
const DropdownMenuContent = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.Content>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Content>
|
||||
>(({ className, sideOffset = 4, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.Portal>
|
||||
<DropdownMenuPrimitive.Content
|
||||
ref={ref}
|
||||
sideOffset={sideOffset}
|
||||
className={cn(
|
||||
"z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow-md data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
</DropdownMenuPrimitive.Portal>
|
||||
))
|
||||
DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName
|
||||
|
||||
const DropdownMenuItem = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.Item>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Item> & {
|
||||
inset?: boolean
|
||||
}
|
||||
>(({ className, inset, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.Item
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"relative flex cursor-pointer select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50 data-[disabled]:cursor-not-allowed hover:bg-accent",
|
||||
inset && "pl-8",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
))
|
||||
DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName
|
||||
|
||||
const DropdownMenuCheckboxItem = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.CheckboxItem>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.CheckboxItem>
|
||||
>(({ className, children, checked, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.CheckboxItem
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50 data-[disabled]:cursor-not-allowed hover:bg-accent",
|
||||
className
|
||||
)}
|
||||
checked={checked}
|
||||
{...props}
|
||||
>
|
||||
<span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
|
||||
<DropdownMenuPrimitive.ItemIndicator>
|
||||
<Check className="h-4 w-4" />
|
||||
</DropdownMenuPrimitive.ItemIndicator>
|
||||
</span>
|
||||
{children}
|
||||
</DropdownMenuPrimitive.CheckboxItem>
|
||||
))
|
||||
DropdownMenuCheckboxItem.displayName =
|
||||
DropdownMenuPrimitive.CheckboxItem.displayName
|
||||
|
||||
const DropdownMenuRadioItem = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.RadioItem>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.RadioItem>
|
||||
>(({ className, children, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.RadioItem
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50 data-[disabled]:cursor-not-allowed hover:bg-accent",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
<span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
|
||||
<DropdownMenuPrimitive.ItemIndicator>
|
||||
<Circle className="h-2 w-2 fill-current" />
|
||||
</DropdownMenuPrimitive.ItemIndicator>
|
||||
</span>
|
||||
{children}
|
||||
</DropdownMenuPrimitive.RadioItem>
|
||||
))
|
||||
DropdownMenuRadioItem.displayName = DropdownMenuPrimitive.RadioItem.displayName
|
||||
|
||||
const DropdownMenuLabel = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.Label>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Label> & {
|
||||
inset?: boolean
|
||||
}
|
||||
>(({ className, inset, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.Label
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"px-2 py-1.5 text-sm font-semibold",
|
||||
inset && "pl-8",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
))
|
||||
DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName
|
||||
|
||||
const DropdownMenuSeparator = React.forwardRef<
|
||||
React.ElementRef<typeof DropdownMenuPrimitive.Separator>,
|
||||
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Separator>
|
||||
>(({ className, ...props }, ref) => (
|
||||
<DropdownMenuPrimitive.Separator
|
||||
ref={ref}
|
||||
className={cn("-mx-1 my-1 h-px bg-muted", className)}
|
||||
{...props}
|
||||
/>
|
||||
))
|
||||
DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName
|
||||
|
||||
const DropdownMenuShortcut = ({
|
||||
className,
|
||||
...props
|
||||
}: React.HTMLAttributes<HTMLSpanElement>) => {
|
||||
return (
|
||||
<span
|
||||
className={cn("ml-auto text-xs tracking-widest text-brand-400/70", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
DropdownMenuShortcut.displayName = "DropdownMenuShortcut"
|
||||
|
||||
export {
|
||||
DropdownMenu,
|
||||
DropdownMenuTrigger,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuItem,
|
||||
DropdownMenuCheckboxItem,
|
||||
DropdownMenuRadioItem,
|
||||
DropdownMenuLabel,
|
||||
DropdownMenuSeparator,
|
||||
DropdownMenuShortcut,
|
||||
DropdownMenuGroup,
|
||||
DropdownMenuPortal,
|
||||
DropdownMenuSub,
|
||||
DropdownMenuSubContent,
|
||||
DropdownMenuSubTrigger,
|
||||
DropdownMenuRadioGroup,
|
||||
}
|
||||
@@ -1,290 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import React, { useState, useRef, useCallback } from "react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { ImageIcon, X, Upload } from "lucide-react";
|
||||
import type { ImageAttachment } from "@/store/app-store";
|
||||
|
||||
interface ImageDropZoneProps {
|
||||
onImagesSelected: (images: ImageAttachment[]) => void;
|
||||
maxFiles?: number;
|
||||
maxFileSize?: number; // in bytes, default 10MB
|
||||
className?: string;
|
||||
children?: React.ReactNode;
|
||||
disabled?: boolean;
|
||||
images?: ImageAttachment[]; // Optional controlled images prop
|
||||
}
|
||||
|
||||
const ACCEPTED_IMAGE_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/webp'];
|
||||
const DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
|
||||
|
||||
export function ImageDropZone({
|
||||
onImagesSelected,
|
||||
maxFiles = 5,
|
||||
maxFileSize = DEFAULT_MAX_FILE_SIZE,
|
||||
className,
|
||||
children,
|
||||
disabled = false,
|
||||
images,
|
||||
}: ImageDropZoneProps) {
|
||||
const [isDragOver, setIsDragOver] = useState(false);
|
||||
const [isProcessing, setIsProcessing] = useState(false);
|
||||
const [internalImages, setInternalImages] = useState<ImageAttachment[]>([]);
|
||||
const fileInputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
// Use controlled images if provided, otherwise use internal state
|
||||
const selectedImages = images ?? internalImages;
|
||||
|
||||
// Update images - for controlled mode, just call the callback; for uncontrolled, also update internal state
|
||||
const updateImages = useCallback((newImages: ImageAttachment[]) => {
|
||||
if (images === undefined) {
|
||||
setInternalImages(newImages);
|
||||
}
|
||||
onImagesSelected(newImages);
|
||||
}, [images, onImagesSelected]);
|
||||
|
||||
const processFiles = useCallback(async (files: FileList) => {
|
||||
if (disabled || isProcessing) return;
|
||||
|
||||
setIsProcessing(true);
|
||||
const newImages: ImageAttachment[] = [];
|
||||
const errors: string[] = [];
|
||||
|
||||
for (const file of Array.from(files)) {
|
||||
// Validate file type
|
||||
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
|
||||
errors.push(`${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate file size
|
||||
if (file.size > maxFileSize) {
|
||||
const maxSizeMB = maxFileSize / (1024 * 1024);
|
||||
errors.push(`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if we've reached max files
|
||||
if (newImages.length + selectedImages.length >= maxFiles) {
|
||||
errors.push(`Maximum ${maxFiles} images allowed.`);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const base64 = await fileToBase64(file);
|
||||
const imageAttachment: ImageAttachment = {
|
||||
id: `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
|
||||
data: base64,
|
||||
mimeType: file.type,
|
||||
filename: file.name,
|
||||
size: file.size,
|
||||
};
|
||||
newImages.push(imageAttachment);
|
||||
} catch (error) {
|
||||
errors.push(`${file.name}: Failed to process image.`);
|
||||
}
|
||||
}
|
||||
|
||||
if (errors.length > 0) {
|
||||
console.warn('Image upload errors:', errors);
|
||||
// You could show these errors to the user via a toast or notification
|
||||
}
|
||||
|
||||
if (newImages.length > 0) {
|
||||
const allImages = [...selectedImages, ...newImages];
|
||||
updateImages(allImages);
|
||||
}
|
||||
|
||||
setIsProcessing(false);
|
||||
}, [disabled, isProcessing, maxFiles, maxFileSize, selectedImages, updateImages]);
|
||||
|
||||
const handleDrop = useCallback((e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDragOver(false);
|
||||
|
||||
if (disabled) return;
|
||||
|
||||
const files = e.dataTransfer.files;
|
||||
if (files.length > 0) {
|
||||
processFiles(files);
|
||||
}
|
||||
}, [disabled, processFiles]);
|
||||
|
||||
const handleDragOver = useCallback((e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
if (!disabled) {
|
||||
setIsDragOver(true);
|
||||
}
|
||||
}, [disabled]);
|
||||
|
||||
const handleDragLeave = useCallback((e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDragOver(false);
|
||||
}, []);
|
||||
|
||||
const handleFileSelect = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const files = e.target.files;
|
||||
if (files && files.length > 0) {
|
||||
processFiles(files);
|
||||
}
|
||||
// Reset the input so the same file can be selected again
|
||||
if (fileInputRef.current) {
|
||||
fileInputRef.current.value = '';
|
||||
}
|
||||
}, [processFiles]);
|
||||
|
||||
const handleBrowseClick = useCallback(() => {
|
||||
if (!disabled && fileInputRef.current) {
|
||||
fileInputRef.current.click();
|
||||
}
|
||||
}, [disabled]);
|
||||
|
||||
const removeImage = useCallback((imageId: string) => {
|
||||
const updated = selectedImages.filter(img => img.id !== imageId);
|
||||
updateImages(updated);
|
||||
}, [selectedImages, updateImages]);
|
||||
|
||||
const clearAllImages = useCallback(() => {
|
||||
updateImages([]);
|
||||
}, [updateImages]);
|
||||
|
||||
return (
|
||||
<div className={cn("relative", className)}>
|
||||
{/* Hidden file input */}
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type="file"
|
||||
multiple
|
||||
accept={ACCEPTED_IMAGE_TYPES.join(',')}
|
||||
onChange={handleFileSelect}
|
||||
className="hidden"
|
||||
disabled={disabled}
|
||||
/>
|
||||
|
||||
{/* Drop zone */}
|
||||
<div
|
||||
onDrop={handleDrop}
|
||||
onDragOver={handleDragOver}
|
||||
onDragLeave={handleDragLeave}
|
||||
className={cn(
|
||||
"relative rounded-lg border-2 border-dashed transition-all duration-200",
|
||||
{
|
||||
"border-blue-400 bg-blue-50 dark:bg-blue-950/20": isDragOver && !disabled,
|
||||
"border-muted-foreground/25": !isDragOver && !disabled,
|
||||
"border-muted-foreground/10 opacity-50 cursor-not-allowed": disabled,
|
||||
"hover:border-blue-400 hover:bg-blue-50/50 dark:hover:bg-blue-950/10": !disabled && !isDragOver,
|
||||
}
|
||||
)}
|
||||
>
|
||||
{children || (
|
||||
<div className="flex flex-col items-center justify-center p-6 text-center">
|
||||
<div className={cn(
|
||||
"rounded-full p-3 mb-4",
|
||||
isDragOver && !disabled ? "bg-blue-100 dark:bg-blue-900/30" : "bg-muted"
|
||||
)}>
|
||||
{isProcessing ? (
|
||||
<Upload className="h-6 w-6 animate-spin text-muted-foreground" />
|
||||
) : (
|
||||
<ImageIcon className="h-6 w-6 text-muted-foreground" />
|
||||
)}
|
||||
</div>
|
||||
<p className="text-sm font-medium text-foreground mb-1">
|
||||
{isDragOver && !disabled ? "Drop your images here" : "Drag images here or click to browse"}
|
||||
</p>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{maxFiles > 1 ? `Up to ${maxFiles} images` : "1 image"}, max {Math.round(maxFileSize / (1024 * 1024))}MB each
|
||||
</p>
|
||||
{!disabled && (
|
||||
<button
|
||||
onClick={handleBrowseClick}
|
||||
className="mt-2 text-xs text-blue-600 hover:text-blue-700 dark:text-blue-400 dark:hover:text-blue-300"
|
||||
disabled={isProcessing}
|
||||
>
|
||||
Browse files
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Image previews */}
|
||||
{selectedImages.length > 0 && (
|
||||
<div className="mt-3 space-y-2">
|
||||
<div className="flex items-center justify-between">
|
||||
<p className="text-xs font-medium text-foreground">
|
||||
{selectedImages.length} image{selectedImages.length > 1 ? 's' : ''} selected
|
||||
</p>
|
||||
<button
|
||||
onClick={clearAllImages}
|
||||
className="text-xs text-muted-foreground hover:text-foreground"
|
||||
disabled={disabled}
|
||||
>
|
||||
Clear all
|
||||
</button>
|
||||
</div>
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{selectedImages.map((image) => (
|
||||
<div
|
||||
key={image.id}
|
||||
className="relative group rounded-md border border-muted bg-muted/50 p-2 flex items-center space-x-2"
|
||||
>
|
||||
{/* Image thumbnail */}
|
||||
<div className="w-8 h-8 rounded overflow-hidden bg-muted flex-shrink-0">
|
||||
<img
|
||||
src={image.data}
|
||||
alt={image.filename}
|
||||
className="w-full h-full object-cover"
|
||||
/>
|
||||
</div>
|
||||
{/* Image info */}
|
||||
<div className="min-w-0 flex-1">
|
||||
<p className="text-xs font-medium text-foreground truncate">
|
||||
{image.filename}
|
||||
</p>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{formatFileSize(image.size)}
|
||||
</p>
|
||||
</div>
|
||||
{/* Remove button */}
|
||||
{!disabled && (
|
||||
<button
|
||||
onClick={() => removeImage(image.id)}
|
||||
className="opacity-0 group-hover:opacity-100 transition-opacity p-1 rounded-full hover:bg-destructive hover:text-destructive-foreground text-muted-foreground"
|
||||
>
|
||||
<X className="h-3 w-3" />
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function fileToBase64(file: File): Promise<string> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = () => {
|
||||
if (typeof reader.result === 'string') {
|
||||
resolve(reader.result);
|
||||
} else {
|
||||
reject(new Error('Failed to read file as base64'));
|
||||
}
|
||||
};
|
||||
reader.onerror = () => reject(new Error('Failed to read file'));
|
||||
reader.readAsDataURL(file);
|
||||
});
|
||||
}
|
||||
|
||||
function formatFileSize(bytes: number): string {
|
||||
if (bytes === 0) return '0 B';
|
||||
const k = 1024;
|
||||
const sizes = ['B', 'KB', 'MB', 'GB'];
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k));
|
||||
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
import * as React from "react"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Input({ className, type, ...props }: React.ComponentProps<"input">) {
|
||||
return (
|
||||
<input
|
||||
type={type}
|
||||
data-slot="input"
|
||||
className={cn(
|
||||
"file:text-foreground placeholder:text-muted-foreground selection:bg-primary selection:text-primary-foreground bg-input border-input h-9 w-full min-w-0 rounded-md border px-3 py-1 text-base shadow-xs transition-[color,box-shadow] outline-none file:inline-flex file:h-7 file:border-0 file:bg-transparent file:text-sm file:font-medium disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50 md:text-sm",
|
||||
"focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px]",
|
||||
"aria-invalid:ring-destructive/20 aria-invalid:border-destructive",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Input }
|
||||
@@ -1,639 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import * as React from "react";
|
||||
import { useAppStore, DEFAULT_KEYBOARD_SHORTCUTS, parseShortcut, formatShortcut } from "@/store/app-store";
|
||||
import type { KeyboardShortcuts } from "@/store/app-store";
|
||||
import { cn } from "@/lib/utils";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "@/components/ui/tooltip";
|
||||
import { Input } from "@/components/ui/input";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { CheckCircle2, X, RotateCcw, Edit2 } from "lucide-react";
|
||||
import { Checkbox } from "@/components/ui/checkbox";
|
||||
import { Label } from "@/components/ui/label";
|
||||
|
||||
// Detect if running on Mac
|
||||
const isMac = typeof navigator !== 'undefined' && navigator.platform.toUpperCase().indexOf('MAC') >= 0;
|
||||
|
||||
// Keyboard layout - US QWERTY
|
||||
const KEYBOARD_ROWS = [
|
||||
// Number row
|
||||
[
|
||||
{ key: "`", label: "`", width: 1 },
|
||||
{ key: "1", label: "1", width: 1 },
|
||||
{ key: "2", label: "2", width: 1 },
|
||||
{ key: "3", label: "3", width: 1 },
|
||||
{ key: "4", label: "4", width: 1 },
|
||||
{ key: "5", label: "5", width: 1 },
|
||||
{ key: "6", label: "6", width: 1 },
|
||||
{ key: "7", label: "7", width: 1 },
|
||||
{ key: "8", label: "8", width: 1 },
|
||||
{ key: "9", label: "9", width: 1 },
|
||||
{ key: "0", label: "0", width: 1 },
|
||||
{ key: "-", label: "-", width: 1 },
|
||||
{ key: "=", label: "=", width: 1 },
|
||||
],
|
||||
// Top letter row
|
||||
[
|
||||
{ key: "Q", label: "Q", width: 1 },
|
||||
{ key: "W", label: "W", width: 1 },
|
||||
{ key: "E", label: "E", width: 1 },
|
||||
{ key: "R", label: "R", width: 1 },
|
||||
{ key: "T", label: "T", width: 1 },
|
||||
{ key: "Y", label: "Y", width: 1 },
|
||||
{ key: "U", label: "U", width: 1 },
|
||||
{ key: "I", label: "I", width: 1 },
|
||||
{ key: "O", label: "O", width: 1 },
|
||||
{ key: "P", label: "P", width: 1 },
|
||||
{ key: "[", label: "[", width: 1 },
|
||||
{ key: "]", label: "]", width: 1 },
|
||||
{ key: "\\", label: "\\", width: 1 },
|
||||
],
|
||||
// Home row
|
||||
[
|
||||
{ key: "A", label: "A", width: 1 },
|
||||
{ key: "S", label: "S", width: 1 },
|
||||
{ key: "D", label: "D", width: 1 },
|
||||
{ key: "F", label: "F", width: 1 },
|
||||
{ key: "G", label: "G", width: 1 },
|
||||
{ key: "H", label: "H", width: 1 },
|
||||
{ key: "J", label: "J", width: 1 },
|
||||
{ key: "K", label: "K", width: 1 },
|
||||
{ key: "L", label: "L", width: 1 },
|
||||
{ key: ";", label: ";", width: 1 },
|
||||
{ key: "'", label: "'", width: 1 },
|
||||
],
|
||||
// Bottom letter row
|
||||
[
|
||||
{ key: "Z", label: "Z", width: 1 },
|
||||
{ key: "X", label: "X", width: 1 },
|
||||
{ key: "C", label: "C", width: 1 },
|
||||
{ key: "V", label: "V", width: 1 },
|
||||
{ key: "B", label: "B", width: 1 },
|
||||
{ key: "N", label: "N", width: 1 },
|
||||
{ key: "M", label: "M", width: 1 },
|
||||
{ key: ",", label: ",", width: 1 },
|
||||
{ key: ".", label: ".", width: 1 },
|
||||
{ key: "/", label: "/", width: 1 },
|
||||
],
|
||||
];
|
||||
|
||||
// Map shortcut names to human-readable labels
|
||||
const SHORTCUT_LABELS: Record<keyof KeyboardShortcuts, string> = {
|
||||
board: "Kanban Board",
|
||||
agent: "Agent Runner",
|
||||
spec: "Spec Editor",
|
||||
context: "Context",
|
||||
tools: "Agent Tools",
|
||||
settings: "Settings",
|
||||
profiles: "AI Profiles",
|
||||
toggleSidebar: "Toggle Sidebar",
|
||||
addFeature: "Add Feature",
|
||||
addContextFile: "Add Context File",
|
||||
startNext: "Start Next",
|
||||
newSession: "New Session",
|
||||
openProject: "Open Project",
|
||||
projectPicker: "Project Picker",
|
||||
cyclePrevProject: "Prev Project",
|
||||
cycleNextProject: "Next Project",
|
||||
addProfile: "Add Profile",
|
||||
};
|
||||
|
||||
// Categorize shortcuts for color coding
|
||||
const SHORTCUT_CATEGORIES: Record<keyof KeyboardShortcuts, "navigation" | "ui" | "action"> = {
|
||||
board: "navigation",
|
||||
agent: "navigation",
|
||||
spec: "navigation",
|
||||
context: "navigation",
|
||||
tools: "navigation",
|
||||
settings: "navigation",
|
||||
profiles: "navigation",
|
||||
toggleSidebar: "ui",
|
||||
addFeature: "action",
|
||||
addContextFile: "action",
|
||||
startNext: "action",
|
||||
newSession: "action",
|
||||
openProject: "action",
|
||||
projectPicker: "action",
|
||||
cyclePrevProject: "action",
|
||||
cycleNextProject: "action",
|
||||
addProfile: "action",
|
||||
};
|
||||
|
||||
// Category colors
|
||||
const CATEGORY_COLORS = {
|
||||
navigation: {
|
||||
bg: "bg-blue-500/20",
|
||||
border: "border-blue-500/50",
|
||||
text: "text-blue-400",
|
||||
label: "Navigation",
|
||||
},
|
||||
ui: {
|
||||
bg: "bg-purple-500/20",
|
||||
border: "border-purple-500/50",
|
||||
text: "text-purple-400",
|
||||
label: "UI Controls",
|
||||
},
|
||||
action: {
|
||||
bg: "bg-green-500/20",
|
||||
border: "border-green-500/50",
|
||||
text: "text-green-400",
|
||||
label: "Actions",
|
||||
},
|
||||
};
|
||||
|
||||
interface KeyboardMapProps {
|
||||
onKeySelect?: (key: string) => void;
|
||||
selectedKey?: string | null;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
export function KeyboardMap({ onKeySelect, selectedKey, className }: KeyboardMapProps) {
|
||||
const { keyboardShortcuts } = useAppStore();
|
||||
|
||||
// Create a reverse map: base key -> list of shortcut names (including info about modifiers)
|
||||
const keyToShortcuts = React.useMemo(() => {
|
||||
const map: Record<string, Array<{ name: keyof KeyboardShortcuts; hasModifiers: boolean }>> = {};
|
||||
(Object.entries(keyboardShortcuts) as [keyof KeyboardShortcuts, string][]).forEach(
|
||||
([shortcutName, shortcutStr]) => {
|
||||
const parsed = parseShortcut(shortcutStr);
|
||||
const normalizedKey = parsed.key.toUpperCase();
|
||||
const hasModifiers = !!(parsed.shift || parsed.cmdCtrl || parsed.alt);
|
||||
if (!map[normalizedKey]) {
|
||||
map[normalizedKey] = [];
|
||||
}
|
||||
map[normalizedKey].push({ name: shortcutName, hasModifiers });
|
||||
}
|
||||
);
|
||||
return map;
|
||||
}, [keyboardShortcuts]);
|
||||
|
||||
const renderKey = (keyDef: { key: string; label: string; width: number }) => {
|
||||
const normalizedKey = keyDef.key.toUpperCase();
|
||||
const shortcutInfos = keyToShortcuts[normalizedKey] || [];
|
||||
const shortcuts = shortcutInfos.map(s => s.name);
|
||||
const isBound = shortcuts.length > 0;
|
||||
const isSelected = selectedKey?.toUpperCase() === normalizedKey;
|
||||
const isModified = shortcuts.some(
|
||||
(s) => keyboardShortcuts[s] !== DEFAULT_KEYBOARD_SHORTCUTS[s]
|
||||
);
|
||||
|
||||
// Get category for coloring (use first shortcut's category if multiple)
|
||||
const category = shortcuts.length > 0 ? SHORTCUT_CATEGORIES[shortcuts[0]] : null;
|
||||
const colors = category ? CATEGORY_COLORS[category] : null;
|
||||
|
||||
const keyElement = (
|
||||
<button
|
||||
key={keyDef.key}
|
||||
onClick={() => onKeySelect?.(keyDef.key)}
|
||||
className={cn(
|
||||
"relative flex flex-col items-center justify-center rounded-lg border transition-all",
|
||||
"h-12 min-w-11 py-1",
|
||||
keyDef.width > 1 && `w-[${keyDef.width * 2.75}rem]`,
|
||||
// Base styles
|
||||
!isBound && "bg-sidebar-accent/10 border-sidebar-border hover:bg-sidebar-accent/20",
|
||||
// Bound key styles
|
||||
isBound && colors && `${colors.bg} ${colors.border} hover:brightness-110`,
|
||||
// Selected state
|
||||
isSelected && "ring-2 ring-brand-500 ring-offset-2 ring-offset-background",
|
||||
// Modified indicator
|
||||
isModified && "ring-1 ring-yellow-500/50"
|
||||
)}
|
||||
data-testid={`keyboard-key-${keyDef.key}`}
|
||||
>
|
||||
{/* Key label - always at top */}
|
||||
<span
|
||||
className={cn(
|
||||
"text-sm font-mono font-bold leading-none",
|
||||
isBound && colors ? colors.text : "text-muted-foreground"
|
||||
)}
|
||||
>
|
||||
{keyDef.label}
|
||||
</span>
|
||||
{/* Shortcut label - always takes up space to maintain consistent height */}
|
||||
<span
|
||||
className={cn(
|
||||
"text-[9px] leading-tight text-center px-0.5 truncate max-w-full h-3 mt-0.5",
|
||||
isBound && shortcuts.length > 0
|
||||
? (colors ? colors.text : "text-muted-foreground")
|
||||
: "opacity-0"
|
||||
)}
|
||||
>
|
||||
{isBound && shortcuts.length > 0
|
||||
? (shortcuts.length === 1
|
||||
? SHORTCUT_LABELS[shortcuts[0]].split(" ")[0]
|
||||
: `${shortcuts.length}x`)
|
||||
: "\u00A0" // Non-breaking space to maintain height
|
||||
}
|
||||
</span>
|
||||
{isModified && (
|
||||
<span className="absolute -top-1 -right-1 w-2 h-2 rounded-full bg-yellow-500" />
|
||||
)}
|
||||
</button>
|
||||
);
|
||||
|
||||
// Wrap in tooltip if bound
|
||||
if (isBound) {
|
||||
return (
|
||||
<Tooltip key={keyDef.key}>
|
||||
<TooltipTrigger asChild>{keyElement}</TooltipTrigger>
|
||||
<TooltipContent side="top" className="max-w-xs">
|
||||
<div className="space-y-1">
|
||||
{shortcuts.map((shortcut) => {
|
||||
const shortcutStr = keyboardShortcuts[shortcut];
|
||||
const displayShortcut = formatShortcut(shortcutStr, true);
|
||||
return (
|
||||
<div key={shortcut} className="flex items-center gap-2">
|
||||
<span
|
||||
className={cn(
|
||||
"w-2 h-2 rounded-full",
|
||||
CATEGORY_COLORS[SHORTCUT_CATEGORIES[shortcut]].bg.replace("/20", "")
|
||||
)}
|
||||
/>
|
||||
<span className="text-sm">{SHORTCUT_LABELS[shortcut]}</span>
|
||||
<kbd className="text-xs font-mono bg-sidebar-accent/30 px-1 rounded">
|
||||
{displayShortcut}
|
||||
</kbd>
|
||||
{keyboardShortcuts[shortcut] !== DEFAULT_KEYBOARD_SHORTCUTS[shortcut] && (
|
||||
<span className="text-xs text-yellow-400">(custom)</span>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
|
||||
return keyElement;
|
||||
};
|
||||
|
||||
return (
|
||||
<TooltipProvider>
|
||||
<div className={cn("space-y-4", className)} data-testid="keyboard-map">
|
||||
{/* Legend */}
|
||||
<div className="flex flex-wrap gap-4 justify-center text-xs">
|
||||
{Object.entries(CATEGORY_COLORS).map(([key, colors]) => (
|
||||
<div key={key} className="flex items-center gap-2">
|
||||
<div
|
||||
className={cn(
|
||||
"w-4 h-4 rounded border",
|
||||
colors.bg,
|
||||
colors.border
|
||||
)}
|
||||
/>
|
||||
<span className={colors.text}>{colors.label}</span>
|
||||
</div>
|
||||
))}
|
||||
<div className="flex items-center gap-2">
|
||||
<div className="w-4 h-4 rounded bg-sidebar-accent/10 border border-sidebar-border" />
|
||||
<span className="text-muted-foreground">Available</span>
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
<div className="w-2 h-2 rounded-full bg-yellow-500" />
|
||||
<span className="text-yellow-400">Modified</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Keyboard layout */}
|
||||
<div className="flex flex-col items-center gap-1.5 p-4 rounded-xl bg-sidebar-accent/5 border border-sidebar-border">
|
||||
{KEYBOARD_ROWS.map((row, rowIndex) => (
|
||||
<div key={rowIndex} className="flex gap-1.5 justify-center">
|
||||
{row.map(renderKey)}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
|
||||
{/* Stats */}
|
||||
<div className="flex justify-center gap-6 text-xs text-muted-foreground">
|
||||
<span>
|
||||
<strong className="text-foreground">{Object.keys(keyboardShortcuts).length}</strong> shortcuts
|
||||
configured
|
||||
</span>
|
||||
<span>
|
||||
<strong className="text-foreground">
|
||||
{Object.keys(keyToShortcuts).length}
|
||||
</strong>{" "}
|
||||
keys in use
|
||||
</span>
|
||||
<span>
|
||||
<strong className="text-foreground">
|
||||
{KEYBOARD_ROWS.flat().length - Object.keys(keyToShortcuts).length}
|
||||
</strong>{" "}
|
||||
keys available
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</TooltipProvider>
|
||||
);
|
||||
}
|
||||
|
||||
// Full shortcut reference panel with editing capability
|
||||
interface ShortcutReferencePanelProps {
|
||||
editable?: boolean;
|
||||
}
|
||||
|
||||
export function ShortcutReferencePanel({ editable = false }: ShortcutReferencePanelProps) {
|
||||
const { keyboardShortcuts, setKeyboardShortcut, resetKeyboardShortcuts } = useAppStore();
|
||||
const [editingShortcut, setEditingShortcut] = React.useState<keyof KeyboardShortcuts | null>(null);
|
||||
const [keyValue, setKeyValue] = React.useState("");
|
||||
const [modifiers, setModifiers] = React.useState({ shift: false, cmdCtrl: false, alt: false });
|
||||
const [shortcutError, setShortcutError] = React.useState<string | null>(null);
|
||||
|
||||
const groupedShortcuts = React.useMemo(() => {
|
||||
const groups: Record<string, Array<{ key: keyof KeyboardShortcuts; label: string; value: string }>> = {
|
||||
navigation: [],
|
||||
ui: [],
|
||||
action: [],
|
||||
};
|
||||
|
||||
(Object.entries(SHORTCUT_CATEGORIES) as [keyof KeyboardShortcuts, string][]).forEach(
|
||||
([shortcut, category]) => {
|
||||
groups[category].push({
|
||||
key: shortcut,
|
||||
label: SHORTCUT_LABELS[shortcut],
|
||||
value: keyboardShortcuts[shortcut],
|
||||
});
|
||||
}
|
||||
);
|
||||
|
||||
return groups;
|
||||
}, [keyboardShortcuts]);
|
||||
|
||||
// Build the full shortcut string from key + modifiers
|
||||
const buildShortcutString = React.useCallback((key: string, mods: typeof modifiers) => {
|
||||
const parts: string[] = [];
|
||||
if (mods.cmdCtrl) parts.push(isMac ? "Cmd" : "Ctrl");
|
||||
if (mods.alt) parts.push(isMac ? "Opt" : "Alt");
|
||||
if (mods.shift) parts.push("Shift");
|
||||
parts.push(key.toUpperCase());
|
||||
return parts.join("+");
|
||||
}, []);
|
||||
|
||||
// Check for conflicts with other shortcuts
|
||||
const checkConflict = React.useCallback((shortcutStr: string, currentKey: keyof KeyboardShortcuts) => {
|
||||
const conflict = Object.entries(keyboardShortcuts).find(
|
||||
([k, v]) => k !== currentKey && v.toUpperCase() === shortcutStr.toUpperCase()
|
||||
);
|
||||
return conflict ? SHORTCUT_LABELS[conflict[0] as keyof KeyboardShortcuts] : null;
|
||||
}, [keyboardShortcuts]);
|
||||
|
||||
const handleStartEdit = (key: keyof KeyboardShortcuts) => {
|
||||
const currentValue = keyboardShortcuts[key];
|
||||
const parsed = parseShortcut(currentValue);
|
||||
setEditingShortcut(key);
|
||||
setKeyValue(parsed.key);
|
||||
setModifiers({
|
||||
shift: parsed.shift || false,
|
||||
cmdCtrl: parsed.cmdCtrl || false,
|
||||
alt: parsed.alt || false,
|
||||
});
|
||||
setShortcutError(null);
|
||||
};
|
||||
|
||||
const handleSaveShortcut = () => {
|
||||
if (!editingShortcut || shortcutError || !keyValue) return;
|
||||
const shortcutStr = buildShortcutString(keyValue, modifiers);
|
||||
setKeyboardShortcut(editingShortcut, shortcutStr);
|
||||
setEditingShortcut(null);
|
||||
setKeyValue("");
|
||||
setModifiers({ shift: false, cmdCtrl: false, alt: false });
|
||||
setShortcutError(null);
|
||||
};
|
||||
|
||||
const handleCancelEdit = () => {
|
||||
setEditingShortcut(null);
|
||||
setKeyValue("");
|
||||
setModifiers({ shift: false, cmdCtrl: false, alt: false });
|
||||
setShortcutError(null);
|
||||
};
|
||||
|
||||
const handleKeyChange = (value: string, currentKey: keyof KeyboardShortcuts) => {
|
||||
setKeyValue(value);
|
||||
// Check for conflicts with full shortcut string
|
||||
if (!value) {
|
||||
setShortcutError("Key cannot be empty");
|
||||
} else {
|
||||
const shortcutStr = buildShortcutString(value, modifiers);
|
||||
const conflictLabel = checkConflict(shortcutStr, currentKey);
|
||||
if (conflictLabel) {
|
||||
setShortcutError(`Already used by "${conflictLabel}"`);
|
||||
} else {
|
||||
setShortcutError(null);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const handleModifierChange = (modifier: keyof typeof modifiers, checked: boolean, currentKey: keyof KeyboardShortcuts) => {
|
||||
// Enforce single modifier: when checking, uncheck all others (radio-button behavior)
|
||||
const newModifiers = checked
|
||||
? { shift: false, cmdCtrl: false, alt: false, [modifier]: true }
|
||||
: { ...modifiers, [modifier]: false };
|
||||
|
||||
setModifiers(newModifiers);
|
||||
|
||||
// Recheck for conflicts
|
||||
if (keyValue) {
|
||||
const shortcutStr = buildShortcutString(keyValue, newModifiers);
|
||||
const conflictLabel = checkConflict(shortcutStr, currentKey);
|
||||
if (conflictLabel) {
|
||||
setShortcutError(`Already used by "${conflictLabel}"`);
|
||||
} else {
|
||||
setShortcutError(null);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const handleKeyDown = (e: React.KeyboardEvent) => {
|
||||
if (e.key === "Enter" && !shortcutError && keyValue) {
|
||||
handleSaveShortcut();
|
||||
} else if (e.key === "Escape") {
|
||||
handleCancelEdit();
|
||||
}
|
||||
};
|
||||
|
||||
const handleResetShortcut = (key: keyof KeyboardShortcuts) => {
|
||||
setKeyboardShortcut(key, DEFAULT_KEYBOARD_SHORTCUTS[key]);
|
||||
};
|
||||
|
||||
return (
|
||||
<TooltipProvider>
|
||||
<div className="space-y-4" data-testid="shortcut-reference-panel">
|
||||
{editable && (
|
||||
<div className="flex justify-end">
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={() => resetKeyboardShortcuts()}
|
||||
className="gap-2 text-xs"
|
||||
data-testid="reset-all-shortcuts-button"
|
||||
>
|
||||
<RotateCcw className="w-3 h-3" />
|
||||
Reset All to Defaults
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
{Object.entries(groupedShortcuts).map(([category, shortcuts]) => {
|
||||
const colors = CATEGORY_COLORS[category as keyof typeof CATEGORY_COLORS];
|
||||
return (
|
||||
<div key={category} className="space-y-2">
|
||||
<h4 className={cn("text-sm font-semibold", colors.text)}>
|
||||
{colors.label}
|
||||
</h4>
|
||||
<div className="grid grid-cols-2 gap-2">
|
||||
{shortcuts.map(({ key, label, value }) => {
|
||||
const isModified = keyboardShortcuts[key] !== DEFAULT_KEYBOARD_SHORTCUTS[key];
|
||||
const isEditing = editingShortcut === key;
|
||||
|
||||
return (
|
||||
<div
|
||||
key={key}
|
||||
className={cn(
|
||||
"flex items-center justify-between p-2 rounded-lg bg-sidebar-accent/10 border transition-colors",
|
||||
isEditing ? "border-brand-500" : "border-sidebar-border",
|
||||
editable && !isEditing && "hover:bg-sidebar-accent/20 cursor-pointer"
|
||||
)}
|
||||
onClick={() => editable && !isEditing && handleStartEdit(key)}
|
||||
data-testid={`shortcut-row-${key}`}
|
||||
>
|
||||
<span className="text-sm text-foreground">{label}</span>
|
||||
<div className="flex items-center gap-2">
|
||||
{isEditing ? (
|
||||
<div className="flex items-center gap-2" onClick={(e) => e.stopPropagation()}>
|
||||
{/* Modifier checkboxes */}
|
||||
<div className="flex items-center gap-1.5 text-xs">
|
||||
<div className="flex items-center gap-1">
|
||||
<Checkbox
|
||||
id={`mod-cmd-${key}`}
|
||||
checked={modifiers.cmdCtrl}
|
||||
onCheckedChange={(checked) => handleModifierChange("cmdCtrl", !!checked, key)}
|
||||
className="h-3.5 w-3.5"
|
||||
/>
|
||||
<Label htmlFor={`mod-cmd-${key}`} className="text-xs text-muted-foreground cursor-pointer">
|
||||
{isMac ? "⌘" : "Ctrl"}
|
||||
</Label>
|
||||
</div>
|
||||
<div className="flex items-center gap-1">
|
||||
<Checkbox
|
||||
id={`mod-alt-${key}`}
|
||||
checked={modifiers.alt}
|
||||
onCheckedChange={(checked) => handleModifierChange("alt", !!checked, key)}
|
||||
className="h-3.5 w-3.5"
|
||||
/>
|
||||
<Label htmlFor={`mod-alt-${key}`} className="text-xs text-muted-foreground cursor-pointer">
|
||||
{isMac ? "⌥" : "Alt"}
|
||||
</Label>
|
||||
</div>
|
||||
<div className="flex items-center gap-1">
|
||||
<Checkbox
|
||||
id={`mod-shift-${key}`}
|
||||
checked={modifiers.shift}
|
||||
onCheckedChange={(checked) => handleModifierChange("shift", !!checked, key)}
|
||||
className="h-3.5 w-3.5"
|
||||
/>
|
||||
<Label htmlFor={`mod-shift-${key}`} className="text-xs text-muted-foreground cursor-pointer">
|
||||
⇧
|
||||
</Label>
|
||||
</div>
|
||||
</div>
|
||||
<span className="text-muted-foreground">+</span>
|
||||
<Input
|
||||
value={keyValue}
|
||||
onChange={(e) => handleKeyChange(e.target.value, key)}
|
||||
onKeyDown={handleKeyDown}
|
||||
className={cn(
|
||||
"w-12 h-7 text-center font-mono text-xs uppercase",
|
||||
shortcutError && "border-red-500 focus-visible:ring-red-500"
|
||||
)}
|
||||
placeholder="Key"
|
||||
maxLength={1}
|
||||
autoFocus
|
||||
data-testid={`edit-shortcut-input-${key}`}
|
||||
/>
|
||||
<Button
|
||||
size="sm"
|
||||
variant="ghost"
|
||||
className="h-7 w-7 p-0 hover:bg-green-500/20 hover:text-green-400"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
handleSaveShortcut();
|
||||
}}
|
||||
disabled={!!shortcutError || !keyValue}
|
||||
data-testid={`save-shortcut-${key}`}
|
||||
>
|
||||
<CheckCircle2 className="w-4 h-4" />
|
||||
</Button>
|
||||
<Button
|
||||
size="sm"
|
||||
variant="ghost"
|
||||
className="h-7 w-7 p-0 hover:bg-red-500/20 hover:text-red-400"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
handleCancelEdit();
|
||||
}}
|
||||
data-testid={`cancel-shortcut-${key}`}
|
||||
>
|
||||
<X className="w-4 h-4" />
|
||||
</Button>
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<kbd
|
||||
className={cn(
|
||||
"px-2 py-1 text-xs font-mono rounded border",
|
||||
colors.bg,
|
||||
colors.border,
|
||||
colors.text
|
||||
)}
|
||||
>
|
||||
{formatShortcut(value, true)}
|
||||
</kbd>
|
||||
{isModified && editable && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
size="sm"
|
||||
variant="ghost"
|
||||
className="h-6 w-6 p-0 hover:bg-yellow-500/20 hover:text-yellow-400"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
handleResetShortcut(key);
|
||||
}}
|
||||
data-testid={`reset-shortcut-${key}`}
|
||||
>
|
||||
<RotateCcw className="w-3 h-3" />
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent side="top">
|
||||
Reset to default ({DEFAULT_KEYBOARD_SHORTCUTS[key]})
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
{isModified && !editable && (
|
||||
<span className="w-2 h-2 rounded-full bg-yellow-500" />
|
||||
)}
|
||||
{editable && !isModified && (
|
||||
<Edit2 className="w-3 h-3 text-muted-foreground opacity-0 group-hover:opacity-100" />
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
{editingShortcut && shortcutError && SHORTCUT_CATEGORIES[editingShortcut] === category && (
|
||||
<p className="text-xs text-red-400 mt-1">{shortcutError}</p>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</TooltipProvider>
|
||||
);
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as LabelPrimitive from "@radix-ui/react-label"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Label({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof LabelPrimitive.Root>) {
|
||||
return (
|
||||
<LabelPrimitive.Root
|
||||
data-slot="label"
|
||||
className={cn(
|
||||
"flex items-center gap-2 text-sm leading-none font-medium select-none group-data-[disabled=true]:pointer-events-none group-data-[disabled=true]:opacity-50 peer-disabled:cursor-not-allowed peer-disabled:opacity-50",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Label }
|
||||
@@ -1,272 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState, useMemo } from "react";
|
||||
import {
|
||||
ChevronDown,
|
||||
ChevronRight,
|
||||
MessageSquare,
|
||||
Wrench,
|
||||
Zap,
|
||||
AlertCircle,
|
||||
CheckCircle2,
|
||||
AlertTriangle,
|
||||
Bug,
|
||||
Info,
|
||||
FileOutput,
|
||||
Brain,
|
||||
} from "lucide-react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import {
|
||||
parseLogOutput,
|
||||
getLogTypeColors,
|
||||
type LogEntry,
|
||||
type LogEntryType,
|
||||
} from "@/lib/log-parser";
|
||||
|
||||
interface LogViewerProps {
|
||||
output: string;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
const getLogIcon = (type: LogEntryType) => {
|
||||
switch (type) {
|
||||
case "prompt":
|
||||
return <MessageSquare className="w-4 h-4" />;
|
||||
case "tool_call":
|
||||
return <Wrench className="w-4 h-4" />;
|
||||
case "tool_result":
|
||||
return <FileOutput className="w-4 h-4" />;
|
||||
case "phase":
|
||||
return <Zap className="w-4 h-4" />;
|
||||
case "error":
|
||||
return <AlertCircle className="w-4 h-4" />;
|
||||
case "success":
|
||||
return <CheckCircle2 className="w-4 h-4" />;
|
||||
case "warning":
|
||||
return <AlertTriangle className="w-4 h-4" />;
|
||||
case "thinking":
|
||||
return <Brain className="w-4 h-4" />;
|
||||
case "debug":
|
||||
return <Bug className="w-4 h-4" />;
|
||||
default:
|
||||
return <Info className="w-4 h-4" />;
|
||||
}
|
||||
};
|
||||
|
||||
interface LogEntryItemProps {
|
||||
entry: LogEntry;
|
||||
isExpanded: boolean;
|
||||
onToggle: () => void;
|
||||
}
|
||||
|
||||
function LogEntryItem({ entry, isExpanded, onToggle }: LogEntryItemProps) {
|
||||
const colors = getLogTypeColors(entry.type);
|
||||
const hasContent = entry.content.length > 100;
|
||||
|
||||
// Format content - detect and highlight JSON
|
||||
const formattedContent = useMemo(() => {
|
||||
const content = entry.content;
|
||||
|
||||
// Try to find and format JSON blocks
|
||||
const jsonRegex = /(\{[\s\S]*?\}|\[[\s\S]*?\])/g;
|
||||
let lastIndex = 0;
|
||||
const parts: { type: "text" | "json"; content: string }[] = [];
|
||||
|
||||
let match;
|
||||
while ((match = jsonRegex.exec(content)) !== null) {
|
||||
// Add text before JSON
|
||||
if (match.index > lastIndex) {
|
||||
parts.push({
|
||||
type: "text",
|
||||
content: content.slice(lastIndex, match.index),
|
||||
});
|
||||
}
|
||||
|
||||
// Try to parse and format JSON
|
||||
try {
|
||||
const parsed = JSON.parse(match[1]);
|
||||
parts.push({
|
||||
type: "json",
|
||||
content: JSON.stringify(parsed, null, 2),
|
||||
});
|
||||
} catch {
|
||||
// Not valid JSON, treat as text
|
||||
parts.push({ type: "text", content: match[1] });
|
||||
}
|
||||
|
||||
lastIndex = match.index + match[1].length;
|
||||
}
|
||||
|
||||
// Add remaining text
|
||||
if (lastIndex < content.length) {
|
||||
parts.push({ type: "text", content: content.slice(lastIndex) });
|
||||
}
|
||||
|
||||
return parts.length > 0 ? parts : [{ type: "text" as const, content }];
|
||||
}, [entry.content]);
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"rounded-lg border-l-4 transition-all duration-200",
|
||||
colors.bg,
|
||||
colors.border,
|
||||
"hover:brightness-110"
|
||||
)}
|
||||
data-testid={`log-entry-${entry.type}`}
|
||||
>
|
||||
<button
|
||||
onClick={onToggle}
|
||||
className="w-full px-3 py-2 flex items-center gap-2 text-left"
|
||||
data-testid={`log-entry-toggle-${entry.id}`}
|
||||
>
|
||||
{hasContent ? (
|
||||
isExpanded ? (
|
||||
<ChevronDown className="w-4 h-4 text-zinc-400 flex-shrink-0" />
|
||||
) : (
|
||||
<ChevronRight className="w-4 h-4 text-zinc-400 flex-shrink-0" />
|
||||
)
|
||||
) : (
|
||||
<span className="w-4 flex-shrink-0" />
|
||||
)}
|
||||
|
||||
<span className={cn("flex-shrink-0", colors.icon)}>
|
||||
{getLogIcon(entry.type)}
|
||||
</span>
|
||||
|
||||
<span
|
||||
className={cn(
|
||||
"text-xs font-medium px-2 py-0.5 rounded-full flex-shrink-0",
|
||||
colors.badge
|
||||
)}
|
||||
data-testid="log-entry-badge"
|
||||
>
|
||||
{entry.title}
|
||||
</span>
|
||||
|
||||
<span className="text-xs text-zinc-400 truncate flex-1 ml-2">
|
||||
{!isExpanded &&
|
||||
entry.content.slice(0, 80) +
|
||||
(entry.content.length > 80 ? "..." : "")}
|
||||
</span>
|
||||
</button>
|
||||
|
||||
{(isExpanded || !hasContent) && (
|
||||
<div
|
||||
className="px-4 pb-3 pt-1"
|
||||
data-testid={`log-entry-content-${entry.id}`}
|
||||
>
|
||||
<div className="font-mono text-xs space-y-1">
|
||||
{formattedContent.map((part, index) => (
|
||||
<div key={index}>
|
||||
{part.type === "json" ? (
|
||||
<pre className="bg-zinc-900/50 rounded p-2 overflow-x-auto text-xs text-primary">
|
||||
{part.content}
|
||||
</pre>
|
||||
) : (
|
||||
<pre
|
||||
className={cn(
|
||||
"whitespace-pre-wrap break-words",
|
||||
colors.text
|
||||
)}
|
||||
>
|
||||
{part.content}
|
||||
</pre>
|
||||
)}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export function LogViewer({ output, className }: LogViewerProps) {
|
||||
const [expandedIds, setExpandedIds] = useState<Set<string>>(new Set());
|
||||
|
||||
const entries = useMemo(() => parseLogOutput(output), [output]);
|
||||
|
||||
const toggleEntry = (id: string) => {
|
||||
setExpandedIds((prev) => {
|
||||
const next = new Set(prev);
|
||||
if (next.has(id)) {
|
||||
next.delete(id);
|
||||
} else {
|
||||
next.add(id);
|
||||
}
|
||||
return next;
|
||||
});
|
||||
};
|
||||
|
||||
const expandAll = () => {
|
||||
setExpandedIds(new Set(entries.map((e) => e.id)));
|
||||
};
|
||||
|
||||
const collapseAll = () => {
|
||||
setExpandedIds(new Set());
|
||||
};
|
||||
|
||||
if (entries.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Count entries by type
|
||||
const typeCounts = entries.reduce((acc, entry) => {
|
||||
acc[entry.type] = (acc[entry.type] || 0) + 1;
|
||||
return acc;
|
||||
}, {} as Record<string, number>);
|
||||
|
||||
return (
|
||||
<div className={cn("flex flex-col gap-2", className)}>
|
||||
{/* Header with controls */}
|
||||
<div className="flex items-center justify-between px-1" data-testid="log-viewer-header">
|
||||
<div className="flex items-center gap-2 flex-wrap">
|
||||
{Object.entries(typeCounts).map(([type, count]) => {
|
||||
const colors = getLogTypeColors(type as LogEntryType);
|
||||
return (
|
||||
<span
|
||||
key={type}
|
||||
className={cn(
|
||||
"text-xs px-2 py-0.5 rounded-full",
|
||||
colors.badge
|
||||
)}
|
||||
data-testid={`log-type-count-${type}`}
|
||||
>
|
||||
{type}: {count}
|
||||
</span>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
<div className="flex items-center gap-1">
|
||||
<button
|
||||
onClick={expandAll}
|
||||
className="text-xs text-zinc-400 hover:text-zinc-200 px-2 py-1 rounded hover:bg-zinc-800/50 transition-colors"
|
||||
data-testid="log-expand-all"
|
||||
>
|
||||
Expand All
|
||||
</button>
|
||||
<button
|
||||
onClick={collapseAll}
|
||||
className="text-xs text-zinc-400 hover:text-zinc-200 px-2 py-1 rounded hover:bg-zinc-800/50 transition-colors"
|
||||
data-testid="log-collapse-all"
|
||||
>
|
||||
Collapse All
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Log entries */}
|
||||
<div className="space-y-2" data-testid="log-entries-container">
|
||||
{entries.map((entry) => (
|
||||
<LogEntryItem
|
||||
key={entry.id}
|
||||
entry={entry}
|
||||
isExpanded={expandedIds.has(entry.id)}
|
||||
onToggle={() => toggleEntry(entry.id)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import ReactMarkdown from "react-markdown";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
interface MarkdownProps {
|
||||
children: string;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reusable Markdown component for rendering markdown content
|
||||
* Theme-aware styling that adapts to all predefined themes
|
||||
*/
|
||||
export function Markdown({ children, className }: MarkdownProps) {
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"prose prose-sm prose-invert max-w-none",
|
||||
// Headings
|
||||
"[&_h1]:text-xl [&_h1]:text-foreground [&_h1]:font-semibold [&_h1]:mt-4 [&_h1]:mb-2",
|
||||
"[&_h2]:text-lg [&_h2]:text-foreground [&_h2]:font-semibold [&_h2]:mt-4 [&_h2]:mb-2",
|
||||
"[&_h3]:text-base [&_h3]:text-foreground [&_h3]:font-semibold [&_h3]:mt-3 [&_h3]:mb-2",
|
||||
"[&_h4]:text-sm [&_h4]:text-foreground [&_h4]:font-semibold [&_h4]:mt-2 [&_h4]:mb-1",
|
||||
// Paragraphs
|
||||
"[&_p]:text-foreground-secondary [&_p]:leading-relaxed [&_p]:my-2",
|
||||
// Lists
|
||||
"[&_ul]:my-2 [&_ul]:pl-4 [&_ol]:my-2 [&_ol]:pl-4",
|
||||
"[&_li]:text-foreground-secondary [&_li]:my-0.5",
|
||||
// Code
|
||||
"[&_code]:text-chart-2 [&_code]:bg-muted [&_code]:px-1.5 [&_code]:py-0.5 [&_code]:rounded [&_code]:text-sm",
|
||||
"[&_pre]:bg-card [&_pre]:border [&_pre]:border-border [&_pre]:rounded-lg [&_pre]:my-2 [&_pre]:p-3 [&_pre]:overflow-x-auto",
|
||||
"[&_pre_code]:bg-transparent [&_pre_code]:p-0",
|
||||
// Strong/Bold
|
||||
"[&_strong]:text-foreground [&_strong]:font-semibold",
|
||||
// Links
|
||||
"[&_a]:text-brand-500 [&_a]:no-underline hover:[&_a]:underline",
|
||||
// Blockquotes
|
||||
"[&_blockquote]:border-l-2 [&_blockquote]:border-border [&_blockquote]:pl-4 [&_blockquote]:text-muted-foreground [&_blockquote]:italic [&_blockquote]:my-2",
|
||||
// Horizontal rules
|
||||
"[&_hr]:border-border [&_hr]:my-4",
|
||||
className
|
||||
)}
|
||||
>
|
||||
<ReactMarkdown>{children}</ReactMarkdown>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as PopoverPrimitive from "@radix-ui/react-popover"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Popover({
|
||||
...props
|
||||
}: React.ComponentProps<typeof PopoverPrimitive.Root>) {
|
||||
return <PopoverPrimitive.Root data-slot="popover" {...props} />
|
||||
}
|
||||
|
||||
function PopoverTrigger({
|
||||
...props
|
||||
}: React.ComponentProps<typeof PopoverPrimitive.Trigger>) {
|
||||
return <PopoverPrimitive.Trigger data-slot="popover-trigger" {...props} />
|
||||
}
|
||||
|
||||
function PopoverContent({
|
||||
className,
|
||||
align = "center",
|
||||
sideOffset = 4,
|
||||
...props
|
||||
}: React.ComponentProps<typeof PopoverPrimitive.Content>) {
|
||||
return (
|
||||
<PopoverPrimitive.Portal>
|
||||
<PopoverPrimitive.Content
|
||||
data-slot="popover-content"
|
||||
align={align}
|
||||
sideOffset={sideOffset}
|
||||
className={cn(
|
||||
"bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 w-72 origin-(--radix-popover-content-transform-origin) rounded-md border p-4 shadow-md outline-hidden",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
</PopoverPrimitive.Portal>
|
||||
)
|
||||
}
|
||||
|
||||
function PopoverAnchor({
|
||||
...props
|
||||
}: React.ComponentProps<typeof PopoverPrimitive.Anchor>) {
|
||||
return <PopoverPrimitive.Anchor data-slot="popover-anchor" {...props} />
|
||||
}
|
||||
|
||||
export { Popover, PopoverTrigger, PopoverContent, PopoverAnchor }
|
||||
@@ -1,139 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as SheetPrimitive from "@radix-ui/react-dialog"
|
||||
import { XIcon } from "lucide-react"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Sheet({ ...props }: React.ComponentProps<typeof SheetPrimitive.Root>) {
|
||||
return <SheetPrimitive.Root data-slot="sheet" {...props} />
|
||||
}
|
||||
|
||||
function SheetTrigger({
|
||||
...props
|
||||
}: React.ComponentProps<typeof SheetPrimitive.Trigger>) {
|
||||
return <SheetPrimitive.Trigger data-slot="sheet-trigger" {...props} />
|
||||
}
|
||||
|
||||
function SheetClose({
|
||||
...props
|
||||
}: React.ComponentProps<typeof SheetPrimitive.Close>) {
|
||||
return <SheetPrimitive.Close data-slot="sheet-close" {...props} />
|
||||
}
|
||||
|
||||
function SheetPortal({
|
||||
...props
|
||||
}: React.ComponentProps<typeof SheetPrimitive.Portal>) {
|
||||
return <SheetPrimitive.Portal data-slot="sheet-portal" {...props} />
|
||||
}
|
||||
|
||||
function SheetOverlay({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof SheetPrimitive.Overlay>) {
|
||||
return (
|
||||
<SheetPrimitive.Overlay
|
||||
data-slot="sheet-overlay"
|
||||
className={cn(
|
||||
"data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 fixed inset-0 z-50 bg-black/50",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function SheetContent({
|
||||
className,
|
||||
children,
|
||||
side = "right",
|
||||
...props
|
||||
}: React.ComponentProps<typeof SheetPrimitive.Content> & {
|
||||
side?: "top" | "right" | "bottom" | "left"
|
||||
}) {
|
||||
return (
|
||||
<SheetPortal>
|
||||
<SheetOverlay />
|
||||
<SheetPrimitive.Content
|
||||
data-slot="sheet-content"
|
||||
className={cn(
|
||||
"bg-background data-[state=open]:animate-in data-[state=closed]:animate-out fixed z-50 flex flex-col gap-4 shadow-lg transition ease-in-out data-[state=closed]:duration-300 data-[state=open]:duration-500",
|
||||
side === "right" &&
|
||||
"data-[state=closed]:slide-out-to-right data-[state=open]:slide-in-from-right inset-y-0 right-0 h-full w-3/4 border-l sm:max-w-sm",
|
||||
side === "left" &&
|
||||
"data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left inset-y-0 left-0 h-full w-3/4 border-r sm:max-w-sm",
|
||||
side === "top" &&
|
||||
"data-[state=closed]:slide-out-to-top data-[state=open]:slide-in-from-top inset-x-0 top-0 h-auto border-b",
|
||||
side === "bottom" &&
|
||||
"data-[state=closed]:slide-out-to-bottom data-[state=open]:slide-in-from-bottom inset-x-0 bottom-0 h-auto border-t",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
<SheetPrimitive.Close className="ring-offset-background focus:ring-ring data-[state=open]:bg-secondary absolute top-4 right-4 rounded-xs opacity-70 transition-opacity hover:opacity-100 focus:ring-2 focus:ring-offset-2 focus:outline-hidden disabled:pointer-events-none">
|
||||
<XIcon className="size-4" />
|
||||
<span className="sr-only">Close</span>
|
||||
</SheetPrimitive.Close>
|
||||
</SheetPrimitive.Content>
|
||||
</SheetPortal>
|
||||
)
|
||||
}
|
||||
|
||||
function SheetHeader({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="sheet-header"
|
||||
className={cn("flex flex-col gap-1.5 p-4", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function SheetFooter({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="sheet-footer"
|
||||
className={cn("mt-auto flex flex-col gap-2 p-4", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function SheetTitle({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof SheetPrimitive.Title>) {
|
||||
return (
|
||||
<SheetPrimitive.Title
|
||||
data-slot="sheet-title"
|
||||
className={cn("text-foreground font-semibold", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function SheetDescription({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof SheetPrimitive.Description>) {
|
||||
return (
|
||||
<SheetPrimitive.Description
|
||||
data-slot="sheet-description"
|
||||
className={cn("text-muted-foreground text-sm", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export {
|
||||
Sheet,
|
||||
SheetTrigger,
|
||||
SheetClose,
|
||||
SheetContent,
|
||||
SheetHeader,
|
||||
SheetFooter,
|
||||
SheetTitle,
|
||||
SheetDescription,
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import * as React from "react";
|
||||
import * as SliderPrimitive from "@radix-ui/react-slider";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
const Slider = React.forwardRef<
|
||||
React.ComponentRef<typeof SliderPrimitive.Root>,
|
||||
React.ComponentPropsWithoutRef<typeof SliderPrimitive.Root>
|
||||
>(({ className, ...props }, ref) => (
|
||||
<SliderPrimitive.Root
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"relative flex w-full touch-none select-none items-center",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
<SliderPrimitive.Track className="slider-track relative h-1.5 w-full grow overflow-hidden rounded-full bg-muted cursor-pointer">
|
||||
<SliderPrimitive.Range className="slider-range absolute h-full bg-primary" />
|
||||
</SliderPrimitive.Track>
|
||||
<SliderPrimitive.Thumb className="slider-thumb block h-4 w-4 rounded-full border border-border bg-card shadow transition-colors cursor-grab active:cursor-grabbing focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50 disabled:cursor-not-allowed hover:bg-accent" />
|
||||
</SliderPrimitive.Root>
|
||||
));
|
||||
Slider.displayName = SliderPrimitive.Root.displayName;
|
||||
|
||||
export { Slider };
|
||||
@@ -1,71 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as TabsPrimitive from "@radix-ui/react-tabs"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Tabs({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof TabsPrimitive.Root>) {
|
||||
return (
|
||||
<TabsPrimitive.Root
|
||||
data-slot="tabs"
|
||||
className={cn("flex flex-col gap-2", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function TabsList({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof TabsPrimitive.List>) {
|
||||
return (
|
||||
<TabsPrimitive.List
|
||||
data-slot="tabs-list"
|
||||
className={cn(
|
||||
"bg-muted text-muted-foreground inline-flex h-9 w-fit items-center justify-center rounded-lg p-[3px] border border-border",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function TabsTrigger({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof TabsPrimitive.Trigger>) {
|
||||
return (
|
||||
<TabsPrimitive.Trigger
|
||||
data-slot="tabs-trigger"
|
||||
className={cn(
|
||||
"inline-flex h-[calc(100%-1px)] flex-1 items-center justify-center gap-1.5 rounded-md border border-transparent px-2 py-1 text-sm font-medium whitespace-nowrap transition-all duration-200 cursor-pointer",
|
||||
"text-foreground/70 hover:text-foreground hover:bg-accent",
|
||||
"data-[state=active]:bg-primary data-[state=active]:text-primary-foreground data-[state=active]:shadow-md data-[state=active]:border-primary/50",
|
||||
"focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:outline-ring focus-visible:ring-[3px] focus-visible:outline-1",
|
||||
"disabled:pointer-events-none disabled:opacity-50 disabled:cursor-not-allowed",
|
||||
"[&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function TabsContent({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof TabsPrimitive.Content>) {
|
||||
return (
|
||||
<TabsPrimitive.Content
|
||||
data-slot="tabs-content"
|
||||
className={cn("flex-1 outline-none", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Tabs, TabsList, TabsTrigger, TabsContent }
|
||||
@@ -1,20 +0,0 @@
|
||||
import * as React from "react"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Textarea({ className, ...props }: React.ComponentProps<"textarea">) {
|
||||
return (
|
||||
<textarea
|
||||
data-slot="textarea"
|
||||
className={cn(
|
||||
"placeholder:text-muted-foreground selection:bg-primary selection:text-primary-foreground dark:bg-input/30 border-input min-h-[80px] w-full min-w-0 rounded-md border bg-transparent px-3 py-2 text-base shadow-xs transition-[color,box-shadow] outline-none disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50 md:text-sm resize-none",
|
||||
"focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px]",
|
||||
"aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Textarea }
|
||||
@@ -1,32 +0,0 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as TooltipPrimitive from "@radix-ui/react-tooltip"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const TooltipProvider = TooltipPrimitive.Provider
|
||||
|
||||
const Tooltip = TooltipPrimitive.Root
|
||||
|
||||
const TooltipTrigger = TooltipPrimitive.Trigger
|
||||
|
||||
const TooltipContent = React.forwardRef<
|
||||
React.ElementRef<typeof TooltipPrimitive.Content>,
|
||||
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
|
||||
>(({ className, sideOffset = 4, ...props }, ref) => (
|
||||
<TooltipPrimitive.Portal>
|
||||
<TooltipPrimitive.Content
|
||||
ref={ref}
|
||||
sideOffset={sideOffset}
|
||||
className={cn(
|
||||
"z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-sm text-popover-foreground shadow-md animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
</TooltipPrimitive.Portal>
|
||||
))
|
||||
TooltipContent.displayName = TooltipPrimitive.Content.displayName
|
||||
|
||||
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
|
||||
@@ -1,290 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useRef, useCallback, useMemo } from "react";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
interface XmlSyntaxEditorProps {
|
||||
value: string;
|
||||
onChange: (value: string) => void;
|
||||
placeholder?: string;
|
||||
className?: string;
|
||||
"data-testid"?: string;
|
||||
}
|
||||
|
||||
// Tokenize XML content into parts for highlighting
|
||||
interface Token {
|
||||
type:
|
||||
| "tag-bracket"
|
||||
| "tag-name"
|
||||
| "attribute-name"
|
||||
| "attribute-equals"
|
||||
| "attribute-value"
|
||||
| "text"
|
||||
| "comment"
|
||||
| "cdata"
|
||||
| "doctype";
|
||||
value: string;
|
||||
}
|
||||
|
||||
function tokenizeXml(text: string): Token[] {
|
||||
const tokens: Token[] = [];
|
||||
let i = 0;
|
||||
|
||||
while (i < text.length) {
|
||||
// Comment: <!-- ... -->
|
||||
if (text.slice(i, i + 4) === "<!--") {
|
||||
const end = text.indexOf("-->", i + 4);
|
||||
if (end !== -1) {
|
||||
tokens.push({ type: "comment", value: text.slice(i, end + 3) });
|
||||
i = end + 3;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// CDATA: <![CDATA[ ... ]]>
|
||||
if (text.slice(i, i + 9) === "<![CDATA[") {
|
||||
const end = text.indexOf("]]>", i + 9);
|
||||
if (end !== -1) {
|
||||
tokens.push({ type: "cdata", value: text.slice(i, end + 3) });
|
||||
i = end + 3;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// DOCTYPE: <!DOCTYPE ... >
|
||||
if (text.slice(i, i + 9).toUpperCase() === "<!DOCTYPE") {
|
||||
const end = text.indexOf(">", i + 9);
|
||||
if (end !== -1) {
|
||||
tokens.push({ type: "doctype", value: text.slice(i, end + 1) });
|
||||
i = end + 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Tag: < ... >
|
||||
if (text[i] === "<") {
|
||||
// Find the end of the tag
|
||||
let tagEnd = i + 1;
|
||||
let inString: string | null = null;
|
||||
|
||||
while (tagEnd < text.length) {
|
||||
const char = text[tagEnd];
|
||||
|
||||
if (inString) {
|
||||
if (char === inString && text[tagEnd - 1] !== "\\") {
|
||||
inString = null;
|
||||
}
|
||||
} else {
|
||||
if (char === '"' || char === "'") {
|
||||
inString = char;
|
||||
} else if (char === ">") {
|
||||
tagEnd++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
tagEnd++;
|
||||
}
|
||||
|
||||
const tagContent = text.slice(i, tagEnd);
|
||||
const tagTokens = tokenizeTag(tagContent);
|
||||
tokens.push(...tagTokens);
|
||||
i = tagEnd;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Text content between tags
|
||||
const nextTag = text.indexOf("<", i);
|
||||
if (nextTag === -1) {
|
||||
tokens.push({ type: "text", value: text.slice(i) });
|
||||
break;
|
||||
} else if (nextTag > i) {
|
||||
tokens.push({ type: "text", value: text.slice(i, nextTag) });
|
||||
i = nextTag;
|
||||
}
|
||||
}
|
||||
|
||||
return tokens;
|
||||
}
|
||||
|
||||
function tokenizeTag(tag: string): Token[] {
|
||||
const tokens: Token[] = [];
|
||||
let i = 0;
|
||||
|
||||
// Opening bracket (< or </ or <?)
|
||||
if (tag.startsWith("</")) {
|
||||
tokens.push({ type: "tag-bracket", value: "</" });
|
||||
i = 2;
|
||||
} else if (tag.startsWith("<?")) {
|
||||
tokens.push({ type: "tag-bracket", value: "<?" });
|
||||
i = 2;
|
||||
} else {
|
||||
tokens.push({ type: "tag-bracket", value: "<" });
|
||||
i = 1;
|
||||
}
|
||||
|
||||
// Skip whitespace
|
||||
while (i < tag.length && /\s/.test(tag[i])) {
|
||||
tokens.push({ type: "text", value: tag[i] });
|
||||
i++;
|
||||
}
|
||||
|
||||
// Tag name
|
||||
let tagName = "";
|
||||
while (i < tag.length && /[a-zA-Z0-9_:-]/.test(tag[i])) {
|
||||
tagName += tag[i];
|
||||
i++;
|
||||
}
|
||||
if (tagName) {
|
||||
tokens.push({ type: "tag-name", value: tagName });
|
||||
}
|
||||
|
||||
// Attributes and closing
|
||||
while (i < tag.length) {
|
||||
// Skip whitespace
|
||||
if (/\s/.test(tag[i])) {
|
||||
let ws = "";
|
||||
while (i < tag.length && /\s/.test(tag[i])) {
|
||||
ws += tag[i];
|
||||
i++;
|
||||
}
|
||||
tokens.push({ type: "text", value: ws });
|
||||
continue;
|
||||
}
|
||||
|
||||
// Closing bracket
|
||||
if (tag[i] === ">" || tag.slice(i, i + 2) === "/>" || tag.slice(i, i + 2) === "?>") {
|
||||
tokens.push({ type: "tag-bracket", value: tag.slice(i) });
|
||||
break;
|
||||
}
|
||||
|
||||
// Attribute name
|
||||
let attrName = "";
|
||||
while (i < tag.length && /[a-zA-Z0-9_:-]/.test(tag[i])) {
|
||||
attrName += tag[i];
|
||||
i++;
|
||||
}
|
||||
if (attrName) {
|
||||
tokens.push({ type: "attribute-name", value: attrName });
|
||||
}
|
||||
|
||||
// Skip whitespace around =
|
||||
while (i < tag.length && /\s/.test(tag[i])) {
|
||||
tokens.push({ type: "text", value: tag[i] });
|
||||
i++;
|
||||
}
|
||||
|
||||
// Equals sign
|
||||
if (tag[i] === "=") {
|
||||
tokens.push({ type: "attribute-equals", value: "=" });
|
||||
i++;
|
||||
}
|
||||
|
||||
// Skip whitespace after =
|
||||
while (i < tag.length && /\s/.test(tag[i])) {
|
||||
tokens.push({ type: "text", value: tag[i] });
|
||||
i++;
|
||||
}
|
||||
|
||||
// Attribute value
|
||||
if (tag[i] === '"' || tag[i] === "'") {
|
||||
const quote = tag[i];
|
||||
let value = quote;
|
||||
i++;
|
||||
while (i < tag.length && tag[i] !== quote) {
|
||||
value += tag[i];
|
||||
i++;
|
||||
}
|
||||
if (i < tag.length) {
|
||||
value += tag[i];
|
||||
i++;
|
||||
}
|
||||
tokens.push({ type: "attribute-value", value });
|
||||
}
|
||||
}
|
||||
|
||||
return tokens;
|
||||
}
|
||||
|
||||
export function XmlSyntaxEditor({
|
||||
value,
|
||||
onChange,
|
||||
placeholder,
|
||||
className,
|
||||
"data-testid": testId,
|
||||
}: XmlSyntaxEditorProps) {
|
||||
const textareaRef = useRef<HTMLTextAreaElement>(null);
|
||||
const highlightRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
// Sync scroll between textarea and highlight layer
|
||||
const handleScroll = useCallback(() => {
|
||||
if (textareaRef.current && highlightRef.current) {
|
||||
highlightRef.current.scrollTop = textareaRef.current.scrollTop;
|
||||
highlightRef.current.scrollLeft = textareaRef.current.scrollLeft;
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Handle tab key for indentation
|
||||
const handleKeyDown = useCallback(
|
||||
(e: React.KeyboardEvent<HTMLTextAreaElement>) => {
|
||||
if (e.key === "Tab") {
|
||||
e.preventDefault();
|
||||
const textarea = e.currentTarget;
|
||||
const start = textarea.selectionStart;
|
||||
const end = textarea.selectionEnd;
|
||||
const newValue =
|
||||
value.substring(0, start) + " " + value.substring(end);
|
||||
onChange(newValue);
|
||||
// Reset cursor position after state update
|
||||
requestAnimationFrame(() => {
|
||||
textarea.selectionStart = textarea.selectionEnd = start + 2;
|
||||
});
|
||||
}
|
||||
},
|
||||
[value, onChange]
|
||||
);
|
||||
|
||||
// Memoize the highlighted content
|
||||
const highlightedContent = useMemo(() => {
|
||||
const tokens = tokenizeXml(value);
|
||||
|
||||
return tokens.map((token, index) => {
|
||||
const className = `xml-${token.type}`;
|
||||
// React handles escaping automatically, just render the raw value
|
||||
return (
|
||||
<span key={index} className={className}>
|
||||
{token.value}
|
||||
</span>
|
||||
);
|
||||
});
|
||||
}, [value]);
|
||||
|
||||
return (
|
||||
<div className={cn("relative w-full h-full xml-editor", className)}>
|
||||
{/* Syntax highlighted layer (read-only, behind textarea) */}
|
||||
<div
|
||||
ref={highlightRef}
|
||||
className="absolute inset-0 overflow-auto pointer-events-none font-mono text-sm p-4 whitespace-pre-wrap break-words"
|
||||
aria-hidden="true"
|
||||
>
|
||||
{value ? (
|
||||
<code className="xml-highlight">{highlightedContent}</code>
|
||||
) : (
|
||||
<span className="text-muted-foreground opacity-50">{placeholder}</span>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Actual textarea (transparent text, handles input) */}
|
||||
<textarea
|
||||
ref={textareaRef}
|
||||
value={value}
|
||||
onChange={(e) => onChange(e.target.value)}
|
||||
onScroll={handleScroll}
|
||||
onKeyDown={handleKeyDown}
|
||||
placeholder=""
|
||||
spellCheck={false}
|
||||
className="absolute inset-0 w-full h-full font-mono text-sm p-4 bg-transparent resize-none focus:outline-none text-transparent caret-foreground selection:bg-primary/30"
|
||||
data-testid={testId}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,804 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useState, useCallback, useRef, useEffect, useMemo } from "react";
|
||||
import { useAppStore } from "@/store/app-store";
|
||||
import { Card, CardContent } from "@/components/ui/card";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { Input } from "@/components/ui/input";
|
||||
import { ImageDropZone } from "@/components/ui/image-drop-zone";
|
||||
import {
|
||||
Bot,
|
||||
Send,
|
||||
User,
|
||||
Loader2,
|
||||
Sparkles,
|
||||
Wrench,
|
||||
Trash2,
|
||||
PanelLeftClose,
|
||||
PanelLeft,
|
||||
Paperclip,
|
||||
X,
|
||||
} from "lucide-react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useElectronAgent } from "@/hooks/use-electron-agent";
|
||||
import { SessionManager } from "@/components/session-manager";
|
||||
import { Markdown } from "@/components/ui/markdown";
|
||||
import type { ImageAttachment } from "@/store/app-store";
|
||||
import {
|
||||
useKeyboardShortcuts,
|
||||
useKeyboardShortcutsConfig,
|
||||
KeyboardShortcut,
|
||||
} from "@/hooks/use-keyboard-shortcuts";
|
||||
|
||||
export function AgentView() {
|
||||
const { currentProject, setLastSelectedSession, getLastSelectedSession } = useAppStore();
|
||||
const shortcuts = useKeyboardShortcutsConfig();
|
||||
const [input, setInput] = useState("");
|
||||
const [selectedImages, setSelectedImages] = useState<ImageAttachment[]>([]);
|
||||
const [showImageDropZone, setShowImageDropZone] = useState(false);
|
||||
const [currentTool, setCurrentTool] = useState<string | null>(null);
|
||||
const [currentSessionId, setCurrentSessionId] = useState<string | null>(null);
|
||||
const [showSessionManager, setShowSessionManager] = useState(true);
|
||||
const [isDragOver, setIsDragOver] = useState(false);
|
||||
|
||||
// Track if initial session has been loaded
|
||||
const initialSessionLoadedRef = useRef(false);
|
||||
|
||||
// Scroll management for auto-scroll
|
||||
const messagesContainerRef = useRef<HTMLDivElement>(null);
|
||||
const [isUserAtBottom, setIsUserAtBottom] = useState(true);
|
||||
|
||||
// Input ref for auto-focus
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
// Ref for quick create session function from SessionManager
|
||||
const quickCreateSessionRef = useRef<(() => Promise<void>) | null>(null);
|
||||
|
||||
// Use the Electron agent hook (only if we have a session)
|
||||
const {
|
||||
messages,
|
||||
isProcessing,
|
||||
isConnected,
|
||||
sendMessage,
|
||||
clearHistory,
|
||||
error: agentError,
|
||||
} = useElectronAgent({
|
||||
sessionId: currentSessionId || "",
|
||||
workingDirectory: currentProject?.path,
|
||||
onToolUse: (toolName) => {
|
||||
setCurrentTool(toolName);
|
||||
setTimeout(() => setCurrentTool(null), 2000);
|
||||
},
|
||||
});
|
||||
|
||||
// Handle session selection with persistence
|
||||
const handleSelectSession = useCallback((sessionId: string | null) => {
|
||||
setCurrentSessionId(sessionId);
|
||||
// Persist the selection for this project
|
||||
if (currentProject?.path) {
|
||||
setLastSelectedSession(currentProject.path, sessionId);
|
||||
}
|
||||
}, [currentProject?.path, setLastSelectedSession]);
|
||||
|
||||
// Restore last selected session when switching to Agent view or when project changes
|
||||
useEffect(() => {
|
||||
if (!currentProject?.path) {
|
||||
// No project, reset
|
||||
setCurrentSessionId(null);
|
||||
initialSessionLoadedRef.current = false;
|
||||
return;
|
||||
}
|
||||
|
||||
// Only restore once per project
|
||||
if (initialSessionLoadedRef.current) return;
|
||||
initialSessionLoadedRef.current = true;
|
||||
|
||||
const lastSessionId = getLastSelectedSession(currentProject.path);
|
||||
if (lastSessionId) {
|
||||
console.log("[AgentView] Restoring last selected session:", lastSessionId);
|
||||
setCurrentSessionId(lastSessionId);
|
||||
}
|
||||
}, [currentProject?.path, getLastSelectedSession]);
|
||||
|
||||
// Reset initialSessionLoadedRef when project changes
|
||||
useEffect(() => {
|
||||
initialSessionLoadedRef.current = false;
|
||||
}, [currentProject?.path]);
|
||||
|
||||
const handleSend = useCallback(async () => {
|
||||
if ((!input.trim() && selectedImages.length === 0) || isProcessing) return;
|
||||
|
||||
const messageContent = input;
|
||||
const messageImages = selectedImages;
|
||||
|
||||
setInput("");
|
||||
setSelectedImages([]);
|
||||
setShowImageDropZone(false);
|
||||
|
||||
await sendMessage(messageContent, messageImages);
|
||||
}, [input, selectedImages, isProcessing, sendMessage]);
|
||||
|
||||
const handleImagesSelected = useCallback((images: ImageAttachment[]) => {
|
||||
setSelectedImages(images);
|
||||
}, []);
|
||||
|
||||
const toggleImageDropZone = useCallback(() => {
|
||||
setShowImageDropZone(!showImageDropZone);
|
||||
}, [showImageDropZone]);
|
||||
|
||||
// Helper function to convert file to base64
|
||||
const fileToBase64 = useCallback((file: File): Promise<string> => {
|
||||
return new Promise((resolve, reject) => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = () => {
|
||||
if (typeof reader.result === "string") {
|
||||
resolve(reader.result);
|
||||
} else {
|
||||
reject(new Error("Failed to read file as base64"));
|
||||
}
|
||||
};
|
||||
reader.onerror = () => reject(new Error("Failed to read file"));
|
||||
reader.readAsDataURL(file);
|
||||
});
|
||||
}, []);
|
||||
|
||||
// Process dropped files
|
||||
const processDroppedFiles = useCallback(
|
||||
async (files: FileList) => {
|
||||
if (isProcessing) return;
|
||||
|
||||
const ACCEPTED_IMAGE_TYPES = [
|
||||
"image/jpeg",
|
||||
"image/jpg",
|
||||
"image/png",
|
||||
"image/gif",
|
||||
"image/webp",
|
||||
];
|
||||
const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB
|
||||
const MAX_FILES = 5;
|
||||
|
||||
const newImages: ImageAttachment[] = [];
|
||||
const errors: string[] = [];
|
||||
|
||||
for (const file of Array.from(files)) {
|
||||
// Validate file type
|
||||
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
|
||||
errors.push(
|
||||
`${file.name}: Unsupported file type. Please use JPG, PNG, GIF, or WebP.`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate file size
|
||||
if (file.size > MAX_FILE_SIZE) {
|
||||
const maxSizeMB = MAX_FILE_SIZE / (1024 * 1024);
|
||||
errors.push(
|
||||
`${file.name}: File too large. Maximum size is ${maxSizeMB}MB.`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if we've reached max files
|
||||
if (newImages.length + selectedImages.length >= MAX_FILES) {
|
||||
errors.push(`Maximum ${MAX_FILES} images allowed.`);
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const base64 = await fileToBase64(file);
|
||||
const imageAttachment: ImageAttachment = {
|
||||
id: `img-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
|
||||
data: base64,
|
||||
mimeType: file.type,
|
||||
filename: file.name,
|
||||
size: file.size,
|
||||
};
|
||||
newImages.push(imageAttachment);
|
||||
} catch (error) {
|
||||
errors.push(`${file.name}: Failed to process image.`);
|
||||
}
|
||||
}
|
||||
|
||||
if (errors.length > 0) {
|
||||
console.warn("Image upload errors:", errors);
|
||||
}
|
||||
|
||||
if (newImages.length > 0) {
|
||||
setSelectedImages((prev) => [...prev, ...newImages]);
|
||||
}
|
||||
},
|
||||
[isProcessing, selectedImages, fileToBase64]
|
||||
);
|
||||
|
||||
// Remove individual image
|
||||
const removeImage = useCallback((imageId: string) => {
|
||||
setSelectedImages((prev) => prev.filter((img) => img.id !== imageId));
|
||||
}, []);
|
||||
|
||||
// Drag and drop handlers for the input area
|
||||
const handleDragEnter = useCallback(
|
||||
(e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
if (isProcessing || !isConnected) return;
|
||||
|
||||
console.log(
|
||||
"[agent-view] Drag enter types:",
|
||||
Array.from(e.dataTransfer.types)
|
||||
);
|
||||
|
||||
// Check if dragged items contain files
|
||||
if (e.dataTransfer.types.includes("Files")) {
|
||||
setIsDragOver(true);
|
||||
}
|
||||
},
|
||||
[isProcessing, isConnected]
|
||||
);
|
||||
|
||||
const handleDragLeave = useCallback((e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
|
||||
// Only set dragOver to false if we're leaving the input container
|
||||
const rect = e.currentTarget.getBoundingClientRect();
|
||||
const x = e.clientX;
|
||||
const y = e.clientY;
|
||||
|
||||
if (x < rect.left || x > rect.right || y < rect.top || y > rect.bottom) {
|
||||
setIsDragOver(false);
|
||||
}
|
||||
}, []);
|
||||
|
||||
const handleDragOver = useCallback((e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
}, []);
|
||||
|
||||
const handleDrop = useCallback(
|
||||
async (e: React.DragEvent) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDragOver(false);
|
||||
|
||||
if (isProcessing || !isConnected) return;
|
||||
|
||||
console.log("[agent-view] Drop event:", {
|
||||
filesCount: e.dataTransfer.files.length,
|
||||
itemsCount: e.dataTransfer.items.length,
|
||||
types: Array.from(e.dataTransfer.types),
|
||||
});
|
||||
|
||||
// Check if we have files
|
||||
const files = e.dataTransfer.files;
|
||||
if (files && files.length > 0) {
|
||||
console.log("[agent-view] Processing files from dataTransfer.files");
|
||||
processDroppedFiles(files);
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle file paths (from screenshots or other sources)
|
||||
// This is common on macOS when dragging screenshots
|
||||
const items = e.dataTransfer.items;
|
||||
if (items && items.length > 0) {
|
||||
console.log("[agent-view] Processing items");
|
||||
for (let i = 0; i < items.length; i++) {
|
||||
const item = items[i];
|
||||
console.log(`[agent-view] Item ${i}:`, {
|
||||
kind: item.kind,
|
||||
type: item.type,
|
||||
});
|
||||
if (item.kind === "file") {
|
||||
const file = item.getAsFile();
|
||||
if (file) {
|
||||
console.log("[agent-view] Got file from item:", {
|
||||
name: file.name,
|
||||
type: file.type,
|
||||
size: file.size,
|
||||
});
|
||||
const dataTransfer = new DataTransfer();
|
||||
dataTransfer.items.add(file);
|
||||
processDroppedFiles(dataTransfer.files);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
[isProcessing, isConnected, processDroppedFiles]
|
||||
);
|
||||
|
||||
const handlePaste = useCallback(
|
||||
async (e: React.ClipboardEvent) => {
|
||||
// Check if clipboard contains files
|
||||
const items = e.clipboardData?.items;
|
||||
if (items) {
|
||||
const files: File[] = [];
|
||||
|
||||
for (let i = 0; i < items.length; i++) {
|
||||
const item = items[i];
|
||||
console.log("[agent-view] Paste item:", {
|
||||
kind: item.kind,
|
||||
type: item.type,
|
||||
});
|
||||
|
||||
if (item.kind === "file") {
|
||||
const file = item.getAsFile();
|
||||
if (file && file.type.startsWith("image/")) {
|
||||
e.preventDefault(); // Prevent default paste of file path
|
||||
files.push(file);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (files.length > 0) {
|
||||
console.log(
|
||||
"[agent-view] Processing pasted image files:",
|
||||
files.length
|
||||
);
|
||||
const dataTransfer = new DataTransfer();
|
||||
files.forEach((file) => dataTransfer.items.add(file));
|
||||
await processDroppedFiles(dataTransfer.files);
|
||||
}
|
||||
}
|
||||
},
|
||||
[processDroppedFiles]
|
||||
);
|
||||
|
||||
const handleKeyPress = (e: React.KeyboardEvent) => {
|
||||
if (e.key === "Enter" && !e.shiftKey) {
|
||||
e.preventDefault();
|
||||
handleSend();
|
||||
}
|
||||
};
|
||||
|
||||
const handleClearChat = async () => {
|
||||
if (!confirm("Are you sure you want to clear this conversation?")) return;
|
||||
await clearHistory();
|
||||
};
|
||||
|
||||
// Scroll position detection
|
||||
const checkIfUserIsAtBottom = useCallback(() => {
|
||||
const container = messagesContainerRef.current;
|
||||
if (!container) return;
|
||||
|
||||
const threshold = 50; // 50px threshold for "near bottom"
|
||||
const isAtBottom =
|
||||
container.scrollHeight - container.scrollTop - container.clientHeight <=
|
||||
threshold;
|
||||
|
||||
setIsUserAtBottom(isAtBottom);
|
||||
}, []);
|
||||
|
||||
// Scroll to bottom function
|
||||
const scrollToBottom = useCallback((behavior: ScrollBehavior = "smooth") => {
|
||||
const container = messagesContainerRef.current;
|
||||
if (!container) return;
|
||||
|
||||
container.scrollTo({
|
||||
top: container.scrollHeight,
|
||||
behavior: behavior,
|
||||
});
|
||||
}, []);
|
||||
|
||||
// Handle scroll events
|
||||
const handleScroll = useCallback(() => {
|
||||
checkIfUserIsAtBottom();
|
||||
}, [checkIfUserIsAtBottom]);
|
||||
|
||||
// Auto-scroll effect when messages change
|
||||
useEffect(() => {
|
||||
// Only auto-scroll if user was already at bottom
|
||||
if (isUserAtBottom && messages.length > 0) {
|
||||
// Use a small delay to ensure DOM is updated
|
||||
setTimeout(() => {
|
||||
scrollToBottom("smooth");
|
||||
}, 100);
|
||||
}
|
||||
}, [messages, isUserAtBottom, scrollToBottom]);
|
||||
|
||||
// Initial scroll to bottom when session changes
|
||||
useEffect(() => {
|
||||
if (currentSessionId && messages.length > 0) {
|
||||
// Scroll immediately without animation when switching sessions
|
||||
setTimeout(() => {
|
||||
scrollToBottom("auto");
|
||||
setIsUserAtBottom(true);
|
||||
}, 100);
|
||||
}
|
||||
}, [currentSessionId, scrollToBottom]);
|
||||
|
||||
// Auto-focus input when session is selected/changed
|
||||
useEffect(() => {
|
||||
if (currentSessionId && inputRef.current) {
|
||||
// Small delay to ensure UI has updated
|
||||
setTimeout(() => {
|
||||
inputRef.current?.focus();
|
||||
}, 200);
|
||||
}
|
||||
}, [currentSessionId]);
|
||||
|
||||
// Keyboard shortcuts for agent view
|
||||
const agentShortcuts: KeyboardShortcut[] = useMemo(() => {
|
||||
const shortcutsList: KeyboardShortcut[] = [];
|
||||
|
||||
// New session shortcut - only when in agent view with a project
|
||||
if (currentProject) {
|
||||
shortcutsList.push({
|
||||
key: shortcuts.newSession,
|
||||
action: () => {
|
||||
if (quickCreateSessionRef.current) {
|
||||
quickCreateSessionRef.current();
|
||||
}
|
||||
},
|
||||
description: "Create new session",
|
||||
});
|
||||
}
|
||||
|
||||
return shortcutsList;
|
||||
}, [currentProject, shortcuts]);
|
||||
|
||||
// Register keyboard shortcuts
|
||||
useKeyboardShortcuts(agentShortcuts);
|
||||
|
||||
if (!currentProject) {
|
||||
return (
|
||||
<div
|
||||
className="flex-1 flex items-center justify-center"
|
||||
data-testid="agent-view-no-project"
|
||||
>
|
||||
<div className="text-center">
|
||||
<Sparkles className="w-12 h-12 text-muted-foreground mx-auto mb-4" />
|
||||
<h2 className="text-xl font-semibold mb-2">No Project Selected</h2>
|
||||
<p className="text-muted-foreground">
|
||||
Open or create a project to start working with the AI agent.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Show welcome message if no messages yet
|
||||
const displayMessages =
|
||||
messages.length === 0
|
||||
? [
|
||||
{
|
||||
id: "welcome",
|
||||
role: "assistant" as const,
|
||||
content:
|
||||
"Hello! I'm the Automaker Agent. I can help you build software autonomously. I can read and modify files in this project, run commands, and execute tests. What would you like to create today?",
|
||||
timestamp: new Date().toISOString(),
|
||||
},
|
||||
]
|
||||
: messages;
|
||||
|
||||
return (
|
||||
<div
|
||||
className="flex-1 flex overflow-hidden content-bg"
|
||||
data-testid="agent-view"
|
||||
>
|
||||
{/* Session Manager Sidebar */}
|
||||
{showSessionManager && currentProject && (
|
||||
<div className="w-80 border-r flex-shrink-0">
|
||||
<SessionManager
|
||||
currentSessionId={currentSessionId}
|
||||
onSelectSession={handleSelectSession}
|
||||
projectPath={currentProject.path}
|
||||
isCurrentSessionThinking={isProcessing}
|
||||
onQuickCreateRef={quickCreateSessionRef}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Chat Area */}
|
||||
<div className="flex-1 flex flex-col overflow-hidden">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between p-4 border-b border-border bg-glass backdrop-blur-md">
|
||||
<div className="flex items-center gap-3">
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
onClick={() => setShowSessionManager(!showSessionManager)}
|
||||
className="h-8 w-8 p-0"
|
||||
>
|
||||
{showSessionManager ? (
|
||||
<PanelLeftClose className="w-4 h-4" />
|
||||
) : (
|
||||
<PanelLeft className="w-4 h-4" />
|
||||
)}
|
||||
</Button>
|
||||
<Bot className="w-5 h-5 text-primary" />
|
||||
<div>
|
||||
<h1 className="text-xl font-bold">AI Agent</h1>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
{currentProject.name}
|
||||
{currentSessionId && !isConnected && " · Connecting..."}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Status indicators & actions */}
|
||||
<div className="flex items-center gap-2">
|
||||
{currentTool && (
|
||||
<div className="flex items-center gap-1 text-xs text-muted-foreground bg-muted px-2 py-1 rounded">
|
||||
<Wrench className="w-3 h-3" />
|
||||
<span>{currentTool}</span>
|
||||
</div>
|
||||
)}
|
||||
{agentError && (
|
||||
<span className="text-xs text-destructive">{agentError}</span>
|
||||
)}
|
||||
{currentSessionId && messages.length > 0 && (
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
onClick={handleClearChat}
|
||||
disabled={isProcessing}
|
||||
>
|
||||
<Trash2 className="w-4 h-4 mr-1" />
|
||||
Clear
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Messages */}
|
||||
{!currentSessionId ? (
|
||||
<div
|
||||
className="flex-1 flex items-center justify-center"
|
||||
data-testid="no-session-placeholder"
|
||||
>
|
||||
<div className="text-center">
|
||||
<Bot className="w-12 h-12 text-muted-foreground mx-auto mb-4 opacity-50" />
|
||||
<h2 className="text-lg font-semibold mb-2">
|
||||
No Session Selected
|
||||
</h2>
|
||||
<p className="text-sm text-muted-foreground mb-4">
|
||||
Create or select a session to start chatting
|
||||
</p>
|
||||
<Button
|
||||
onClick={() => setShowSessionManager(true)}
|
||||
variant="outline"
|
||||
>
|
||||
<PanelLeft className="w-4 h-4 mr-2" />
|
||||
{showSessionManager ? "View" : "Show"} Sessions
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
<div
|
||||
ref={messagesContainerRef}
|
||||
className="flex-1 overflow-y-auto p-4 space-y-4"
|
||||
data-testid="message-list"
|
||||
onScroll={handleScroll}
|
||||
>
|
||||
{displayMessages.map((message) => (
|
||||
<div
|
||||
key={message.id}
|
||||
className={cn(
|
||||
"flex gap-3",
|
||||
message.role === "user" && "flex-row-reverse"
|
||||
)}
|
||||
>
|
||||
<div
|
||||
className={cn(
|
||||
"w-8 h-8 rounded-full flex items-center justify-center shrink-0",
|
||||
message.role === "assistant" ? "bg-primary/10" : "bg-muted"
|
||||
)}
|
||||
>
|
||||
{message.role === "assistant" ? (
|
||||
<Bot className="w-4 h-4 text-primary" />
|
||||
) : (
|
||||
<User className="w-4 h-4" />
|
||||
)}
|
||||
</div>
|
||||
<Card
|
||||
className={cn(
|
||||
"max-w-[80%]",
|
||||
message.role === "user"
|
||||
? "bg-transparent border border-primary text-foreground"
|
||||
: "border-l-4 border-primary bg-card"
|
||||
)}
|
||||
>
|
||||
<CardContent className="px-3 py-2">
|
||||
{message.role === "assistant" ? (
|
||||
<Markdown className="text-sm text-primary prose-headings:text-primary prose-strong:text-primary prose-code:text-primary">
|
||||
{message.content}
|
||||
</Markdown>
|
||||
) : (
|
||||
<p className="text-sm whitespace-pre-wrap">
|
||||
{message.content}
|
||||
</p>
|
||||
)}
|
||||
<p
|
||||
className={cn(
|
||||
"text-xs mt-1",
|
||||
message.role === "user"
|
||||
? "text-muted-foreground"
|
||||
: "text-primary/70"
|
||||
)}
|
||||
>
|
||||
{new Date(message.timestamp).toLocaleTimeString()}
|
||||
</p>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{isProcessing && (
|
||||
<div className="flex gap-3">
|
||||
<div className="w-8 h-8 rounded-full bg-primary/10 flex items-center justify-center">
|
||||
<Bot className="w-4 h-4 text-primary" />
|
||||
</div>
|
||||
<Card className="border-l-4 border-primary bg-card">
|
||||
<CardContent className="p-3">
|
||||
<div className="flex items-center gap-2">
|
||||
<Loader2 className="w-4 h-4 animate-spin text-primary" />
|
||||
<span className="text-sm text-primary">
|
||||
Thinking...
|
||||
</span>
|
||||
</div>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Input */}
|
||||
{currentSessionId && (
|
||||
<div className="border-t border-border p-4 space-y-3 bg-background">
|
||||
{/* Image Drop Zone (when visible) */}
|
||||
{showImageDropZone && (
|
||||
<ImageDropZone
|
||||
onImagesSelected={handleImagesSelected}
|
||||
images={selectedImages}
|
||||
maxFiles={5}
|
||||
className="mb-3"
|
||||
disabled={isProcessing || !isConnected}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* Text Input and Controls - with drag and drop support */}
|
||||
<div
|
||||
className={cn(
|
||||
"flex gap-2 transition-all duration-200 rounded-lg",
|
||||
isDragOver &&
|
||||
"bg-primary/10 ring-2 ring-primary ring-offset-2 ring-offset-background"
|
||||
)}
|
||||
onDragEnter={handleDragEnter}
|
||||
onDragLeave={handleDragLeave}
|
||||
onDragOver={handleDragOver}
|
||||
onDrop={handleDrop}
|
||||
>
|
||||
<div className="flex-1 relative">
|
||||
<Input
|
||||
ref={inputRef}
|
||||
placeholder={
|
||||
isDragOver
|
||||
? "Drop your images here..."
|
||||
: "Describe what you want to build..."
|
||||
}
|
||||
value={input}
|
||||
onChange={(e) => setInput(e.target.value)}
|
||||
onKeyPress={handleKeyPress}
|
||||
onPaste={handlePaste}
|
||||
disabled={isProcessing || !isConnected}
|
||||
data-testid="agent-input"
|
||||
className={cn(
|
||||
"bg-input border-border",
|
||||
selectedImages.length > 0 &&
|
||||
"border-primary/50 bg-primary/5",
|
||||
isDragOver &&
|
||||
"border-primary bg-primary/10"
|
||||
)}
|
||||
/>
|
||||
{selectedImages.length > 0 && !isDragOver && (
|
||||
<div className="absolute right-2 top-1/2 transform -translate-y-1/2 text-xs text-primary-foreground bg-primary px-2 py-1 rounded">
|
||||
{selectedImages.length} image
|
||||
{selectedImages.length > 1 ? "s" : ""}
|
||||
</div>
|
||||
)}
|
||||
{isDragOver && (
|
||||
<div className="absolute right-2 top-1/2 transform -translate-y-1/2 text-xs text-primary-foreground bg-primary px-2 py-1 rounded flex items-center gap-1">
|
||||
<Paperclip className="w-3 h-3" />
|
||||
Drop here
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Image Attachment Button */}
|
||||
<Button
|
||||
variant="outline"
|
||||
size="default"
|
||||
onClick={toggleImageDropZone}
|
||||
disabled={isProcessing || !isConnected}
|
||||
className={cn(
|
||||
showImageDropZone &&
|
||||
"bg-primary/20 text-primary border-primary",
|
||||
selectedImages.length > 0 && "border-primary"
|
||||
)}
|
||||
title="Attach images"
|
||||
>
|
||||
<Paperclip className="w-4 h-4" />
|
||||
</Button>
|
||||
|
||||
{/* Send Button */}
|
||||
<Button
|
||||
onClick={handleSend}
|
||||
disabled={
|
||||
(!input.trim() && selectedImages.length === 0) ||
|
||||
isProcessing ||
|
||||
!isConnected
|
||||
}
|
||||
data-testid="send-message"
|
||||
>
|
||||
<Send className="w-4 h-4" />
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{/* Selected Images Preview */}
|
||||
{selectedImages.length > 0 && (
|
||||
<div className="space-y-2">
|
||||
<div className="flex items-center justify-between">
|
||||
<p className="text-xs font-medium text-foreground">
|
||||
{selectedImages.length} image
|
||||
{selectedImages.length > 1 ? "s" : ""} attached
|
||||
</p>
|
||||
<button
|
||||
onClick={() => setSelectedImages([])}
|
||||
className="text-xs text-muted-foreground hover:text-foreground"
|
||||
disabled={isProcessing}
|
||||
>
|
||||
Clear all
|
||||
</button>
|
||||
</div>
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{selectedImages.map((image) => (
|
||||
<div
|
||||
key={image.id}
|
||||
className="relative group rounded-md border border-muted bg-muted/50 p-2 flex items-center space-x-2"
|
||||
>
|
||||
{/* Image thumbnail */}
|
||||
<div className="w-8 h-8 rounded overflow-hidden bg-muted flex-shrink-0">
|
||||
<img
|
||||
src={image.data}
|
||||
alt={image.filename}
|
||||
className="w-full h-full object-cover"
|
||||
/>
|
||||
</div>
|
||||
{/* Image info */}
|
||||
<div className="min-w-0 flex-1">
|
||||
<p className="text-xs font-medium text-foreground truncate">
|
||||
{image.filename}
|
||||
</p>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{formatFileSize(image.size)}
|
||||
</p>
|
||||
</div>
|
||||
{/* Remove button */}
|
||||
<button
|
||||
onClick={() => removeImage(image.id)}
|
||||
className="opacity-0 group-hover:opacity-100 transition-opacity p-1 rounded-full hover:bg-destructive hover:text-destructive-foreground text-muted-foreground"
|
||||
disabled={isProcessing}
|
||||
>
|
||||
<X className="h-3 w-3" />
|
||||
</button>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Helper function to format file size
|
||||
function formatFileSize(bytes: number): string {
|
||||
if (bytes === 0) return "0 B";
|
||||
const k = 1024;
|
||||
const sizes = ["B", "KB", "MB", "GB"];
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k));
|
||||
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + " " + sizes[i];
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,699 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useEffect, useState, useCallback, useMemo } from "react";
|
||||
import { useAppStore } from "@/store/app-store";
|
||||
import { getElectronAPI } from "@/lib/electron";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { HotkeyButton } from "@/components/ui/hotkey-button";
|
||||
import { Card } from "@/components/ui/card";
|
||||
import {
|
||||
Plus,
|
||||
RefreshCw,
|
||||
FileText,
|
||||
Image as ImageIcon,
|
||||
Trash2,
|
||||
Save,
|
||||
Upload,
|
||||
File,
|
||||
X,
|
||||
BookOpen,
|
||||
} from "lucide-react";
|
||||
import {
|
||||
useKeyboardShortcuts,
|
||||
useKeyboardShortcutsConfig,
|
||||
KeyboardShortcut,
|
||||
} from "@/hooks/use-keyboard-shortcuts";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
DialogDescription,
|
||||
DialogFooter,
|
||||
} from "@/components/ui/dialog";
|
||||
import { Input } from "@/components/ui/input";
|
||||
import { Label } from "@/components/ui/label";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
interface ContextFile {
|
||||
name: string;
|
||||
type: "text" | "image";
|
||||
content?: string;
|
||||
path: string;
|
||||
}
|
||||
|
||||
export function ContextView() {
|
||||
const { currentProject } = useAppStore();
|
||||
const shortcuts = useKeyboardShortcutsConfig();
|
||||
const [contextFiles, setContextFiles] = useState<ContextFile[]>([]);
|
||||
const [selectedFile, setSelectedFile] = useState<ContextFile | null>(null);
|
||||
const [isLoading, setIsLoading] = useState(true);
|
||||
const [isSaving, setIsSaving] = useState(false);
|
||||
const [hasChanges, setHasChanges] = useState(false);
|
||||
const [editedContent, setEditedContent] = useState("");
|
||||
const [isAddDialogOpen, setIsAddDialogOpen] = useState(false);
|
||||
const [isDeleteDialogOpen, setIsDeleteDialogOpen] = useState(false);
|
||||
const [newFileName, setNewFileName] = useState("");
|
||||
const [newFileType, setNewFileType] = useState<"text" | "image">("text");
|
||||
const [uploadedImageData, setUploadedImageData] = useState<string | null>(
|
||||
null
|
||||
);
|
||||
const [newFileContent, setNewFileContent] = useState("");
|
||||
const [isDropHovering, setIsDropHovering] = useState(false);
|
||||
|
||||
// Keyboard shortcuts for this view
|
||||
const contextShortcuts: KeyboardShortcut[] = useMemo(
|
||||
() => [
|
||||
{
|
||||
key: shortcuts.addContextFile,
|
||||
action: () => setIsAddDialogOpen(true),
|
||||
description: "Add new context file",
|
||||
},
|
||||
],
|
||||
[shortcuts]
|
||||
);
|
||||
useKeyboardShortcuts(contextShortcuts);
|
||||
|
||||
// Get context directory path for user-added context files
|
||||
const getContextPath = useCallback(() => {
|
||||
if (!currentProject) return null;
|
||||
return `${currentProject.path}/.automaker/context`;
|
||||
}, [currentProject]);
|
||||
|
||||
// Determine if a file is an image based on extension
|
||||
const isImageFile = (filename: string): boolean => {
|
||||
const imageExtensions = [
|
||||
".png",
|
||||
".jpg",
|
||||
".jpeg",
|
||||
".gif",
|
||||
".webp",
|
||||
".svg",
|
||||
".bmp",
|
||||
];
|
||||
const ext = filename.toLowerCase().substring(filename.lastIndexOf("."));
|
||||
return imageExtensions.includes(ext);
|
||||
};
|
||||
|
||||
// Load context files
|
||||
const loadContextFiles = useCallback(async () => {
|
||||
const contextPath = getContextPath();
|
||||
if (!contextPath) return;
|
||||
|
||||
setIsLoading(true);
|
||||
try {
|
||||
const api = getElectronAPI();
|
||||
|
||||
// Ensure context directory exists
|
||||
await api.mkdir(contextPath);
|
||||
|
||||
// Read directory contents
|
||||
const result = await api.readdir(contextPath);
|
||||
if (result.success && result.entries) {
|
||||
const files: ContextFile[] = result.entries
|
||||
.filter((entry) => entry.isFile)
|
||||
.map((entry) => ({
|
||||
name: entry.name,
|
||||
type: isImageFile(entry.name) ? "image" : "text",
|
||||
path: `${contextPath}/${entry.name}`,
|
||||
}));
|
||||
setContextFiles(files);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Failed to load context files:", error);
|
||||
} finally {
|
||||
setIsLoading(false);
|
||||
}
|
||||
}, [getContextPath]);
|
||||
|
||||
useEffect(() => {
|
||||
loadContextFiles();
|
||||
}, [loadContextFiles]);
|
||||
|
||||
// Load selected file content
|
||||
const loadFileContent = useCallback(async (file: ContextFile) => {
|
||||
try {
|
||||
const api = getElectronAPI();
|
||||
const result = await api.readFile(file.path);
|
||||
if (result.success && result.content !== undefined) {
|
||||
setEditedContent(result.content);
|
||||
setSelectedFile({ ...file, content: result.content });
|
||||
setHasChanges(false);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Failed to load file content:", error);
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Select a file
|
||||
const handleSelectFile = (file: ContextFile) => {
|
||||
if (hasChanges) {
|
||||
// Could add a confirmation dialog here
|
||||
}
|
||||
loadFileContent(file);
|
||||
};
|
||||
|
||||
// Save current file
|
||||
const saveFile = async () => {
|
||||
if (!selectedFile) return;
|
||||
|
||||
setIsSaving(true);
|
||||
try {
|
||||
const api = getElectronAPI();
|
||||
await api.writeFile(selectedFile.path, editedContent);
|
||||
setSelectedFile({ ...selectedFile, content: editedContent });
|
||||
setHasChanges(false);
|
||||
} catch (error) {
|
||||
console.error("Failed to save file:", error);
|
||||
} finally {
|
||||
setIsSaving(false);
|
||||
}
|
||||
};
|
||||
|
||||
// Handle content change
|
||||
const handleContentChange = (value: string) => {
|
||||
setEditedContent(value);
|
||||
setHasChanges(true);
|
||||
};
|
||||
|
||||
// Add new context file
|
||||
const handleAddFile = async () => {
|
||||
const contextPath = getContextPath();
|
||||
if (!contextPath || !newFileName.trim()) return;
|
||||
|
||||
try {
|
||||
const api = getElectronAPI();
|
||||
let filename = newFileName.trim();
|
||||
|
||||
// Add default extension if not provided
|
||||
if (newFileType === "text" && !filename.includes(".")) {
|
||||
filename += ".md";
|
||||
}
|
||||
|
||||
const filePath = `${contextPath}/${filename}`;
|
||||
|
||||
if (newFileType === "image" && uploadedImageData) {
|
||||
// Write image data
|
||||
await api.writeFile(filePath, uploadedImageData);
|
||||
} else {
|
||||
// Write text file with content (or empty if no content)
|
||||
await api.writeFile(filePath, newFileContent);
|
||||
}
|
||||
|
||||
setIsAddDialogOpen(false);
|
||||
setNewFileName("");
|
||||
setNewFileType("text");
|
||||
setUploadedImageData(null);
|
||||
setNewFileContent("");
|
||||
setIsDropHovering(false);
|
||||
await loadContextFiles();
|
||||
} catch (error) {
|
||||
console.error("Failed to add file:", error);
|
||||
}
|
||||
};
|
||||
|
||||
// Delete selected file
|
||||
const handleDeleteFile = async () => {
|
||||
if (!selectedFile) return;
|
||||
|
||||
try {
|
||||
const api = getElectronAPI();
|
||||
await api.deleteFile(selectedFile.path);
|
||||
|
||||
setIsDeleteDialogOpen(false);
|
||||
setSelectedFile(null);
|
||||
setEditedContent("");
|
||||
setHasChanges(false);
|
||||
await loadContextFiles();
|
||||
} catch (error) {
|
||||
console.error("Failed to delete file:", error);
|
||||
}
|
||||
};
|
||||
|
||||
// Handle image upload
|
||||
const handleImageUpload = async (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const file = e.target.files?.[0];
|
||||
if (!file) return;
|
||||
|
||||
const reader = new FileReader();
|
||||
reader.onload = (event) => {
|
||||
const base64 = event.target?.result as string;
|
||||
setUploadedImageData(base64);
|
||||
if (!newFileName) {
|
||||
setNewFileName(file.name);
|
||||
}
|
||||
};
|
||||
reader.readAsDataURL(file);
|
||||
};
|
||||
|
||||
// Handle drag and drop for file upload
|
||||
const handleDrop = async (e: React.DragEvent<HTMLDivElement>) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
|
||||
const files = Array.from(e.dataTransfer.files);
|
||||
if (files.length === 0) return;
|
||||
|
||||
const contextPath = getContextPath();
|
||||
if (!contextPath) return;
|
||||
|
||||
const api = getElectronAPI();
|
||||
|
||||
for (const file of files) {
|
||||
const reader = new FileReader();
|
||||
reader.onload = async (event) => {
|
||||
const content = event.target?.result as string;
|
||||
const filePath = `${contextPath}/${file.name}`;
|
||||
await api.writeFile(filePath, content);
|
||||
await loadContextFiles();
|
||||
};
|
||||
|
||||
if (isImageFile(file.name)) {
|
||||
reader.readAsDataURL(file);
|
||||
} else {
|
||||
reader.readAsText(file);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const handleDragOver = (e: React.DragEvent<HTMLDivElement>) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
};
|
||||
|
||||
// Handle drag and drop for .txt and .md files in the add context dialog textarea
|
||||
const handleTextAreaDrop = async (
|
||||
e: React.DragEvent<HTMLTextAreaElement>
|
||||
) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDropHovering(false);
|
||||
|
||||
const files = Array.from(e.dataTransfer.files);
|
||||
if (files.length === 0) return;
|
||||
|
||||
const file = files[0]; // Only handle the first file
|
||||
const fileName = file.name.toLowerCase();
|
||||
|
||||
// Only accept .txt and .md files
|
||||
if (!fileName.endsWith(".txt") && !fileName.endsWith(".md")) {
|
||||
console.warn("Only .txt and .md files are supported for drag and drop");
|
||||
return;
|
||||
}
|
||||
|
||||
const reader = new FileReader();
|
||||
reader.onload = (event) => {
|
||||
const content = event.target?.result as string;
|
||||
setNewFileContent(content);
|
||||
|
||||
// Auto-fill filename if empty
|
||||
if (!newFileName) {
|
||||
setNewFileName(file.name);
|
||||
}
|
||||
};
|
||||
reader.readAsText(file);
|
||||
};
|
||||
|
||||
const handleTextAreaDragOver = (e: React.DragEvent<HTMLTextAreaElement>) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDropHovering(true);
|
||||
};
|
||||
|
||||
const handleTextAreaDragLeave = (e: React.DragEvent<HTMLTextAreaElement>) => {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setIsDropHovering(false);
|
||||
};
|
||||
|
||||
if (!currentProject) {
|
||||
return (
|
||||
<div
|
||||
className="flex-1 flex items-center justify-center"
|
||||
data-testid="context-view-no-project"
|
||||
>
|
||||
<p className="text-muted-foreground">No project selected</p>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
if (isLoading) {
|
||||
return (
|
||||
<div
|
||||
className="flex-1 flex items-center justify-center"
|
||||
data-testid="context-view-loading"
|
||||
>
|
||||
<RefreshCw className="w-6 h-6 animate-spin text-muted-foreground" />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
className="flex-1 flex flex-col overflow-hidden content-bg"
|
||||
data-testid="context-view"
|
||||
>
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between p-4 border-b border-border bg-glass backdrop-blur-md">
|
||||
<div className="flex items-center gap-3">
|
||||
<BookOpen className="w-5 h-5 text-muted-foreground" />
|
||||
<div>
|
||||
<h1 className="text-xl font-bold">Context Files</h1>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
Add context files to include in AI prompts
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex gap-2">
|
||||
<HotkeyButton
|
||||
size="sm"
|
||||
onClick={() => setIsAddDialogOpen(true)}
|
||||
hotkey={shortcuts.addContextFile}
|
||||
hotkeyActive={false}
|
||||
data-testid="add-context-file"
|
||||
>
|
||||
<Plus className="w-4 h-4 mr-2" />
|
||||
Add File
|
||||
</HotkeyButton>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Main content area with file list and editor */}
|
||||
<div
|
||||
className="flex-1 flex overflow-hidden"
|
||||
onDrop={handleDrop}
|
||||
onDragOver={handleDragOver}
|
||||
>
|
||||
{/* Left Panel - File List */}
|
||||
<div className="w-64 border-r border-border flex flex-col overflow-hidden">
|
||||
<div className="p-3 border-b border-border">
|
||||
<h2 className="text-sm font-semibold text-muted-foreground">
|
||||
Context Files ({contextFiles.length})
|
||||
</h2>
|
||||
</div>
|
||||
<div
|
||||
className="flex-1 overflow-y-auto p-2"
|
||||
data-testid="context-file-list"
|
||||
>
|
||||
{contextFiles.length === 0 ? (
|
||||
<div className="flex flex-col items-center justify-center h-full text-center p-4">
|
||||
<Upload className="w-8 h-8 text-muted-foreground mb-2" />
|
||||
<p className="text-sm text-muted-foreground">
|
||||
No context files yet.
|
||||
<br />
|
||||
Drop files here or click Add File.
|
||||
</p>
|
||||
</div>
|
||||
) : (
|
||||
<div className="space-y-1">
|
||||
{contextFiles.map((file) => (
|
||||
<button
|
||||
key={file.path}
|
||||
onClick={() => handleSelectFile(file)}
|
||||
className={cn(
|
||||
"w-full flex items-center gap-2 px-3 py-2 rounded-lg text-left transition-colors",
|
||||
selectedFile?.path === file.path
|
||||
? "bg-primary/20 text-foreground border border-primary/30"
|
||||
: "text-muted-foreground hover:bg-accent hover:text-foreground"
|
||||
)}
|
||||
data-testid={`context-file-${file.name}`}
|
||||
>
|
||||
{file.type === "image" ? (
|
||||
<ImageIcon className="w-4 h-4 flex-shrink-0" />
|
||||
) : (
|
||||
<FileText className="w-4 h-4 flex-shrink-0" />
|
||||
)}
|
||||
<span className="truncate text-sm">{file.name}</span>
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Right Panel - Editor/Preview */}
|
||||
<div className="flex-1 flex flex-col overflow-hidden">
|
||||
{selectedFile ? (
|
||||
<>
|
||||
{/* File toolbar */}
|
||||
<div className="flex items-center justify-between p-3 border-b border-border bg-card">
|
||||
<div className="flex items-center gap-2">
|
                  {selectedFile.type === "image" ? (
                    <ImageIcon className="w-4 h-4 text-muted-foreground" />
                  ) : (
                    <FileText className="w-4 h-4 text-muted-foreground" />
                  )}
                  <span className="text-sm font-medium">
                    {selectedFile.name}
                  </span>
                </div>
                <div className="flex gap-2">
                  {selectedFile.type === "text" && (
                    <Button
                      size="sm"
                      onClick={saveFile}
                      disabled={!hasChanges || isSaving}
                      data-testid="save-context-file"
                    >
                      <Save className="w-4 h-4 mr-2" />
                      {isSaving ? "Saving..." : hasChanges ? "Save" : "Saved"}
                    </Button>
                  )}
                  <Button
                    variant="outline"
                    size="sm"
                    onClick={() => setIsDeleteDialogOpen(true)}
                    className="text-red-500 hover:text-red-400 hover:border-red-500/50"
                    data-testid="delete-context-file"
                  >
                    <Trash2 className="w-4 h-4" />
                  </Button>
                </div>
              </div>

              {/* Content area */}
              <div className="flex-1 overflow-hidden p-4">
                {selectedFile.type === "image" ? (
                  <div
                    className="h-full flex items-center justify-center bg-card rounded-lg"
                    data-testid="image-preview"
                  >
                    <img
                      src={editedContent}
                      alt={selectedFile.name}
                      className="max-w-full max-h-full object-contain"
                    />
                  </div>
                ) : (
                  <Card className="h-full overflow-hidden">
                    <textarea
                      className="w-full h-full p-4 font-mono text-sm bg-transparent resize-none focus:outline-none"
                      value={editedContent}
                      onChange={(e) => handleContentChange(e.target.value)}
                      placeholder="Enter context content here..."
                      spellCheck={false}
                      data-testid="context-editor"
                    />
                  </Card>
                )}
              </div>
            </>
          ) : (
            <div className="flex-1 flex items-center justify-center">
              <div className="text-center">
                <File className="w-12 h-12 text-muted-foreground mx-auto mb-3" />
                <p className="text-foreground-secondary">
                  Select a file to view or edit
                </p>
                <p className="text-muted-foreground text-sm mt-1">
                  Or drop files here to add them
                </p>
              </div>
            </div>
          )}
        </div>
      </div>

      {/* Add File Dialog */}
      <Dialog open={isAddDialogOpen} onOpenChange={setIsAddDialogOpen}>
        <DialogContent
          data-testid="add-context-dialog"
          className="w-[60vw] max-w-[60vw] max-h-[80vh] flex flex-col"
        >
          <DialogHeader>
            <DialogTitle>Add Context File</DialogTitle>
            <DialogDescription>
              Add a new text or image file to the context.
            </DialogDescription>
          </DialogHeader>
          <div className="space-y-4 py-4">
            <div className="flex gap-2">
              <Button
                variant={newFileType === "text" ? "default" : "outline"}
                size="sm"
                onClick={() => setNewFileType("text")}
                data-testid="add-text-type"
              >
                <FileText className="w-4 h-4 mr-2" />
                Text
              </Button>
              <Button
                variant={newFileType === "image" ? "default" : "outline"}
                size="sm"
                onClick={() => setNewFileType("image")}
                data-testid="add-image-type"
              >
                <ImageIcon className="w-4 h-4 mr-2" />
                Image
              </Button>
            </div>

            <div className="space-y-2">
              <Label htmlFor="filename">File Name</Label>
              <Input
                id="filename"
                value={newFileName}
                onChange={(e) => setNewFileName(e.target.value)}
                placeholder={
                  newFileType === "text" ? "context.md" : "image.png"
                }
                data-testid="new-file-name"
              />
            </div>

            {newFileType === "text" && (
              <div className="space-y-2">
                <Label htmlFor="context-content">Context Content</Label>
                <div
                  className={cn(
                    "relative rounded-lg transition-colors",
                    isDropHovering && "ring-2 ring-primary"
                  )}
                >
                  <textarea
                    id="context-content"
                    value={newFileContent}
                    onChange={(e) => setNewFileContent(e.target.value)}
                    onDrop={handleTextAreaDrop}
                    onDragOver={handleTextAreaDragOver}
                    onDragLeave={handleTextAreaDragLeave}
                    placeholder="Enter context content here or drag & drop a .txt or .md file..."
                    className={cn(
                      "w-full h-40 p-3 font-mono text-sm bg-background border border-border rounded-lg resize-none focus:outline-none focus:ring-2 focus:ring-ring focus:border-transparent",
                      isDropHovering && "border-primary bg-primary/10"
                    )}
                    spellCheck={false}
                    data-testid="new-file-content"
                  />
                  {isDropHovering && (
                    <div className="absolute inset-0 flex items-center justify-center bg-primary/20 rounded-lg pointer-events-none">
                      <div className="flex flex-col items-center text-primary">
                        <Upload className="w-8 h-8 mb-2" />
                        <span className="text-sm font-medium">
                          Drop .txt or .md file here
                        </span>
                      </div>
                    </div>
                  )}
                </div>
                <p className="text-xs text-muted-foreground">
                  Drag & drop .txt or .md files to import their content
                </p>
              </div>
            )}

            {newFileType === "image" && (
              <div className="space-y-2">
                <Label>Upload Image</Label>
                <div className="border-2 border-dashed border-border rounded-lg p-4 text-center">
                  <input
                    type="file"
                    accept="image/*"
                    onChange={handleImageUpload}
                    className="hidden"
                    id="image-upload"
                    data-testid="image-upload-input"
                  />
                  <label
                    htmlFor="image-upload"
                    className="cursor-pointer flex flex-col items-center"
                  >
                    {uploadedImageData ? (
                      <img
                        src={uploadedImageData}
                        alt="Preview"
                        className="max-w-32 max-h-32 object-contain mb-2"
                      />
                    ) : (
                      <Upload className="w-8 h-8 text-muted-foreground mb-2" />
                    )}
                    <span className="text-sm text-muted-foreground">
                      {uploadedImageData
                        ? "Click to change"
                        : "Click to upload"}
                    </span>
                  </label>
                </div>
              </div>
            )}
          </div>
          <DialogFooter>
            <Button
              variant="outline"
              onClick={() => {
                setIsAddDialogOpen(false);
                setNewFileName("");
                setUploadedImageData(null);
                setNewFileContent("");
                setIsDropHovering(false);
              }}
            >
              Cancel
            </Button>
            <HotkeyButton
              onClick={handleAddFile}
              disabled={
                !newFileName.trim() ||
                (newFileType === "image" && !uploadedImageData)
              }
              hotkey={{ key: "Enter", cmdCtrl: true }}
              hotkeyActive={isAddDialogOpen}
              data-testid="confirm-add-file"
            >
              Add File
            </HotkeyButton>
          </DialogFooter>
        </DialogContent>
      </Dialog>

      {/* Delete Confirmation Dialog */}
      <Dialog open={isDeleteDialogOpen} onOpenChange={setIsDeleteDialogOpen}>
        <DialogContent data-testid="delete-context-dialog">
          <DialogHeader>
            <DialogTitle>Delete Context File</DialogTitle>
            <DialogDescription>
              Are you sure you want to delete "{selectedFile?.name}"? This
              action cannot be undone.
            </DialogDescription>
          </DialogHeader>
          <DialogFooter>
            <Button
              variant="outline"
              onClick={() => setIsDeleteDialogOpen(false)}
            >
              Cancel
            </Button>
            <Button
              variant="destructive"
              onClick={handleDeleteFile}
              className="bg-red-600 hover:bg-red-700"
              data-testid="confirm-delete-file"
            >
              Delete
            </Button>
          </DialogFooter>
        </DialogContent>
      </Dialog>
    </div>
  );
}
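Note: the JSX above references handlers (handleImageUpload, handleTextAreaDrop, handleTextAreaDragOver, handleTextAreaDragLeave) and state setters (setUploadedImageData, setNewFileContent, setIsDropHovering) that are declared earlier in this component and are not part of this hunk. The sketch below is only an illustration of how such handlers could be wired up with the browser FileReader API, assuming the image is stored as a data URL (consistent with uploadedImageData being used directly as an img src) and that dropped .txt/.md files are read into the new-file textarea; it is not the project's actual implementation.

import type { ChangeEvent, DragEvent } from "react";

// Illustrative sketch only — assumed handler bodies, not code from this diff.
// The set* functions are assumed to be the component's useState setters.

const handleImageUpload = (e: ChangeEvent<HTMLInputElement>) => {
  const file = e.target.files?.[0];
  if (!file) return;
  const reader = new FileReader();
  // Store the image as a data URL so it can be previewed via <img src={uploadedImageData}>.
  reader.onload = () => setUploadedImageData(reader.result as string);
  reader.readAsDataURL(file);
};

const handleTextAreaDrop = (e: DragEvent<HTMLTextAreaElement>) => {
  e.preventDefault();
  setIsDropHovering(false);
  const file = e.dataTransfer.files?.[0];
  // Only import plain-text context files, matching the ".txt or .md" hint shown in the UI.
  if (!file || !/\.(txt|md)$/i.test(file.name)) return;
  const reader = new FileReader();
  reader.onload = () => setNewFileContent(String(reader.result ?? ""));
  reader.readAsText(file);
};

const handleTextAreaDragOver = (e: DragEvent<HTMLTextAreaElement>) => {
  e.preventDefault();
  setIsDropHovering(true);
};

const handleTextAreaDragLeave = () => setIsDropHovering(false);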
Some files were not shown because too many files have changed in this diff.