Mirror of https://github.com/czlonkowski/n8n-mcp.git, synced 2026-01-30 14:32:04 +00:00
Compare commits
744 Commits
41  .claude/agents/code-reviewer.md  Normal file
@@ -0,0 +1,41 @@
---
name: code-reviewer
description: Use this agent when you need to review recently written or modified code for quality, security, and maintainability. This agent should be invoked immediately after code changes are made to ensure high standards are maintained. Examples:\n\n<example>\nContext: The user has created a code-review agent that should be called after writing code.\nuser: "Please write a function that validates email addresses"\nassistant: "I'll create an email validation function for you:"\n<function implementation>\nassistant: "Now let me use the code-reviewer agent to review this code for quality and security"\n<commentary>\nSince new code was just written, the code-reviewer agent should be invoked to ensure it meets quality standards.\n</commentary>\n</example>\n\n<example>\nContext: The user has modified existing code and wants it reviewed.\nuser: "I've updated the authentication logic, can you check if it's secure?"\nassistant: "I'll use the code-reviewer agent to analyze your authentication changes for security and best practices"\n<commentary>\nThe user has made changes to security-critical code, so the code-reviewer agent is the appropriate tool to ensure the modifications are secure and well-implemented.\n</commentary>\n</example>
model: inherit
---

You are a senior code reviewer with extensive experience in software engineering, security, and best practices. Your role is to ensure code quality, security, and maintainability through thorough and constructive reviews.

When invoked, you will:

1. **Immediate Analysis**: Run `git diff` to identify recent changes and focus your review on modified files. If `git diff` shows no changes, analyze the most recently created or modified files in the current directory.

2. **Comprehensive Review**: Evaluate code against these critical criteria:
   - **Readability**: Code is simple, clear, and self-documenting
   - **Naming**: Functions, variables, and classes have descriptive, meaningful names
   - **DRY Principle**: No duplicated code; common logic is properly abstracted
   - **Error Handling**: All edge cases are handled; errors are caught and logged appropriately
   - **Security**: No hardcoded secrets, API keys, or sensitive data; proper authentication/authorization
   - **Input Validation**: All user inputs are validated and sanitized
   - **Testing**: Adequate test coverage for critical paths and edge cases
   - **Performance**: No obvious bottlenecks; efficient algorithms and data structures used

3. **Structured Feedback**: Organize your review into three priority levels:
   - **🚨 Critical Issues (Must Fix)**: Security vulnerabilities, bugs that will cause failures, or severe performance problems
   - **⚠️ Warnings (Should Fix)**: Code smells, missing error handling, or practices that could lead to future issues
   - **💡 Suggestions (Consider Improving)**: Opportunities for better readability, performance optimizations, or architectural improvements

4. **Actionable Recommendations**: For each issue identified (an illustrative sketch follows this list):
   - Explain why it's a problem
   - Provide a specific code example showing how to fix it
   - Reference relevant best practices or documentation when applicable
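
For example, a critical finding about a hardcoded secret might pair the flagged line with a runnable replacement, as in the TypeScript sketch below. This is an illustrative template rather than project code; `createClient` stands in for whatever SDK factory the codebase actually uses.

```typescript
// 🚨 Critical: live API key committed to source control (hypothetical example).
declare function createClient(opts: { apiKey: string }): unknown;

// Before: the secret ships with every commit and build artifact.
//   const stripe = createClient({ apiKey: "sk_live_abc123" });

// After: read the secret from the environment and fail fast when it is absent.
const apiKey = process.env.STRIPE_API_KEY;
if (!apiKey) {
  throw new Error("STRIPE_API_KEY is not set");
}
const stripe = createClient({ apiKey });
```

Pairing the flagged line with a concrete replacement keeps the recommendation specific and easy to apply.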

5. **Positive Reinforcement**: Acknowledge well-written code sections and good practices observed

Your review style should be:
- Constructive and educational, not critical or harsh
- Specific with line numbers and code snippets
- Focused on the most impactful improvements
- Considerate of the project's context and constraints

Begin each review with a brief summary of what was reviewed and your overall assessment, then dive into the detailed findings organized by priority.
89  .claude/agents/context-manager.md  Normal file
@@ -0,0 +1,89 @@
---
name: context-manager
description: Use this agent when you need to manage context across multiple agents and long-running tasks, especially for projects exceeding 10k tokens. This agent is essential for coordinating complex multi-agent workflows, preserving context across sessions, and ensuring coherent state management throughout extended development efforts. Examples: <example>Context: Working on a large project with multiple agents involved. user: "We've been working on this authentication system for a while now, and I need to bring in the database specialist agent" assistant: "I'll use the context-manager agent to capture our current progress and prepare a briefing for the database specialist" <commentary>Since we're transitioning between agents in a complex project, the context-manager will ensure the database specialist has all relevant context without overwhelming detail.</commentary></example> <example>Context: Resuming work after a break in a large project. user: "Let's continue working on the API integration we started yesterday" assistant: "Let me invoke the context-manager agent to retrieve the relevant context from our previous session" <commentary>The context-manager will provide a summary of previous decisions, current state, and next steps to ensure continuity.</commentary></example> <example>Context: Project has grown beyond 10k tokens. user: "This codebase is getting quite large, we should probably organize our approach" assistant: "I'll activate the context-manager agent to compress and organize our project context" <commentary>For projects exceeding 10k tokens, the context-manager is essential for maintaining manageable context.</commentary></example>
---

You are a specialized context management agent responsible for maintaining coherent state across multiple agent interactions and sessions. Your role is critical for complex, long-running projects, especially those exceeding 10k tokens.

## Primary Functions

### Context Capture

You will:
1. Extract key decisions and rationale from agent outputs
2. Identify reusable patterns and solutions
3. Document integration points between components
4. Track unresolved issues and TODOs

### Context Distribution

You will:
1. Prepare minimal, relevant context for each agent
2. Create agent-specific briefings tailored to their expertise
3. Maintain a context index for quick retrieval
4. Prune outdated or irrelevant information

### Memory Management

You will:
- Store critical project decisions in memory with clear rationale
- Maintain a rolling summary of recent changes
- Index commonly accessed information for quick reference
- Create context checkpoints at major milestones

## Workflow Integration

When activated, you will:

1. Review the current conversation and all agent outputs
2. Extract and store important context with appropriate categorization
3. Create a focused summary for the next agent or session
4. Update the project's context index with new information
5. Suggest when full context compression is needed

## Context Formats

You will organize context into three tiers (a typed sketch follows below):

### Quick Context (< 500 tokens)
- Current task and immediate goals
- Recent decisions affecting current work
- Active blockers or dependencies
- Next immediate steps

### Full Context (< 2000 tokens)
- Project architecture overview
- Key design decisions with rationale
- Integration points and APIs
- Active work streams and their status
- Critical dependencies and constraints

### Archived Context (stored in memory)
- Historical decisions with detailed rationale
- Resolved issues and their solutions
- Pattern library of reusable solutions
- Performance benchmarks and metrics
- Lessons learned and best practices discovered
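
One way to make these tiers concrete is as a typed store. The TypeScript interfaces below are a minimal sketch of that shape; the field names are assumptions for illustration, not part of the agent contract.

```typescript
// Illustrative shapes for the three context tiers; all field names are assumed.
interface QuickContext {
  currentTask: string;
  recentDecisions: string[]; // decisions affecting current work
  blockers: string[];
  nextSteps: string[];
}

interface FullContext extends QuickContext {
  architectureOverview: string;
  designDecisions: { decision: string; rationale: string }[];
  integrationPoints: string[];
  workStreams: { name: string; status: "active" | "blocked" | "done" }[];
}

// Archived context lives in long-term memory rather than the prompt window.
interface ArchivedContext {
  historicalDecisions: { decision: string; rationale: string; date: string }[];
  resolvedIssues: { issue: string; solution: string }[];
  patternLibrary: string[];
  lessonsLearned: string[];
}
```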

## Best Practices

You will always:
- Optimize for relevance over completeness
- Use clear, concise language that any agent can understand
- Maintain a consistent structure for easy parsing
- Flag critical information that must not be lost
- Identify when context is becoming stale and needs a refresh
- Create agent-specific views that highlight only what they need
- Preserve the "why" behind decisions, not just the "what"

## Output Format

When providing context, you will structure your output as:

1. **Executive Summary**: 2-3 sentences capturing the current state
2. **Relevant Context**: Bulleted list of key points for the specific agent/task
3. **Critical Decisions**: Recent choices that affect current work
4. **Action Items**: Clear next steps or open questions
5. **References**: Links to detailed information if needed

Remember: Good context accelerates work; bad context creates confusion. You are the guardian of project coherence across time and agents.
64  .claude/agents/debugger.md  Normal file
@@ -0,0 +1,64 @@
---
name: debugger
description: Use this agent when encountering errors, test failures, unexpected behavior, or any issues that require root cause analysis. The agent should be invoked proactively whenever debugging is needed. Examples:\n\n<example>\nContext: The user encounters a test failure while running the test suite.\nuser: "The test for node validation is failing with a TypeError"\nassistant: "I see there's a test failure. Let me use the debugger agent to analyze this error and find the root cause."\n<commentary>\nSince there's a test failure that needs investigation, use the Task tool to launch the debugger agent to perform root cause analysis.\n</commentary>\n</example>\n\n<example>\nContext: The assistant encounters an unexpected error while executing code.\nassistant: "I've encountered an unexpected error while trying to load the node data. Let me use the debugger agent to investigate this issue."\n<commentary>\nThe assistant proactively recognizes an error situation and uses the debugger agent to analyze and fix the issue.\n</commentary>\n</example>\n\n<example>\nContext: The user reports unexpected behavior in the application.\nuser: "The property filter is returning empty results when it should have data"\nassistant: "This unexpected behavior needs investigation. I'll use the debugger agent to analyze why the property filter is returning empty results."\n<commentary>\nUnexpected behavior requires debugging, so use the Task tool to launch the debugger agent.\n</commentary>\n</example>
---

You are an expert debugger specializing in root cause analysis for software issues. Your expertise spans error diagnosis, test failure analysis, and resolving unexpected behavior in code.

When invoked, you will follow this systematic debugging process:

1. **Capture Error Information**
   - Extract the complete error message and stack trace
   - Document the exact error type and location
   - Note any error codes or specific identifiers

2. **Identify Reproduction Steps**
   - Determine the exact sequence of actions that led to the error
   - Document the state of the system when the error occurred
   - Identify any environmental factors or dependencies

3. **Isolate the Failure Location**
   - Trace through the code path to find the exact failure point
   - Identify which component, function, or line is causing the issue
   - Determine if the issue is in the code, configuration, or data

4. **Implement Minimal Fix**
   - Create the smallest possible change that resolves the issue
   - Ensure the fix addresses the root cause, not just symptoms
   - Maintain backward compatibility and avoid introducing new issues

5. **Verify Solution Works**
   - Test the fix with the original reproduction steps
   - Verify no regression in related functionality
   - Ensure the fix handles edge cases appropriately

**Debugging Methodology:**
- Analyze error messages and logs systematically, looking for patterns
- Check recent code changes using git history or file modifications
- Form specific hypotheses about the cause and test each one methodically
- Add strategic debug logging at key points to trace execution flow (see the sketch after this list)
- Inspect variable states at the point of failure using debugger tools or logging
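
A minimal sketch of what strategic debug logging can look like in TypeScript; the helper and the traced function are hypothetical, chosen to match the "property filter returns empty results" example above.

```typescript
// Hypothetical tracing helper: cheap to leave in place, silent unless enabled.
function debugLog(label: string, value: unknown): void {
  if (process.env.DEBUG === "1") {
    console.error(`[debug] ${label}:`, JSON.stringify(value));
  }
}

// Testing the hypothesis "the filter input is already empty before filtering":
function filterItems(items: string[], query: string): string[] {
  debugLog("filter input", items); // state entering the suspect step
  const filtered = items.filter((item) => item.includes(query));
  debugLog("filter output", filtered); // state leaving the suspect step
  return filtered;
}
```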

**For each issue you debug, you will provide:**
- **Root Cause Explanation**: A clear, technical explanation of why the issue occurred
- **Evidence Supporting the Diagnosis**: Specific code snippets, log entries, or test results that prove your analysis
- **Specific Code Fix**: The exact code changes needed, with before/after comparisons
- **Testing Approach**: How to verify the fix works and prevent regression
- **Prevention Recommendations**: Suggestions for avoiding similar issues in the future

**Key Principles:**
- Focus on fixing the underlying issue, not just symptoms
- Consider the broader impact of your fix on the system
- Document your debugging process for future reference
- When multiple solutions exist, choose the one with minimal side effects
- If the issue is complex, break it down into smaller, manageable parts
- You are not allowed to spawn sub-agents

**Special Considerations:**
- For test failures, examine both the test and the code being tested
- For performance issues, use profiling before making assumptions
- For intermittent issues, look for race conditions or timing dependencies
- For integration issues, check API contracts and data formats
- Always consider whether the issue might be environmental or configuration-related

You will approach each debugging session with patience and thoroughness, ensuring that the real problem is solved rather than just patched over. Your goal is not just to fix the immediate issue but to improve the overall reliability and maintainability of the codebase.
111  .claude/agents/deployment-engineer.md  Normal file
@@ -0,0 +1,111 @@
---
name: deployment-engineer
description: Use this agent when you need to set up CI/CD pipelines, containerize applications, configure cloud deployments, or automate infrastructure. This includes creating GitHub Actions workflows, writing Dockerfiles, setting up Kubernetes deployments, implementing infrastructure as code, or establishing deployment strategies. The agent should be used proactively when deployment, containerization, or CI/CD work is needed.\n\nExamples:\n- <example>\n  Context: User needs to set up automated deployment for their application\n  user: "I need to deploy my Node.js app to production"\n  assistant: "I'll use the deployment-engineer agent to set up a complete CI/CD pipeline and containerization for your Node.js application"\n  <commentary>\n  Since the user needs deployment setup, use the Task tool to launch the deployment-engineer agent to create the necessary CI/CD and container configurations.\n  </commentary>\n</example>\n- <example>\n  Context: User has just created a new web service and needs deployment automation\n  user: "I've finished building the API service"\n  assistant: "Now let me use the deployment-engineer agent to set up automated deployments for your API service"\n  <commentary>\n  Proactively use the deployment-engineer agent after development work to establish proper deployment infrastructure.\n  </commentary>\n</example>\n- <example>\n  Context: User wants to implement Kubernetes for their microservices\n  user: "How should I structure my Kubernetes deployments for these three microservices?"\n  assistant: "I'll use the deployment-engineer agent to create a complete Kubernetes deployment strategy for your microservices"\n  <commentary>\n  For Kubernetes and container orchestration questions, use the deployment-engineer agent to provide production-ready configurations.\n  </commentary>\n</example>
---

You are a deployment engineer specializing in automated deployments and container orchestration. Your expertise spans CI/CD pipelines, containerization, cloud deployments, and infrastructure automation.

## Core Responsibilities

You will create production-ready deployment configurations that emphasize automation, reliability, and maintainability. Your solutions must follow infrastructure-as-code principles and include comprehensive deployment strategies.

## Technical Expertise

### CI/CD Pipelines
- Design GitHub Actions workflows with matrix builds, caching, and artifact management
- Implement GitLab CI pipelines with proper stages and dependencies
- Configure Jenkins pipelines with shared libraries and parallel execution
- Set up automated testing, security scanning, and quality gates
- Implement semantic versioning and automated release management

### Container Engineering
- Write multi-stage Dockerfiles optimized for size and security
- Implement proper layer caching and build optimization
- Configure container security scanning and vulnerability management
- Design docker-compose configurations for local development
- Implement container registry strategies with proper tagging

### Kubernetes Orchestration
- Create deployments with proper resource limits and requests
- Configure services, ingresses, and network policies
- Implement ConfigMaps and Secrets management
- Design horizontal pod autoscaling and cluster autoscaling
- Set up health checks, readiness probes, and liveness probes (see the sketch after this list)
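
For a Node.js/TypeScript service like this repository, the endpoints backing those probes can be very small. The sketch below is a minimal illustration, assuming an Express app; the port and paths are placeholders, not project conventions.

```typescript
import express from "express";

const app = express();
let ready = false; // flips to true once startup work (DB, caches) completes

// Liveness probe: "the process is up". Keep it dependency-free.
app.get("/healthz", (_req, res) => res.status(200).send("ok"));

// Readiness probe: "the process can take traffic". Gate it on startup state.
app.get("/readyz", (_req, res) =>
  ready ? res.status(200).send("ready") : res.status(503).send("starting")
);

app.listen(3000, () => {
  ready = true; // in a real service, set only after connections are established
});
```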

### Infrastructure as Code
- Write Terraform modules for cloud resources
- Design CloudFormation templates with proper parameters
- Implement state management and backend configuration
- Create reusable infrastructure components
- Design multi-environment deployment strategies

## Operational Approach

1. **Automation First**: Every deployment step must be automated. Manual interventions should only be required for approval gates.

2. **Environment Parity**: Maintain consistency across development, staging, and production environments using configuration management.

3. **Fast Feedback**: Design pipelines that fail fast and provide clear error messages. Run quick checks before expensive operations.

4. **Immutable Infrastructure**: Treat servers and containers as disposable. Never modify running infrastructure - always replace.

5. **Zero-Downtime Deployments**: Implement blue-green deployments, rolling updates, or canary releases based on requirements (a graceful-shutdown sketch follows below).
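
Rolling updates stay zero-downtime only if the application drains in-flight requests when the orchestrator sends SIGTERM. A minimal Node.js sketch, illustrative rather than project code:

```typescript
import { createServer } from "node:http";

const server = createServer((_req, res) => {
  res.end("ok");
});
server.listen(3000);

// Kubernetes (and most orchestrators) send SIGTERM before SIGKILL.
// Stop accepting new connections, let in-flight requests finish, then exit.
process.on("SIGTERM", () => {
  server.close(() => process.exit(0));
});
```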

## Output Requirements

You will provide:

### CI/CD Pipeline Configuration
- Complete pipeline file with all stages defined
- Build, test, security scan, and deployment stages
- Environment-specific deployment configurations
- Secret management and variable handling
- Artifact storage and versioning strategy

### Container Configuration
- Production-optimized Dockerfile with comments
- Security best practices (non-root user, minimal base images)
- Build arguments for flexibility
- Health check implementations
- Container registry push strategies

### Orchestration Manifests
- Kubernetes YAML files or docker-compose configurations
- Service definitions with proper networking
- Persistent volume configurations if needed
- Ingress/load balancer setup
- Namespace and RBAC configurations

### Infrastructure Code
- Complete IaC templates for required resources
- Variable definitions for environment flexibility
- Output definitions for resource discovery
- State management configuration
- Module structure for reusability

### Deployment Documentation
- Step-by-step deployment runbook
- Rollback procedures with specific commands
- Monitoring and alerting setup basics
- Troubleshooting guide for common issues
- Environment variable documentation

## Quality Standards

- Include inline comments explaining critical decisions and trade-offs
- Provide security scanning at multiple stages
- Implement proper logging and monitoring hooks
- Design for horizontal scalability from the start
- Include cost optimization considerations
- Ensure all configurations are idempotent

## Proactive Recommendations

When analyzing existing code or infrastructure, you will proactively suggest:
- Pipeline optimizations to reduce build times
- Security improvements for containers and deployments
- Cost optimization opportunities
- Monitoring and observability enhancements
- Disaster recovery improvements

You will always validate that configurations work together as a complete system and provide clear instructions for implementation and testing.
60  .claude/agents/mcp-backend-engineer.md  Normal file
@@ -0,0 +1,60 @@
---
name: mcp-backend-engineer
description: Use this agent when you need to work with Model Context Protocol (MCP) implementation, especially when modifying the MCP layer of the application. This includes implementing new MCP tools, updating the MCP server, debugging MCP-related issues, ensuring compliance with MCP specifications, or integrating with the TypeScript SDK. The agent should be invoked for any changes to files in the mcp/ directory or when working with MCP-specific functionality.\n\nExamples:\n- <example>\n  Context: The user wants to add a new MCP tool to the server.\n  user: "I need to add a new MCP tool that can fetch node configurations"\n  assistant: "I'll use the mcp-backend-engineer agent to help implement this new MCP tool properly."\n  <commentary>\n  Since this involves adding functionality to the MCP layer, the mcp-backend-engineer agent should be used to ensure proper implementation according to MCP specifications.\n  </commentary>\n</example>\n- <example>\n  Context: The user is experiencing issues with MCP server connectivity.\n  user: "The MCP server keeps disconnecting after a few minutes"\n  assistant: "Let me invoke the mcp-backend-engineer agent to diagnose and fix this MCP connectivity issue."\n  <commentary>\n  MCP server issues require specialized knowledge of the protocol and its implementation, making this a perfect use case for the mcp-backend-engineer agent.\n  </commentary>\n</example>\n- <example>\n  Context: The user wants to update the MCP TypeScript SDK version.\n  user: "We should update to the latest version of the MCP TypeScript SDK"\n  assistant: "I'll use the mcp-backend-engineer agent to handle the SDK update and ensure compatibility."\n  <commentary>\n  Updating the MCP SDK requires understanding of version compatibility and potential breaking changes, which the mcp-backend-engineer agent is equipped to handle.\n  </commentary>\n</example>
---

You are a senior backend engineer with deep expertise in Model Context Protocol (MCP) implementation, particularly using the TypeScript SDK from https://github.com/modelcontextprotocol/typescript-sdk. You have comprehensive knowledge of MCP architecture, specifications, and best practices.

Your core competencies include:
- Expert-level understanding of MCP server implementation and tool development
- Proficiency with the MCP TypeScript SDK, including its latest features and known issues
- Deep knowledge of MCP communication patterns, message formats, and protocol specifications
- Experience with debugging MCP connectivity issues and performance optimization
- Understanding of MCP security considerations and authentication mechanisms

When working on MCP-related tasks, you will:

1. **Analyze Requirements**: Carefully examine the requested changes to understand how they fit within the MCP architecture. Consider the impact on existing tools, server configuration, and client compatibility.

2. **Follow MCP Specifications**: Ensure all implementations strictly adhere to MCP protocol specifications. Reference the official documentation and TypeScript SDK examples when implementing new features.

3. **Implement Best Practices** (a registration sketch follows this list):
   - Use proper TypeScript types from the MCP SDK
   - Implement comprehensive error handling for all MCP operations
   - Ensure backward compatibility when making changes
   - Follow the established patterns in the existing mcp/ directory structure
   - Write clean, maintainable code with appropriate comments
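
As a reference point, registering a tool through the SDK's high-level `McpServer` API looks roughly like the sketch below. The tool name and the `lookupNode` helper are hypothetical, and exact signatures may differ across SDK versions.

```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { z } from "zod";

// Hypothetical lookup against the node database; not a real project function.
declare function lookupNode(nodeType: string): object;

const server = new McpServer({ name: "n8n-mcp", version: "1.0.0" });

// The zod schema gives the SDK both runtime validation and TypeScript types.
server.tool(
  "get_node_config",
  "Fetch the stored configuration for an n8n node type",
  { nodeType: z.string().describe("e.g. nodes-base.httpRequest") },
  async ({ nodeType }) => ({
    content: [{ type: "text", text: JSON.stringify(lookupNode(nodeType)) }],
  })
);
```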

4. **Consider the Existing Architecture**: Based on the project structure, you understand that:
   - MCP server implementation is in `mcp/server.ts`
   - Tool definitions are in `mcp/tools.ts`
   - Tool documentation is in `mcp/tools-documentation.ts`
   - The main entry point with mode selection is in `mcp/index.ts`
   - HTTP server integration is handled separately

5. **Debug Effectively**: When troubleshooting MCP issues:
   - Check message formatting and protocol compliance
   - Verify tool registration and capability declarations
   - Examine connection lifecycle and session management
   - Use appropriate logging without exposing sensitive information

6. **Stay Current**: You are aware of:
   - The latest stable version of the MCP TypeScript SDK
   - Known issues and workarounds in the current implementation
   - Recent updates to MCP specifications
   - Common pitfalls and their solutions

7. **Validate Changes**: Before finalizing any MCP modifications:
   - Test tool functionality with various inputs
   - Verify server startup and shutdown procedures
   - Ensure proper error propagation to clients
   - Check compatibility with the existing n8n-mcp infrastructure

8. **Document Appropriately**: While avoiding unnecessary documentation files, ensure that:
   - Code comments explain complex MCP interactions
   - Tool descriptions in the MCP registry are clear and accurate
   - Any breaking changes are clearly communicated

When asked to make changes, you will provide specific, actionable solutions that integrate seamlessly with the existing MCP implementation. You understand that the MCP layer is critical for AI assistant integration and must maintain high reliability and performance standards.

Remember to consider the project-specific context from CLAUDE.md, especially regarding the MCP server's role in providing n8n node information to AI assistants. Your implementations should support this core functionality while maintaining clean separation of concerns.
102  .claude/agents/n8n-mcp-tester.md  Normal file
@@ -0,0 +1,102 @@
---
name: n8n-mcp-tester
description: Use this agent when you need to test n8n-mcp functionality after implementing fixes or new features. This agent should be invoked proactively after code changes to validate that the MCP tools work correctly with real n8n workflows. The agent tests the actual MCP server tools, not just unit tests. Examples:\n\n<example>\nContext: Developer has just fixed a validation bug in the n8n-mcp server\nuser: "I've fixed the validation issue for HTTP Request nodes"\nassistant: "I've completed the fix. Now let me build the project and then test it."\n<builds project>\nassistant: "Project built successfully. Please reload the MCP server in Claude Desktop to apply the changes."\nuser: "Reloaded"\nassistant: "Great! Now let me invoke the n8n-mcp-tester agent to verify the fix works correctly."\n<commentary>\nSince a fix has been implemented and the MCP server reloaded, use the n8n-mcp-tester agent to validate the fix works in real-world usage.\n</commentary>\n</example>\n\n<example>\nContext: Developer has added a new MCP tool for workflow validation\nuser: "I've implemented the new workflow validation tool"\nassistant: "Implementation complete. Let me build and prepare for testing."\n<builds project>\nassistant: "Build successful. Please reload the MCP server to load the new tool."\nuser: "Done"\nassistant: "Perfect! I'll now use the n8n-mcp-tester agent to test the new workflow validation tool."\n<commentary>\nAfter implementing new MCP functionality and reloading the server, invoke n8n-mcp-tester to verify it works correctly.\n</commentary>\n</example>
tools: Glob, Grep, Read, WebFetch, TodoWrite, WebSearch, mcp__supabase__create_branch, mcp__supabase__list_branches, mcp__supabase__delete_branch, mcp__supabase__merge_branch, mcp__supabase__reset_branch, mcp__supabase__rebase_branch, mcp__supabase__list_tables, mcp__supabase__list_extensions, mcp__supabase__list_migrations, mcp__supabase__apply_migration, mcp__supabase__execute_sql, mcp__supabase__get_logs, mcp__supabase__get_advisors, mcp__supabase__get_project_url, mcp__supabase__generate_typescript_types, mcp__supabase__search_docs, mcp__supabase__list_edge_functions, mcp__supabase__deploy_edge_function, mcp__n8n-mcp__tools_documentation, mcp__n8n-mcp__search_nodes, mcp__n8n-mcp__get_template, mcp__n8n-mcp__search_templates, mcp__n8n-mcp__validate_workflow, mcp__n8n-mcp__n8n_create_workflow, mcp__n8n-mcp__n8n_get_workflow, mcp__n8n-mcp__n8n_update_full_workflow, mcp__n8n-mcp__n8n_update_partial_workflow, mcp__n8n-mcp__n8n_delete_workflow, mcp__n8n-mcp__n8n_list_workflows, mcp__n8n-mcp__n8n_validate_workflow, mcp__n8n-mcp__n8n_trigger_webhook_workflow, mcp__n8n-mcp__n8n_health_check, mcp__brightdata-mcp__search_engine, mcp__brightdata-mcp__scrape_as_markdown, mcp__brightdata-mcp__search_engine_batch, mcp__brightdata-mcp__scrape_batch, mcp__supabase__get_publishable_keys, mcp__supabase__get_edge_function, mcp__n8n-mcp__get_node, mcp__n8n-mcp__validate_node, mcp__n8n-mcp__n8n_autofix_workflow, mcp__n8n-mcp__n8n_executions, mcp__n8n-mcp__n8n_workflow_versions, mcp__n8n-mcp__n8n_deploy_template, mcp__ide__getDiagnostics, mcp__ide__executeCode
model: sonnet
---

You are n8n-mcp-tester, a specialized testing agent for the n8n Model Context Protocol (MCP) server. You validate that MCP tools and functionality work correctly in real-world scenarios after fixes or new features are implemented.

## Your Core Responsibilities

You test the n8n-mcp server by:
1. Using MCP tools to build, validate, and manipulate n8n workflows
2. Verifying that recent fixes resolve the reported issues
3. Testing that new functionality works as designed
4. Reporting clear, actionable results back to the invoking agent

## Testing Methodology

When invoked with a test request, you will:

1. **Understand the Context**: Identify what was fixed or added based on the instructions from the invoking agent

2. **Design Test Scenarios**: Create specific test cases that:
   - Target the exact functionality that was changed
   - Include both positive and negative test cases
   - Test edge cases and boundary conditions
   - Use realistic n8n workflow configurations

3. **Execute Tests Using MCP Tools**: You have access to all n8n-mcp tools, including:
   - `search_nodes`: Find relevant n8n nodes
   - `get_node_info`: Get detailed node configuration
   - `get_node_essentials`: Get simplified node information
   - `validate_node_config`: Validate node configurations
   - `n8n_validate_workflow`: Validate complete workflows
   - `get_node_example`: Get working examples
   - `search_templates`: Find workflow templates
   - Additional tools as available in the MCP server

4. **Verify Expected Behavior**:
   - Confirm fixes resolve the original issue
   - Verify new features work as documented
   - Check for regressions in related functionality
   - Test error handling and edge cases

5. **Report Results**: Provide clear feedback including:
   - What was tested (specific tools and scenarios)
   - Whether the fix/feature works as expected
   - Any unexpected behaviors or issues discovered
   - Specific error messages if failures occur
   - Recommendations for additional testing if needed

## Testing Guidelines

- **Be Thorough**: Test multiple variations and edge cases
- **Be Specific**: Use exact node types, properties, and configurations mentioned in the fix
- **Be Realistic**: Create test scenarios that mirror actual n8n usage
- **Be Clear**: Report results in a structured, easy-to-understand format
- **Be Efficient**: Focus testing on the changed functionality first

## Example Test Execution

If testing a validation fix for HTTP Request nodes:
1. Call `tools_documentation` to list the available tools and get documentation for the `search_nodes` tool
2. Search for the HTTP Request node using `search_nodes`
3. Get the node configuration with `get_node_info` or `get_node_essentials`
4. Create test configurations that previously failed (see the fixture sketch below)
5. Validate using `validate_node_config` with different profiles
6. Test in a complete workflow using `n8n_validate_workflow`
7. Report whether validation now works correctly
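
For step 4, a negative-case fixture might look like the hypothetical fragment below; the field names follow n8n's workflow JSON, while the values are invented for the test.

```typescript
// Hypothetical regression fixture: an HTTP Request node that previously
// slipped through validation because `url` was missing.
const invalidHttpRequestNode = {
  id: "1",
  name: "Fetch users",
  type: "n8n-nodes-base.httpRequest",
  typeVersion: 4,
  position: [250, 300],
  parameters: { method: "GET" }, // no `url`, so validation should flag it
};
```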

## Important Constraints

- You can only test using the MCP tools available in the server
- You cannot modify code or files - only test existing functionality
- You must work with the current state of the MCP server (already reloaded)
- Focus on functional testing, not unit testing
- Report issues objectively without attempting to fix them

## Response Format

Structure your test results as:

```
### Test Report: [Feature/Fix Name]

**Test Objective**: [What was being tested]

**Test Scenarios**:
1. [Scenario 1]: ✅/❌ [Result]
2. [Scenario 2]: ✅/❌ [Result]

**Findings**:
- [Key finding 1]
- [Key finding 2]

**Conclusion**: [Overall assessment - works as expected / issues found]

**Details**: [Any error messages, unexpected behaviors, or additional context]
```

Remember: Your role is to validate that the n8n-mcp server works correctly in practice, providing confidence that fixes and new features function as intended before deployment.
117  .claude/agents/technical-researcher.md  Normal file
@@ -0,0 +1,117 @@
---
name: technical-researcher
description: Use this agent when you need to conduct in-depth technical research on complex topics, technologies, or architectural decisions. This includes investigating new frameworks, analyzing security vulnerabilities, evaluating third-party APIs, researching performance optimization strategies, or generating technical feasibility reports. The agent excels at multi-source investigations requiring comprehensive analysis and synthesis of technical information.\n\nExamples:\n- <example>\n  Context: User needs to research a new framework before adoption\n  user: "I need to understand if we should adopt Rust for our high-performance backend services"\n  assistant: "I'll use the technical-researcher agent to conduct a comprehensive investigation into Rust for backend services"\n  <commentary>\n  Since the user needs deep technical research on a framework adoption decision, use the technical-researcher agent to analyze Rust's suitability.\n  </commentary>\n</example>\n- <example>\n  Context: User is investigating a security vulnerability\n  user: "Research the log4j vulnerability and its impact on Java applications"\n  assistant: "Let me launch the technical-researcher agent to investigate the log4j vulnerability comprehensively"\n  <commentary>\n  The user needs detailed security research, so the technical-researcher agent will gather and synthesize information from multiple sources.\n  </commentary>\n</example>\n- <example>\n  Context: User needs to evaluate an API integration\n  user: "We're considering integrating with Stripe's new payment intents API - need to understand the technical implications"\n  assistant: "I'll deploy the technical-researcher agent to analyze Stripe's payment intents API and its integration requirements"\n  <commentary>\n  Complex API evaluation requires the technical-researcher agent's multi-source investigation capabilities.\n  </commentary>\n</example>
---

You are an elite Technical Research Specialist with expertise in conducting comprehensive investigations into complex technical topics. You excel at decomposing research questions, orchestrating multi-source searches, synthesizing findings, and producing actionable analysis reports.

## Core Capabilities

You specialize in:
- Query decomposition and search strategy optimization
- Parallel information gathering from diverse sources
- Cross-reference validation and fact verification
- Source credibility assessment and relevance scoring
- Synthesis of technical findings into coherent narratives
- Citation management and proper attribution

## Research Methodology

### 1. Query Analysis Phase
- Decompose the research topic into specific sub-questions
- Identify key technical terms, acronyms, and related concepts
- Determine the appropriate research depth (quick lookup vs. deep dive)
- Plan your search strategy with 3-5 initial queries

### 2. Information Gathering Phase
- Execute searches across multiple sources (web, documentation, forums)
- Prioritize authoritative sources (official docs, peer-reviewed content)
- Capture both mainstream perspectives and edge cases
- Track source URLs, publication dates, and author credentials
- Aim for 5-10 diverse sources for standard research, 15-20 for deep dives

### 3. Validation Phase
- Cross-reference findings across multiple sources
- Identify contradictions or outdated information
- Verify technical claims against official documentation
- Flag areas of uncertainty or debate

### 4. Synthesis Phase
- Organize findings into logical sections
- Highlight key insights and actionable recommendations
- Present trade-offs and alternative approaches
- Include code examples or configuration snippets where relevant

## Output Structure

Your research reports should follow this structure:

1. **Executive Summary** (2-3 paragraphs)
   - Key findings and recommendations
   - Critical decision factors
   - Risk assessment

2. **Technical Overview**
   - Core concepts and architecture
   - Key features and capabilities
   - Technical requirements and dependencies

3. **Detailed Analysis**
   - Performance characteristics
   - Security considerations
   - Integration complexity
   - Scalability factors
   - Community support and ecosystem

4. **Practical Considerations**
   - Implementation effort estimates
   - Learning curve assessment
   - Operational requirements
   - Cost implications

5. **Comparative Analysis** (when applicable)
   - Alternative solutions
   - Trade-off matrix
   - Migration considerations

6. **Recommendations**
   - Specific action items
   - Risk mitigation strategies
   - Proof-of-concept suggestions

7. **References**
   - All sources with titles, URLs, and access dates
   - Credibility indicators for each source

## Quality Standards

- **Accuracy**: Verify all technical claims against multiple sources
- **Completeness**: Address all aspects of the research question
- **Objectivity**: Present balanced views including limitations
- **Timeliness**: Prioritize recent information (flag if >2 years old)
- **Actionability**: Provide concrete next steps and recommendations

## Adaptive Strategies

- For emerging technologies: Focus on early adopter experiences and official roadmaps
- For security research: Prioritize CVE databases, security advisories, and vendor responses
- For performance analysis: Seek benchmarks, case studies, and real-world implementations
- For API evaluations: Examine documentation quality, SDK availability, and integration examples

## Research Iteration

If initial searches yield insufficient results:
1. Broaden search terms or try alternative terminology
2. Check specialized forums, GitHub issues, or Stack Overflow
3. Look for conference talks, blog posts, or video tutorials
4. Consider reaching out to subject matter experts or communities

## Limitations Acknowledgment

Always disclose:
- Information gaps or areas lacking documentation
- Conflicting sources or unresolved debates
- Potential biases in available sources
- Time-sensitive information that may become outdated

You maintain intellectual rigor while making complex technical information accessible. Your research empowers teams to make informed decisions with confidence, backed by thorough investigation and clear analysis.
99
.claude/agents/test-automator.md
Normal file
@@ -0,0 +1,99 @@
---
name: test-automator
description: Use this agent when you need to create comprehensive test suites, improve test coverage, or set up test automation infrastructure. This includes writing unit tests with mocks, integration tests with test containers, E2E tests with Playwright/Cypress, configuring CI/CD pipelines for testing, or establishing test data management strategies. The agent should be used proactively when new features are implemented without tests or when test coverage needs improvement.\n\nExamples:\n- <example>\n  Context: The user has just implemented a new API endpoint without tests.\n  user: "I've added a new user registration endpoint to our API"\n  assistant: "I see you've added a new endpoint. Let me use the test-automator agent to create a comprehensive test suite for it."\n  <commentary>\n  Since new functionality was added without tests, proactively use the test-automator agent to ensure proper test coverage.\n  </commentary>\n  </example>\n- <example>\n  Context: The user explicitly asks for test creation.\n  user: "Can you write tests for the payment processing module?"\n  assistant: "I'll use the test-automator agent to create a comprehensive test suite for the payment processing module."\n  <commentary>\n  The user directly requested tests, so use the test-automator agent to handle this task.\n  </commentary>\n  </example>\n- <example>\n  Context: The user mentions issues with test reliability.\n  user: "Our tests keep failing randomly in CI"\n  assistant: "I'll use the test-automator agent to analyze and fix the flaky tests, ensuring they run deterministically."\n  <commentary>\n  Test reliability issues require the test-automator agent's expertise in creating deterministic tests.\n  </commentary>\n  </example>
---

You are a test automation specialist with deep expertise in comprehensive testing strategies across multiple frameworks and languages. Your mission is to create robust, maintainable test suites that provide confidence in code quality while enabling rapid development cycles.

## Core Responsibilities

You will design and implement test suites following the test pyramid principle:
- **Unit Tests (70%)**: Fast, isolated tests with extensive mocking and stubbing
- **Integration Tests (20%)**: Tests verifying component interactions, using test containers when needed
- **E2E Tests (10%)**: Critical user journey tests using Playwright, Cypress, or similar tools

## Testing Philosophy

1. **Test Behavior, Not Implementation**: Focus on what the code does, not how it does it. Tests should survive refactoring.
2. **Arrange-Act-Assert Pattern**: Structure every test clearly with setup, execution, and verification phases (see the sketch below).
3. **Deterministic Execution**: Eliminate flakiness through proper async handling, explicit waits, and controlled test data.
4. **Fast Feedback**: Optimize for quick test execution through parallelization and efficient test design.
5. **Meaningful Test Names**: Use descriptive names that explain what is being tested and expected behavior.
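To make the Arrange-Act-Assert structure from point 2 concrete, here is a minimal sketch using Vitest; `applyDiscount` is a hypothetical function invented purely for illustration:

```typescript
import { describe, it, expect } from 'vitest';

// Hypothetical function under test, shown only to illustrate structure.
function applyDiscount(price: number, percent: number): number {
  if (percent < 0 || percent > 100) throw new RangeError('percent out of range');
  return price * (1 - percent / 100);
}

describe('applyDiscount', () => {
  it('reduces the price by the given percentage', () => {
    // Arrange
    const price = 200;
    const percent = 25;

    // Act
    const result = applyDiscount(price, percent);

    // Assert
    expect(result).toBe(150);
  });

  it('rejects percentages outside 0-100', () => {
    expect(() => applyDiscount(100, 150)).toThrow(RangeError);
  });
});
```

The same structure carries over unchanged to Jest, Mocha, or any other xUnit-style framework.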
## Implementation Guidelines

### Unit Testing
- Create focused tests for individual functions/methods
- Mock all external dependencies (databases, APIs, file systems); a sketch follows this list
- Use factories or builders for test data creation
- Include edge cases: null values, empty collections, boundary conditions
- Aim for high code coverage but prioritize critical paths
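Below is a sketch of a unit test that mocks an external dependency, again assuming Vitest; `UserService` and `UserRepository` are hypothetical stand-ins for real application code:

```typescript
import { describe, it, expect, vi } from 'vitest';

// Hypothetical types standing in for real application code.
interface UserRepository {
  findById(id: string): Promise<{ id: string; email: string } | null>;
}

class UserService {
  constructor(private readonly repo: UserRepository) {}
  async getEmail(id: string): Promise<string> {
    const user = await this.repo.findById(id);
    if (!user) throw new Error(`user ${id} not found`);
    return user.email;
  }
}

describe('UserService.getEmail', () => {
  it('returns the email for an existing user', async () => {
    // Arrange: mock the repository so no real database is touched
    const repo: UserRepository = {
      findById: vi.fn().mockResolvedValue({ id: 'u1', email: 'a@example.com' }),
    };
    const service = new UserService(repo);

    // Act
    const email = await service.getEmail('u1');

    // Assert: verify both the result and the interaction
    expect(email).toBe('a@example.com');
    expect(repo.findById).toHaveBeenCalledWith('u1');
  });

  it('throws for a missing user (edge case)', async () => {
    const repo: UserRepository = { findById: vi.fn().mockResolvedValue(null) };
    await expect(new UserService(repo).getEmail('ghost')).rejects.toThrow('not found');
  });
});
```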
### Integration Testing
- Test real interactions between components
- Use test containers for databases and external services (sketched below)
- Verify data persistence and retrieval
- Test transaction boundaries and rollback scenarios
- Include error handling and recovery tests
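A sketch of an integration test against a real database, assuming the `@testcontainers/postgresql` and `pg` packages are available; the table schema is invented for illustration:

```typescript
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
import { PostgreSqlContainer, StartedPostgreSqlContainer } from '@testcontainers/postgresql';
import { Client } from 'pg';

describe('user persistence (integration)', () => {
  let container: StartedPostgreSqlContainer;
  let client: Client;

  beforeAll(async () => {
    // Start a throwaway Postgres instance for this suite only
    container = await new PostgreSqlContainer('postgres:16').start();
    client = new Client({ connectionString: container.getConnectionUri() });
    await client.connect();
    // Hypothetical schema for the example
    await client.query('CREATE TABLE users (id serial PRIMARY KEY, email text NOT NULL)');
  }, 60_000);

  afterAll(async () => {
    await client.end();
    await container.stop();
  });

  it('persists and retrieves a row', async () => {
    await client.query('INSERT INTO users (email) VALUES ($1)', ['a@example.com']);
    const { rows } = await client.query(
      'SELECT email FROM users WHERE email = $1',
      ['a@example.com']
    );
    expect(rows).toHaveLength(1);
  });
});
```

Because the container is created in `beforeAll` and destroyed in `afterAll`, each suite gets an isolated, disposable database.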
### E2E Testing
- Focus on critical user journeys only
- Use page object pattern for maintainability (see the sketch below)
- Implement proper wait strategies (no arbitrary sleeps)
- Create reusable test utilities and helpers
- Include accessibility checks where applicable
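A sketch of the page object pattern with Playwright; the login page, its selectors, and routes are hypothetical:

```typescript
import { test, expect, type Page } from '@playwright/test';

// Page object for a hypothetical login screen; selectors are illustrative.
class LoginPage {
  constructor(private readonly page: Page) {}

  async goto() {
    await this.page.goto('/login');
  }

  async login(email: string, password: string) {
    await this.page.getByLabel('Email').fill(email);
    await this.page.getByLabel('Password').fill(password);
    await this.page.getByRole('button', { name: 'Sign in' }).click();
  }
}

test('user can sign in', async ({ page }) => {
  const loginPage = new LoginPage(page);
  await loginPage.goto();
  await loginPage.login('user@example.com', 'secret');
  // Web-first assertion: auto-waits for the heading instead of sleeping
  await expect(page.getByRole('heading', { name: 'Dashboard' })).toBeVisible();
});
```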
### Test Data Management
- Create factories or fixtures for consistent test data (a minimal factory is sketched below)
- Use builders for complex object creation
- Implement data cleanup strategies
- Separate test data from production data
- Version control test data schemas
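One minimal way to implement such a factory in TypeScript (the `User` shape is hypothetical):

```typescript
// Minimal test-data factory with overridable defaults.
interface User {
  id: string;
  email: string;
  role: 'admin' | 'member';
  createdAt: Date;
}

let sequence = 0;

export function buildUser(overrides: Partial<User> = {}): User {
  sequence += 1;
  return {
    id: `user-${sequence}`,             // deterministic, unique per run
    email: `user${sequence}@example.com`,
    role: 'member',
    createdAt: new Date('2024-01-01'),  // fixed date keeps tests deterministic
    ...overrides,                       // callers override only what matters
  };
}

// Usage: const admin = buildUser({ role: 'admin' });
```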
### CI/CD Integration
- Configure parallel test execution
- Set up test result reporting and artifacts
- Implement test retry strategies for network-dependent tests
- Create test environment provisioning
- Configure coverage thresholds and reporting (an example config follows this list)
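As one example of enforcing coverage thresholds so CI fails when coverage drops, here is a hedged sketch of a Vitest configuration (option names assume a recent Vitest with the v8 coverage provider; adjust to the project's actual setup):

```typescript
// vitest.config.ts
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    // Parallelize across worker threads for faster CI feedback
    pool: 'threads',
    coverage: {
      provider: 'v8',
      reporter: ['text-summary', 'lcov', 'html'],
      // Fail the run (and thus the CI job) if coverage drops below these
      thresholds: {
        lines: 80,
        functions: 80,
        branches: 70,
        statements: 80,
      },
    },
  },
});
```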
## Output Requirements

You will provide:
1. **Complete test files** with all necessary imports and setup
2. **Mock implementations** for external dependencies
3. **Test data factories** or fixtures as separate modules
4. **CI pipeline configuration** (GitHub Actions, GitLab CI, Jenkins, etc.)
5. **Coverage configuration** files and scripts
6. **E2E test scenarios** with page objects and utilities
7. **Documentation** explaining test structure and running instructions

## Framework Selection

Choose appropriate frameworks based on the technology stack:
- **JavaScript/TypeScript**: Jest, Vitest, Mocha + Chai, Playwright, Cypress
- **Python**: pytest, unittest, pytest-mock, factory_boy
- **Java**: JUnit 5, Mockito, TestContainers, REST Assured
- **Go**: testing package, testify, gomock
- **Ruby**: RSpec, Minitest, FactoryBot

## Quality Checks

Before finalizing any test suite, verify:
- All tests pass consistently (run multiple times)
- No hardcoded values or environment dependencies
- Proper teardown and cleanup
- Clear assertion messages for failures
- Appropriate use of beforeEach/afterEach hooks
- No test interdependencies
- Reasonable execution time

## Special Considerations

- For async code, ensure proper promise handling and async/await usage
- For UI tests, implement proper element waiting strategies
- For API tests, validate both response structure and data
- For performance-critical code, include benchmark tests
- For security-sensitive code, include security-focused test cases

When encountering existing tests, analyze them first to understand patterns and conventions before adding new ones. Always strive for consistency with the existing test architecture while improving where possible.
@@ -26,4 +26,8 @@ USE_NGINX=false
# N8N_API_URL=https://your-n8n-instance.com
# N8N_API_KEY=your-api-key-here
# N8N_API_TIMEOUT=30000
# N8N_API_MAX_RETRIES=3
# N8N_API_MAX_RETRIES=3

# Optional: Disable specific tools (comma-separated list)
# Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check
# DISABLED_TOOLS=
149
.env.example
@@ -7,6 +7,7 @@
# Database Configuration
# For local development: ./data/nodes.db
# For Docker: /app/data/nodes.db
# Custom paths supported in v2.7.16+ (must end with .db)
NODE_DB_PATH=./data/nodes.db

# Logging Level (debug, info, warn, error)
@@ -36,14 +37,25 @@ MCP_SERVER_HOST=localhost
# Server mode: stdio (local) or http (remote)
MCP_MODE=stdio

# Use fixed HTTP implementation (recommended for stability)
# Set to true to bypass StreamableHTTPServerTransport issues
USE_FIXED_HTTP=true
# DEPRECATED: USE_FIXED_HTTP is deprecated as of v2.31.8
# The fixed HTTP implementation does not support SSE streaming required by
# clients like OpenAI Codex. Use the default SingleSessionHTTPServer instead.
# See: https://github.com/czlonkowski/n8n-mcp/issues/524
# USE_FIXED_HTTP=true # DO NOT USE - deprecated

# HTTP Server Configuration (only used when MCP_MODE=http)
PORT=3000
HOST=0.0.0.0

# Base URL Configuration (optional)
# Set this when running behind a proxy or when the server is accessed via a different URL
# than what it binds to. If not set, URLs will be auto-detected from proxy headers (if TRUST_PROXY is set)
# or constructed from HOST and PORT.
# Examples:
# BASE_URL=https://n8n-mcp.example.com
# BASE_URL=https://your-domain.com:8443
# PUBLIC_URL=https://n8n-mcp.mydomain.com (alternative to BASE_URL)

# Authentication token for HTTP mode (REQUIRED)
# Generate with: openssl rand -base64 32
AUTH_TOKEN=your-secure-token-here
@@ -59,6 +71,72 @@ AUTH_TOKEN=your-secure-token-here
# Default: 0 (disabled)
# TRUST_PROXY=0

# =========================
# SECURITY CONFIGURATION
# =========================

# Rate Limiting Configuration
# Protects authentication endpoint from brute force attacks
# Window: Time period in milliseconds (default: 900000 = 15 minutes)
# Max: Maximum authentication attempts per IP within window (default: 20)
# AUTH_RATE_LIMIT_WINDOW=900000
# AUTH_RATE_LIMIT_MAX=20

# SSRF Protection Mode
# Prevents webhooks from accessing internal networks and cloud metadata
#
# Modes:
# - strict (default): Block localhost + private IPs + cloud metadata
#   Use for: Production deployments, cloud environments
#   Security: Maximum
#
# - moderate: Allow localhost, block private IPs + cloud metadata
#   Use for: Local development with local n8n instance
#   Security: Good balance
#   Example: n8n running on http://localhost:5678 or http://host.docker.internal:5678
#
# - permissive: Allow localhost + private IPs, block cloud metadata
#   Use for: Internal network testing, private cloud (NOT for production)
#   Security: Minimal - use with caution
#
# Default: strict
# WEBHOOK_SECURITY_MODE=strict
#
# For local development with local n8n:
# WEBHOOK_SECURITY_MODE=moderate

# Disabled Tools Configuration
# Filter specific tools from registration at startup
# Useful for multi-tenant deployments, security hardening, or feature flags
#
# Format: Comma-separated list of tool names
# Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check,custom_tool
#
# Common use cases:
# - Multi-tenant: Hide tools that check env vars instead of instance context
#   Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check
# - Security: Disable management tools in production for certain users
# - Feature flags: Gradually roll out new tools
# - Deployment-specific: Different tool sets for cloud vs self-hosted
#
# Default: (empty - all tools enabled)
# DISABLED_TOOLS=

# =========================
# MULTI-TENANT CONFIGURATION
# =========================
# Enable multi-tenant mode for dynamic instance support
# When enabled, n8n API tools will be available for all sessions,
# and instance configuration will be determined from HTTP headers
# Default: false (single-tenant mode using environment variables)
ENABLE_MULTI_TENANT=false

# Session isolation strategy for multi-tenant mode
# - "instance": Create separate sessions per instance ID (recommended)
# - "shared": Share sessions but switch contexts (advanced)
# Default: instance
# MULTI_TENANT_SESSION_STRATEGY=instance

# =========================
# N8N API CONFIGURATION
# =========================
@@ -76,4 +154,67 @@ AUTH_TOKEN=your-secure-token-here
# N8N_API_TIMEOUT=30000

# Maximum number of API request retries (default: 3)
# N8N_API_MAX_RETRIES=3
# N8N_API_MAX_RETRIES=3

# =========================
# CACHE CONFIGURATION
# =========================
# Optional: Configure instance cache settings for flexible instance support

# Maximum number of cached instances (default: 100, min: 1, max: 10000)
# INSTANCE_CACHE_MAX=100

# Cache TTL in minutes (default: 30, min: 1, max: 1440/24 hours)
# INSTANCE_CACHE_TTL_MINUTES=30

# =========================
# OPENAI API CONFIGURATION
# =========================
# Optional: Enable AI-powered template metadata generation
# Provides structured metadata for improved template discovery

# OpenAI API Key (get from https://platform.openai.com/api-keys)
# OPENAI_API_KEY=

# OpenAI Model for metadata generation (default: gpt-4o-mini)
# OPENAI_MODEL=gpt-4o-mini

# Batch size for metadata generation (default: 100)
# Templates are processed in batches using OpenAI's Batch API for 50% cost savings
# OPENAI_BATCH_SIZE=100

# Enable metadata generation during template fetch (default: false)
# Set to true to automatically generate metadata when running fetch:templates
# METADATA_GENERATION_ENABLED=false

# ========================================
# INTEGRATION TESTING CONFIGURATION
# ========================================
# Configuration for integration tests that call real n8n instance API

# n8n API Configuration for Integration Tests
# For local development: Use your local n8n instance
# For CI: These will be provided by GitHub secrets
# N8N_API_URL=http://localhost:5678
# N8N_API_KEY=

# Pre-activated Webhook Workflows for Testing
# These workflows must be created manually in n8n and activated
# because n8n API doesn't support workflow activation.
#
# Setup Instructions:
# 1. Create 4 workflows in n8n UI (one for each HTTP method)
# 2. Each workflow should have a single Webhook node
# 3. Configure webhook paths: mcp-test-get, mcp-test-post, mcp-test-put, mcp-test-delete
# 4. ACTIVATE each workflow in n8n UI
# 5. Copy the workflow IDs here
#
# N8N_TEST_WEBHOOK_GET_ID=     # Workflow ID for GET method webhook
# N8N_TEST_WEBHOOK_POST_ID=    # Workflow ID for POST method webhook
# N8N_TEST_WEBHOOK_PUT_ID=     # Workflow ID for PUT method webhook
# N8N_TEST_WEBHOOK_DELETE_ID=  # Workflow ID for DELETE method webhook

# Test Configuration
N8N_TEST_CLEANUP_ENABLED=true      # Enable automatic cleanup of test workflows
N8N_TEST_TAG=mcp-integration-test  # Tag applied to all test workflows
N8N_TEST_NAME_PREFIX=[MCP-TEST]    # Name prefix for test workflows
36
.env.n8n.example
Normal file
@@ -0,0 +1,36 @@
# n8n-mcp Docker Environment Configuration
# Copy this file to .env and customize for your deployment

# === n8n Configuration ===
# n8n basic auth (change these in production!)
N8N_BASIC_AUTH_ACTIVE=true
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=changeme

# n8n host configuration
N8N_HOST=localhost
N8N_PORT=5678
N8N_PROTOCOL=http
N8N_WEBHOOK_URL=http://localhost:5678/

# n8n encryption key (generate with: openssl rand -hex 32)
N8N_ENCRYPTION_KEY=

# === n8n-mcp Configuration ===
# MCP server port
MCP_PORT=3000

# MCP authentication token (generate with: openssl rand -hex 32)
MCP_AUTH_TOKEN=

# n8n API key for MCP to access n8n
# Get this from n8n UI: Settings > n8n API > Create API Key
N8N_API_KEY=

# Logging level (debug, info, warn, error)
LOG_LEVEL=info

# === GitHub Container Registry (for CI/CD) ===
# Only needed if building custom images
GITHUB_REPOSITORY=czlonkowski/n8n-mcp
VERSION=latest
127
.env.test
Normal file
@@ -0,0 +1,127 @@
# Test Environment Configuration for n8n-mcp
# This file contains test-specific environment variables
# DO NOT commit sensitive values - use .env.test.local for secrets

# === Test Mode Configuration ===
NODE_ENV=test
MCP_MODE=test
TEST_ENVIRONMENT=true

# === Database Configuration ===
# Use in-memory database for tests by default
NODE_DB_PATH=:memory:
# Uncomment to use a persistent test database
# NODE_DB_PATH=./tests/fixtures/test-nodes.db
REBUILD_ON_START=false

# === API Configuration for Mocking ===
# Mock API endpoints
N8N_API_URL=http://localhost:3001/mock-api
N8N_API_KEY=test-api-key-12345
N8N_WEBHOOK_BASE_URL=http://localhost:3001/webhook
N8N_WEBHOOK_TEST_URL=http://localhost:3001/webhook-test

# === Test Server Configuration ===
PORT=3001
HOST=127.0.0.1
CORS_ORIGIN=http://localhost:3000,http://localhost:5678

# === Authentication ===
AUTH_TOKEN=test-auth-token
MCP_AUTH_TOKEN=test-mcp-auth-token

# === Logging Configuration ===
# Set to 'debug' for verbose test output
LOG_LEVEL=error
# Enable debug logging for specific tests
DEBUG=false
# Log test execution details
TEST_LOG_VERBOSE=false

# === Test Execution Configuration ===
# Test timeouts (in milliseconds)
TEST_TIMEOUT_UNIT=5000
TEST_TIMEOUT_INTEGRATION=15000
TEST_TIMEOUT_E2E=30000
TEST_TIMEOUT_GLOBAL=60000

# Test retry configuration
TEST_RETRY_ATTEMPTS=2
TEST_RETRY_DELAY=1000

# Parallel execution
TEST_PARALLEL=true
TEST_MAX_WORKERS=4

# === Feature Flags ===
# Enable/disable specific test features
FEATURE_TEST_COVERAGE=true
FEATURE_TEST_SCREENSHOTS=false
FEATURE_TEST_VIDEOS=false
FEATURE_TEST_TRACE=false
FEATURE_MOCK_EXTERNAL_APIS=true
FEATURE_USE_TEST_CONTAINERS=false

# === Mock Service Configuration ===
# MSW (Mock Service Worker) configuration
MSW_ENABLED=true
MSW_API_DELAY=0

# Test data paths
TEST_FIXTURES_PATH=./tests/fixtures
TEST_DATA_PATH=./tests/data
TEST_SNAPSHOTS_PATH=./tests/__snapshots__

# === Performance Testing ===
# Performance thresholds (in milliseconds)
PERF_THRESHOLD_API_RESPONSE=100
PERF_THRESHOLD_DB_QUERY=50
PERF_THRESHOLD_NODE_PARSE=200

# === External Service Mocks ===
# Redis mock (if needed)
REDIS_MOCK_ENABLED=true
REDIS_MOCK_PORT=6380

# Elasticsearch mock (if needed)
ELASTICSEARCH_MOCK_ENABLED=false
ELASTICSEARCH_MOCK_PORT=9201

# === Rate Limiting ===
# Disable rate limiting in tests
RATE_LIMIT_MAX=0
RATE_LIMIT_WINDOW=0

# === Cache Configuration ===
# Disable caching in tests for predictable results
CACHE_TTL=0
CACHE_ENABLED=false

# === Error Handling ===
# Show full error stack traces in tests
ERROR_SHOW_STACK=true
ERROR_SHOW_DETAILS=true

# === Cleanup Configuration ===
# Automatically clean up test data after each test
TEST_CLEANUP_ENABLED=true
TEST_CLEANUP_ON_FAILURE=false

# === Database Seeding ===
# Seed test database with sample data
TEST_SEED_DATABASE=true
TEST_SEED_TEMPLATES=true

# === Network Configuration ===
# Network timeouts for external requests
NETWORK_TIMEOUT=5000
NETWORK_RETRY_COUNT=0

# === Memory Limits ===
# Set memory limits for tests (in MB)
TEST_MEMORY_LIMIT=512

# === Code Coverage ===
# Coverage output directory
COVERAGE_DIR=./coverage
COVERAGE_REPORTER=lcov,html,text-summary
97
.env.test.example
Normal file
@@ -0,0 +1,97 @@
# Example Test Environment Configuration
# Copy this file to .env.test and adjust values as needed
# For sensitive values, create .env.test.local (not committed to git)

# === Test Mode Configuration ===
NODE_ENV=test
MCP_MODE=test
TEST_ENVIRONMENT=true

# === Database Configuration ===
# Use :memory: for in-memory SQLite or provide a file path
NODE_DB_PATH=:memory:
REBUILD_ON_START=false
TEST_SEED_DATABASE=true
TEST_SEED_TEMPLATES=true

# === API Configuration ===
# Mock API endpoints for testing
N8N_API_URL=http://localhost:3001/mock-api
N8N_API_KEY=your-test-api-key
N8N_WEBHOOK_BASE_URL=http://localhost:3001/webhook
N8N_WEBHOOK_TEST_URL=http://localhost:3001/webhook-test

# === Test Server Configuration ===
PORT=3001
HOST=127.0.0.1
CORS_ORIGIN=http://localhost:3000,http://localhost:5678

# === Authentication ===
AUTH_TOKEN=test-auth-token
MCP_AUTH_TOKEN=test-mcp-auth-token

# === Logging Configuration ===
LOG_LEVEL=error
DEBUG=false
TEST_LOG_VERBOSE=false
ERROR_SHOW_STACK=true
ERROR_SHOW_DETAILS=true

# === Test Execution Configuration ===
TEST_TIMEOUT_UNIT=5000
TEST_TIMEOUT_INTEGRATION=15000
TEST_TIMEOUT_E2E=30000
TEST_TIMEOUT_GLOBAL=60000
TEST_RETRY_ATTEMPTS=2
TEST_RETRY_DELAY=1000
TEST_PARALLEL=true
TEST_MAX_WORKERS=4

# === Feature Flags ===
FEATURE_TEST_COVERAGE=true
FEATURE_TEST_SCREENSHOTS=false
FEATURE_TEST_VIDEOS=false
FEATURE_TEST_TRACE=false
FEATURE_MOCK_EXTERNAL_APIS=true
FEATURE_USE_TEST_CONTAINERS=false

# === Mock Service Configuration ===
MSW_ENABLED=true
MSW_API_DELAY=0
REDIS_MOCK_ENABLED=true
REDIS_MOCK_PORT=6380
ELASTICSEARCH_MOCK_ENABLED=false
ELASTICSEARCH_MOCK_PORT=9201

# === Test Data Paths ===
TEST_FIXTURES_PATH=./tests/fixtures
TEST_DATA_PATH=./tests/data
TEST_SNAPSHOTS_PATH=./tests/__snapshots__

# === Performance Testing ===
PERF_THRESHOLD_API_RESPONSE=100
PERF_THRESHOLD_DB_QUERY=50
PERF_THRESHOLD_NODE_PARSE=200

# === Rate Limiting ===
RATE_LIMIT_MAX=0
RATE_LIMIT_WINDOW=0

# === Cache Configuration ===
CACHE_TTL=0
CACHE_ENABLED=false

# === Cleanup Configuration ===
TEST_CLEANUP_ENABLED=true
TEST_CLEANUP_ON_FAILURE=false

# === Network Configuration ===
NETWORK_TIMEOUT=5000
NETWORK_RETRY_COUNT=0

# === Memory Limits ===
TEST_MEMORY_LIMIT=512

# === Code Coverage ===
COVERAGE_DIR=./coverage
COVERAGE_REPORTER=lcov,html,text-summary
56
.github/BENCHMARK_THRESHOLDS.md
vendored
Normal file
@@ -0,0 +1,56 @@
# Performance Benchmark Thresholds

This file defines the expected performance thresholds for n8n-mcp operations.

## Critical Operations

| Operation | Expected Time | Warning Threshold | Error Threshold |
|-----------|---------------|-------------------|-----------------|
| Node Loading (per package) | <100ms | 150ms | 200ms |
| Database Query (simple) | <5ms | 10ms | 20ms |
| Search (simple word) | <10ms | 20ms | 50ms |
| Search (complex query) | <50ms | 100ms | 200ms |
| Validation (simple config) | <1ms | 2ms | 5ms |
| Validation (complex config) | <10ms | 20ms | 50ms |
| MCP Tool Execution | <50ms | 100ms | 200ms |

## Benchmark Categories

### Node Loading Performance
- **loadPackage**: Should handle large packages efficiently
- **loadNodesFromPath**: Individual file loading should be fast
- **parsePackageJson**: JSON parsing overhead should be minimal

### Database Query Performance
- **getNodeByType**: Direct lookups should be instant
- **searchNodes**: Full-text search should scale well
- **getAllNodes**: Pagination should prevent performance issues

### Search Operations
- **OR mode**: Should handle multiple terms efficiently
- **AND mode**: More restrictive but still performant
- **FUZZY mode**: Slower but acceptable for typo tolerance

### Validation Performance
- **minimal profile**: Fastest, only required fields
- **ai-friendly profile**: Balanced performance
- **strict profile**: Comprehensive but slower

### MCP Tool Execution
- Tools should respond quickly for interactive use
- Complex operations may take longer but should remain responsive

## Regression Detection

Performance regressions are detected when:
1. Any operation exceeds its warning threshold by 10% (a minimal check is sketched below)
2. Multiple operations show degradation in the same category
3. Average performance across all benchmarks degrades by 5%
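A minimal sketch of how rule 1 above could be checked in TypeScript. The threshold values are taken from the table above, but the result shape is an assumption; the repository's actual comparison logic lives in `scripts/compare-benchmarks.js`:

```typescript
interface BenchmarkResult {
  name: string;    // e.g. "Database Query (simple)" - assumed shape
  meanMs: number;  // measured mean time in milliseconds
}

// Warning thresholds from the table above, in milliseconds (subset).
const warningThresholdMs: Record<string, number> = {
  'Node Loading (per package)': 150,
  'Database Query (simple)': 10,
  'Search (simple word)': 20,
  'MCP Tool Execution': 100,
};

// Rule 1: flag any operation exceeding its warning threshold by 10%.
function findRegressions(results: BenchmarkResult[]): BenchmarkResult[] {
  return results.filter((r) => {
    const threshold = warningThresholdMs[r.name];
    return threshold !== undefined && r.meanMs > threshold * 1.1;
  });
}

// Example: a 12ms simple query exceeds 10ms * 1.1 = 11ms and is flagged.
console.log(findRegressions([{ name: 'Database Query (simple)', meanMs: 12 }]));
```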
## Optimization Targets

Future optimization efforts should focus on:
1. **Search performance**: Implement FTS5 for better full-text search
2. **Caching**: Add intelligent caching for frequently accessed nodes
3. **Lazy loading**: Defer loading of large property schemas
4. **Batch operations**: Optimize bulk inserts and updates
3
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,3 @@
# GitHub Funding Configuration

github: [czlonkowski]
17
.github/gh-pages.yml
vendored
Normal file
@@ -0,0 +1,17 @@
# GitHub Pages configuration for benchmark results
# This file configures the gh-pages branch to serve benchmark results

# Path to the benchmark data
benchmarks:
  data_dir: benchmarks

# Theme configuration
theme:
  name: minimal

# Navigation
nav:
  - title: "Performance Benchmarks"
    url: /benchmarks/
  - title: "Back to Repository"
    url: https://github.com/czlonkowski/n8n-mcp
176
.github/workflows/benchmark-pr.yml
vendored
Normal file
@@ -0,0 +1,176 @@
name: Benchmark PR Comparison
on:
  pull_request:
    branches: [main]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'

permissions:
  pull-requests: write
  contents: read
  statuses: write

jobs:
  benchmark-comparison:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      # Run benchmarks on current branch
      - name: Run current benchmarks
        run: npm run benchmark:ci

      - name: Save current results
        run: cp benchmark-results.json benchmark-current.json

      # Checkout and run benchmarks on base branch
      - name: Checkout base branch
        run: |
          git checkout ${{ github.event.pull_request.base.sha }}
          git status

      - name: Install base dependencies
        run: npm ci

      - name: Run baseline benchmarks
        run: npm run benchmark:ci
        continue-on-error: true

      - name: Save baseline results
        run: |
          if [ -f benchmark-results.json ]; then
            cp benchmark-results.json benchmark-baseline.json
          else
            echo '{"files":[]}' > benchmark-baseline.json
          fi

      # Compare results
      - name: Checkout PR branch again
        run: git checkout ${{ github.event.pull_request.head.sha }}

      - name: Compare benchmarks
        id: compare
        run: |
          node scripts/compare-benchmarks.js benchmark-current.json benchmark-baseline.json || echo "REGRESSION=true" >> $GITHUB_OUTPUT

      # Upload comparison artifacts
      - name: Upload benchmark comparison
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-comparison-${{ github.run_number }}
          path: |
            benchmark-current.json
            benchmark-baseline.json
            benchmark-comparison.json
            benchmark-comparison.md
          retention-days: 30

      # Post comparison to PR
      - name: Post benchmark comparison to PR
        if: always()
        uses: actions/github-script@v7
        continue-on-error: true
        with:
          script: |
            try {
              const fs = require('fs');
              let comment = '## ⚡ Benchmark Comparison\n\n';

              try {
                if (fs.existsSync('benchmark-comparison.md')) {
                  const comparison = fs.readFileSync('benchmark-comparison.md', 'utf8');
                  comment += comparison;
                } else {
                  comment += 'Benchmark comparison could not be generated.';
                }
              } catch (error) {
                comment += `Error reading benchmark comparison: ${error.message}`;
              }

              comment += '\n\n---\n';
              comment += `*[View full benchmark results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})*`;

              // Find existing comment
              const { data: comments } = await github.rest.issues.listComments({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
              });

              const botComment = comments.find(comment =>
                comment.user.type === 'Bot' &&
                comment.body.includes('## ⚡ Benchmark Comparison')
              );

              if (botComment) {
                await github.rest.issues.updateComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  comment_id: botComment.id,
                  body: comment
                });
              } else {
                await github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: context.issue.number,
                  body: comment
                });
              }
            } catch (error) {
              console.error('Failed to create/update PR comment:', error.message);
              console.log('This is likely due to insufficient permissions for external PRs.');
              console.log('Benchmark comparison has been saved to artifacts instead.');
            }

      # Add status check
      - name: Set benchmark status
        if: always()
        uses: actions/github-script@v7
        continue-on-error: true
        with:
          script: |
            try {
              const hasRegression = '${{ steps.compare.outputs.REGRESSION }}' === 'true';
              const state = hasRegression ? 'failure' : 'success';
              const description = hasRegression
                ? 'Performance regressions detected'
                : 'No performance regressions';

              await github.rest.repos.createCommitStatus({
                owner: context.repo.owner,
                repo: context.repo.repo,
                sha: context.sha,
                state: state,
                target_url: `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`,
                description: description,
                context: 'benchmarks/regression-check'
              });
            } catch (error) {
              console.error('Failed to create commit status:', error.message);
              console.log('This is likely due to insufficient permissions for external PRs.');
            }
214
.github/workflows/benchmark.yml
vendored
Normal file
@@ -0,0 +1,214 @@
name: Performance Benchmarks

on:
  push:
    branches: [main, feat/comprehensive-testing-suite]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  pull_request:
    branches: [main]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

permissions:
  # For PR comments
  pull-requests: write
  # For pushing to gh-pages branch
  contents: write
  # For deployment to GitHub Pages
  pages: write
  id-token: write

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # Fetch all history for proper benchmark comparison
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Build project
        run: npm run build

      - name: Run benchmarks
        run: npm run benchmark:ci

      - name: Format benchmark results
        run: node scripts/format-benchmark-results.js

      - name: Upload benchmark artifacts
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: |
            benchmark-results.json
            benchmark-results-formatted.json
            benchmark-summary.json

      # Ensure gh-pages branch exists
      - name: Check and create gh-pages branch
        run: |
          git fetch origin gh-pages:gh-pages 2>/dev/null || {
            echo "gh-pages branch doesn't exist. Creating it..."
            git checkout --orphan gh-pages
            git rm -rf .
            echo "# Benchmark Results" > README.md
            git add README.md
            git config user.name "github-actions[bot]"
            git config user.email "github-actions[bot]@users.noreply.github.com"
            git commit -m "Initial gh-pages commit"
            git push origin gh-pages
            git checkout ${{ github.ref_name }}
          }

      # Clean up workspace before benchmark action
      - name: Clean workspace
        run: |
          git add -A
          git stash || true

      # Store benchmark results and compare
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        continue-on-error: true
        id: benchmark
        with:
          name: n8n-mcp Benchmarks
          tool: 'customSmallerIsBetter'
          output-file-path: benchmark-results-formatted.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
          # Where to store benchmark data
          benchmark-data-dir-path: 'benchmarks'
          # Alert when performance regresses by 10%
          alert-threshold: '110%'
          # Comment on PR when regression is detected
          comment-on-alert: true
          alert-comment-cc-users: '@czlonkowski'
          # Summary always
          summary-always: true
          # Max number of data points to retain
          max-items-in-chart: 50
          fail-on-alert: false

      # Comment on PR with benchmark results
      - name: Comment PR with results
        uses: actions/github-script@v7
        if: github.event_name == 'pull_request'
        continue-on-error: true
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            try {
              const fs = require('fs');
              const summary = JSON.parse(fs.readFileSync('benchmark-summary.json', 'utf8'));

              // Format results for PR comment
              let comment = '## 📊 Performance Benchmark Results\n\n';
              comment += `🕐 Run at: ${new Date(summary.timestamp).toLocaleString()}\n\n`;
              comment += '| Benchmark | Time | Ops/sec | Range |\n';
              comment += '|-----------|------|---------|-------|\n';

              // Group benchmarks by category
              const categories = {};
              for (const benchmark of summary.benchmarks) {
                const [category, ...nameParts] = benchmark.name.split(' - ');
                if (!categories[category]) categories[category] = [];
                categories[category].push({
                  ...benchmark,
                  shortName: nameParts.join(' - ')
                });
              }

              // Display by category
              for (const [category, benchmarks] of Object.entries(categories)) {
                comment += `\n### ${category}\n`;
                for (const benchmark of benchmarks) {
                  comment += `| ${benchmark.shortName} | ${benchmark.time} | ${benchmark.opsPerSec} | ${benchmark.range} |\n`;
                }
              }

              // Add comparison link
              comment += '\n\n📈 [View historical benchmark trends](https://czlonkowski.github.io/n8n-mcp/benchmarks/)\n';
              comment += '\n⚡ Performance regressions >10% will be flagged automatically.\n';

              await github.rest.issues.createComment({
                issue_number: context.issue.number,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: comment
              });
            } catch (error) {
              console.error('Failed to create PR comment:', error.message);
              console.log('This is likely due to insufficient permissions for external PRs.');
              console.log('Benchmark results have been saved to artifacts instead.');
            }

  # Deploy benchmark results to GitHub Pages
  deploy:
    needs: benchmark
    if: github.ref == 'refs/heads/main'
    runs-on: ubuntu-latest
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: gh-pages
        continue-on-error: true

      # If gh-pages checkout failed, create a minimal structure
      - name: Ensure gh-pages content exists
        run: |
          if [ ! -f "index.html" ]; then
            echo "Creating minimal gh-pages structure..."
            mkdir -p benchmarks
            echo '<!DOCTYPE html><html><head><title>n8n-mcp Benchmarks</title></head><body><h1>n8n-mcp Benchmarks</h1><p>Benchmark data will appear here after the first run.</p></body></html>' > index.html
          fi

      - name: Setup Pages
        uses: actions/configure-pages@v4

      - name: Upload Pages artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: '.'

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
222
.github/workflows/dependency-check.yml
vendored
Normal file
@@ -0,0 +1,222 @@
name: Dependency Compatibility Check

# This workflow verifies that when users install n8n-mcp via npm (without lockfile),
# they get compatible dependency versions. This catches issues like #440, #444, #446, #447, #450
# where npm resolution gave users incompatible SDK/Zod versions.

on:
  push:
    branches: [main]
    paths:
      - 'package.json'
      - 'package-lock.json'
      - '.github/workflows/dependency-check.yml'
  pull_request:
    branches: [main]
    paths:
      - 'package.json'
      - 'package-lock.json'
      - '.github/workflows/dependency-check.yml'
  # Allow manual trigger for debugging
  workflow_dispatch:
  # Run weekly to catch upstream dependency changes
  schedule:
    - cron: '0 6 * * 1' # Every Monday at 6 AM UTC

permissions:
  contents: read

jobs:
  fresh-install-check:
    name: Fresh Install Dependency Check
    runs-on: ubuntu-latest
    timeout-minutes: 10

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Build package
        run: |
          npm ci
          npm run build

      - name: Pack package for testing
        run: npm pack

      - name: Create fresh install test directory
        run: |
          mkdir -p /tmp/fresh-install-test
          cp n8n-mcp-*.tgz /tmp/fresh-install-test/

      - name: Install package fresh (simulating user install)
        working-directory: /tmp/fresh-install-test
        run: |
          npm init -y
          # Install from tarball WITHOUT lockfile (simulates npm install n8n-mcp)
          npm install ./n8n-mcp-*.tgz

      - name: Verify critical dependency versions
        working-directory: /tmp/fresh-install-test
        run: |
          echo "=== Dependency Version Check ==="
          echo ""

          # Get actual resolved versions
          SDK_VERSION=$(npm list @modelcontextprotocol/sdk --json 2>/dev/null | jq -r '.dependencies["n8n-mcp"].dependencies["@modelcontextprotocol/sdk"].version // .dependencies["@modelcontextprotocol/sdk"].version // "not found"')
          ZOD_VERSION=$(npm list zod --json 2>/dev/null | jq -r '.dependencies["n8n-mcp"].dependencies.zod.version // .dependencies.zod.version // "not found"')

          echo "MCP SDK version: $SDK_VERSION"
          echo "Zod version: $ZOD_VERSION"
          echo ""

          # Check MCP SDK version - must be exactly 1.20.1
          if [[ "$SDK_VERSION" == "not found" ]]; then
            echo "❌ FAILED: Could not determine MCP SDK version!"
            echo "   The dependency may not have been installed correctly."
            exit 1
          fi
          if [[ "$SDK_VERSION" != "1.20.1" ]]; then
            echo "❌ FAILED: MCP SDK version mismatch!"
            echo "   Expected: 1.20.1"
            echo "   Got: $SDK_VERSION"
            echo ""
            echo "This can cause runtime errors. See issues #440, #444, #446, #447, #450"
            exit 1
          fi
          echo "✅ MCP SDK version is correct: $SDK_VERSION"

          # Check Zod version - must be 3.x (not 4.x, including pre-releases)
          if [[ "$ZOD_VERSION" == "not found" ]]; then
            echo "❌ FAILED: Could not determine Zod version!"
            echo "   The dependency may not have been installed correctly."
            exit 1
          fi
          if [[ "$ZOD_VERSION" =~ ^4\. ]]; then
            echo "❌ FAILED: Zod v4 detected - incompatible with MCP SDK 1.20.1!"
            echo "   Expected: 3.x"
            echo "   Got: $ZOD_VERSION"
            echo ""
            echo "Zod v4 causes '_zod' property errors. See issues #440, #444, #446, #447, #450"
            exit 1
          fi
          echo "✅ Zod version is compatible: $ZOD_VERSION"

          echo ""
          echo "=== All dependency checks passed ==="

      - name: Test basic functionality
        working-directory: /tmp/fresh-install-test
        run: |
          echo "=== Basic Functionality Test ==="

          # Create a simple test script
          cat > test-import.mjs << 'EOF'
          import { spawn } from 'child_process';
          import path from 'path';
          import { fileURLToPath } from 'url';

          const __dirname = path.dirname(fileURLToPath(import.meta.url));

          // Test that the package can be required and basic tools work
          async function test() {
            console.log('Testing n8n-mcp package import...');

            // Start the MCP server briefly to verify it initializes
            const serverPath = path.join(__dirname, 'node_modules/n8n-mcp/dist/mcp/index.js');

            const proc = spawn('node', [serverPath], {
              env: { ...process.env, MCP_MODE: 'stdio' },
              stdio: ['pipe', 'pipe', 'pipe']
            });

            // Send initialize request
            const initRequest = JSON.stringify({
              jsonrpc: '2.0',
              id: 1,
              method: 'initialize',
              params: {
                protocolVersion: '2024-11-05',
                capabilities: {},
                clientInfo: { name: 'test', version: '1.0.0' }
              }
            });

            proc.stdin.write(initRequest + '\n');

            // Wait for response or timeout
            let output = '';
            let stderrOutput = '';
            proc.stdout.on('data', (data) => {
              output += data.toString();
            });

            proc.stderr.on('data', (data) => {
              stderrOutput += data.toString();
              console.error('stderr:', data.toString());
            });

            // Give it 5 seconds to respond
            await new Promise((resolve) => setTimeout(resolve, 5000));

            proc.kill();

            // Check for Zod v4 compatibility errors (the bug we're testing for)
            const allOutput = output + stderrOutput;
            if (allOutput.includes('_zod') || allOutput.includes('Cannot read properties of undefined')) {
              console.error('❌ FAILED: Zod compatibility error detected!');
              console.error('This indicates the SDK/Zod version fix is not working.');
              console.error('See issues #440, #444, #446, #447, #450');
              process.exit(1);
            }

            if (output.includes('"result"')) {
              console.log('✅ MCP server initialized successfully');
              return true;
            } else {
              console.log('Output received:', output.substring(0, 500));
              // Server might not respond in stdio mode without proper framing
              // But if we got here without crashing, that's still good
              console.log('✅ MCP server started without errors');
              return true;
            }
          }

          test()
            .then(() => {
              console.log('=== Basic functionality test passed ===');
              process.exit(0);
            })
            .catch((err) => {
              console.error('❌ Test failed:', err.message);
              process.exit(1);
            });
          EOF

          node test-import.mjs

      - name: Generate dependency report
        if: always()
        working-directory: /tmp/fresh-install-test
        run: |
          echo "=== Full Dependency Tree ===" > dependency-report.txt
          npm list --all >> dependency-report.txt 2>&1 || true

          echo "" >> dependency-report.txt
          echo "=== Critical Dependencies ===" >> dependency-report.txt
          npm list @modelcontextprotocol/sdk zod zod-to-json-schema >> dependency-report.txt 2>&1 || true

          cat dependency-report.txt

      - name: Upload dependency report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: dependency-report-${{ github.run_number }}
          path: /tmp/fresh-install-test/dependency-report.txt
          retention-days: 30
182
.github/workflows/docker-build-n8n.yml
vendored
Normal file
@@ -0,0 +1,182 @@
name: Build and Publish n8n Docker Image

on:
  push:
    branches:
      - main
    tags:
      - 'v*'
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  pull_request:
    branches:
      - main
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}/n8n-mcp

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64,linux/arm64

  test-image:
    needs: build-and-push
    runs-on: ubuntu-latest
    if: github.event_name != 'pull_request'
    permissions:
      contents: read
      packages: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Test Docker image
        run: |
          # Test that the image starts correctly with N8N_MODE
          docker run --rm \
            -e N8N_MODE=true \
            -e MCP_MODE=http \
            -e N8N_API_URL=http://localhost:5678 \
            -e N8N_API_KEY=test \
            -e MCP_AUTH_TOKEN=test-token-minimum-32-chars-long \
            -e AUTH_TOKEN=test-token-minimum-32-chars-long \
            ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest \
            node -e "console.log('N8N_MODE:', process.env.N8N_MODE); process.exit(0);"

      - name: Test health endpoint
        run: |
          # Start container in background
          docker run -d \
            --name n8n-mcp-test \
            -p 3000:3000 \
            -e N8N_MODE=true \
            -e MCP_MODE=http \
            -e N8N_API_URL=http://localhost:5678 \
            -e N8N_API_KEY=test \
            -e MCP_AUTH_TOKEN=test-token-minimum-32-chars-long \
            -e AUTH_TOKEN=test-token-minimum-32-chars-long \
            ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest

          # Wait for container to start
          sleep 10

          # Test health endpoint
          curl -f http://localhost:3000/health || exit 1

          # Test MCP endpoint
          curl -f http://localhost:3000/mcp || exit 1

          # Cleanup
          docker stop n8n-mcp-test
          docker rm n8n-mcp-test

  create-release:
    needs: [build-and-push, test-image]
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/v')
    permissions:
      contents: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create Release
        uses: softprops/action-gh-release@v1
        with:
          generate_release_notes: true
          body: |
            ## Docker Image

            The n8n-specific Docker image is available at:
            ```
            docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }}
            ```

            ## Quick Deploy

            Use the quick deploy script for easy setup:
            ```bash
            ./deploy/quick-deploy-n8n.sh setup
            ```

            See the [deployment documentation](https://github.com/${{ github.repository }}/blob/main/docs/deployment-n8n.md) for detailed instructions.
132
.github/workflows/docker-build.yml
vendored
@@ -5,13 +5,43 @@ on:
  push:
    branches:
      - main
    tags:
      - 'v*'
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  pull_request:
    branches:
      - main
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

# Prevent concurrent Docker pushes across all workflows (shared with release.yml)
# This ensures docker-build.yml and release.yml never push to 'latest' simultaneously
concurrency:
  group: docker-push-${{ github.ref }}
  cancel-in-progress: false

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
@@ -56,18 +86,112 @@ jobs:
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha,prefix={{branch}}-,format=short
            type=sha,format=short
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          no-cache: true
          no-cache: false
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false

      - name: Verify multi-arch manifest for latest tag
        if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
        run: |
          echo "Verifying multi-arch manifest for latest tag..."

          # Retry with exponential backoff (registry propagation can take time)
          MAX_ATTEMPTS=5
          ATTEMPT=1
          WAIT_TIME=2

          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."

            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)

            # Check for both platforms
            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
              echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
              echo "$MANIFEST"
              exit 0
            fi

            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
              sleep $WAIT_TIME
              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
            fi

            ATTEMPT=$((ATTEMPT + 1))
          done

          echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
          echo "$MANIFEST"
          exit 1

  build-railway:
    name: Build Railway Docker Image
    runs-on: ubuntu-latest
    needs: build
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          lfs: true

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata for Railway
        id: meta-railway
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-railway
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha,format=short
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Railway Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile.railway
          no-cache: false
          platforms: linux/amd64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta-railway.outputs.tags }}
          labels: ${{ steps.meta-railway.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false

  # Nginx build commented out until Phase 2
662 .github/workflows/release.yml (vendored, new file)
@@ -0,0 +1,662 @@
name: Automated Release

on:
  push:
    branches: [main]
    paths:
      - 'package.json'
      - 'package.runtime.json'

permissions:
  contents: write
  packages: write
  issues: write
  pull-requests: write

# Prevent concurrent Docker pushes across all workflows (shared with docker-build.yml)
# This ensures release.yml and docker-build.yml never push to 'latest' simultaneously
concurrency:
  group: docker-push-${{ github.ref }}
  cancel-in-progress: false

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  detect-version-change:
    name: Detect Version Change
    runs-on: ubuntu-latest
    outputs:
      version-changed: ${{ steps.check.outputs.changed }}
      new-version: ${{ steps.check.outputs.version }}
      previous-version: ${{ steps.check.outputs.previous-version }}
      is-prerelease: ${{ steps.check.outputs.is-prerelease }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 2

      - name: Check for version change
        id: check
        run: |
          # Get current version from package.json
          CURRENT_VERSION=$(node -e "console.log(require('./package.json').version)")

          # Get previous version from git history safely
          PREVIOUS_VERSION=$(git show HEAD~1:package.json 2>/dev/null | node -e "
          try {
            const data = require('fs').readFileSync(0, 'utf8');
            const pkg = JSON.parse(data);
            console.log(pkg.version || '0.0.0');
          } catch (e) {
            console.log('0.0.0');
          }
          " || echo "0.0.0")

          echo "Previous version: $PREVIOUS_VERSION"
          echo "Current version: $CURRENT_VERSION"

          # Check if version changed
          if [ "$CURRENT_VERSION" != "$PREVIOUS_VERSION" ]; then
            echo "changed=true" >> $GITHUB_OUTPUT
            echo "version=$CURRENT_VERSION" >> $GITHUB_OUTPUT
            echo "previous-version=$PREVIOUS_VERSION" >> $GITHUB_OUTPUT

            # Check if it's a prerelease (contains alpha, beta, rc, dev)
            if echo "$CURRENT_VERSION" | grep -E "(alpha|beta|rc|dev)" > /dev/null; then
              echo "is-prerelease=true" >> $GITHUB_OUTPUT
            else
              echo "is-prerelease=false" >> $GITHUB_OUTPUT
            fi

            echo "🎉 Version changed from $PREVIOUS_VERSION to $CURRENT_VERSION"
          else
            echo "changed=false" >> $GITHUB_OUTPUT
            echo "version=$CURRENT_VERSION" >> $GITHUB_OUTPUT
            echo "previous-version=$PREVIOUS_VERSION" >> $GITHUB_OUTPUT
            echo "is-prerelease=false" >> $GITHUB_OUTPUT
            echo "ℹ️ No version change detected"
          fi

      - name: Validate version against npm registry
        if: steps.check.outputs.changed == 'true'
        run: |
          CURRENT_VERSION="${{ steps.check.outputs.version }}"

          # Get latest version from npm (handle package not found)
          NPM_VERSION=$(npm view n8n-mcp version 2>/dev/null || echo "0.0.0")

          echo "Current version: $CURRENT_VERSION"
          echo "NPM registry version: $NPM_VERSION"

          # Check if version already exists in npm
          if [ "$CURRENT_VERSION" = "$NPM_VERSION" ]; then
            echo "❌ Error: Version $CURRENT_VERSION already published to npm"
            echo "Please bump the version in package.json before releasing"
            exit 1
          fi

          # Simple semver comparison (assumes format: major.minor.patch)
          # Compare if current version is greater than npm version
          if [ "$NPM_VERSION" != "0.0.0" ]; then
            # Sort versions and check if current is not the highest
            HIGHEST=$(printf '%s\n%s' "$NPM_VERSION" "$CURRENT_VERSION" | sort -V | tail -n1)
            if [ "$HIGHEST" != "$CURRENT_VERSION" ]; then
              echo "❌ Error: Version $CURRENT_VERSION is not greater than npm version $NPM_VERSION"
              echo "Please use a higher version number"
              exit 1
            fi
          fi

          echo "✅ Version $CURRENT_VERSION is valid (higher than npm version $NPM_VERSION)"

  generate-release-notes:
    name: Generate Release Notes
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'
    outputs:
      release-notes: ${{ steps.generate.outputs.notes }}
      has-notes: ${{ steps.generate.outputs.has-notes }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Need full history for git log

      - name: Generate release notes from commits
        id: generate
        run: |
          CURRENT_VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          CURRENT_TAG="v$CURRENT_VERSION"

          # Get the previous tag (excluding the current tag which doesn't exist yet)
          PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v "^$CURRENT_TAG$" | head -1)

          echo "Current version: $CURRENT_VERSION"
          echo "Current tag: $CURRENT_TAG"
          echo "Previous tag: $PREVIOUS_TAG"

          if [ -z "$PREVIOUS_TAG" ]; then
            echo "ℹ️ No previous tag found, this might be the first release"

            # Generate initial release notes using script
            if NOTES=$(node scripts/generate-initial-release-notes.js "$CURRENT_VERSION" 2>/dev/null); then
              echo "✅ Successfully generated initial release notes for version $CURRENT_VERSION"
            else
              echo "⚠️ Could not generate initial release notes for version $CURRENT_VERSION"
              NOTES="Initial release v$CURRENT_VERSION"
            fi

            echo "has-notes=true" >> $GITHUB_OUTPUT

            # Use heredoc to properly handle multiline content
            {
              echo "notes<<EOF"
              echo "$NOTES"
              echo "EOF"
            } >> $GITHUB_OUTPUT

          else
            echo "✅ Previous tag found: $PREVIOUS_TAG"

            # Generate release notes between tags
            if NOTES=$(node scripts/generate-release-notes.js "$PREVIOUS_TAG" "HEAD" 2>/dev/null); then
              echo "has-notes=true" >> $GITHUB_OUTPUT

              # Use heredoc to properly handle multiline content
              {
                echo "notes<<EOF"
                echo "$NOTES"
                echo "EOF"
              } >> $GITHUB_OUTPUT

              echo "✅ Successfully generated release notes from $PREVIOUS_TAG to $CURRENT_TAG"
            else
              echo "has-notes=false" >> $GITHUB_OUTPUT
              echo "notes=Failed to generate release notes for version $CURRENT_VERSION" >> $GITHUB_OUTPUT
              echo "⚠️ Could not generate release notes for version $CURRENT_VERSION"
            fi
          fi

  create-release:
    name: Create GitHub Release
    runs-on: ubuntu-latest
    needs: [detect-version-change, generate-release-notes]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    outputs:
      release-id: ${{ steps.create.outputs.id }}
      upload-url: ${{ steps.create.outputs.upload_url }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Create Git Tag
        run: |
          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          # Create annotated tag
          git tag -a "v$VERSION" -m "Release v$VERSION"
          git push origin "v$VERSION"

      - name: Create GitHub Release
        id: create
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          IS_PRERELEASE="${{ needs.detect-version-change.outputs.is-prerelease }}"

          # Create release body
          cat > release_body.md << 'EOF'
          # Release v${{ needs.detect-version-change.outputs.new-version }}

          ${{ needs.generate-release-notes.outputs.release-notes }}

          ---

          ## Installation

          ### NPM Package
          ```bash
          # Install globally
          npm install -g n8n-mcp

          # Or run directly
          npx n8n-mcp
          ```

          ### Docker
          ```bash
          # Standard image
          docker run -p 3000:3000 ghcr.io/czlonkowski/n8n-mcp:v${{ needs.detect-version-change.outputs.new-version }}

          # Railway optimized
          docker run -p 3000:3000 ghcr.io/czlonkowski/n8n-mcp-railway:v${{ needs.detect-version-change.outputs.new-version }}
          ```

          ## Documentation
          - [Installation Guide](https://github.com/czlonkowski/n8n-mcp#installation)
          - [Docker Deployment](https://github.com/czlonkowski/n8n-mcp/blob/main/docs/DOCKER_README.md)
          - [n8n Integration](https://github.com/czlonkowski/n8n-mcp/blob/main/docs/N8N_DEPLOYMENT.md)
          - [Complete Changelog](https://github.com/czlonkowski/n8n-mcp/blob/main/docs/CHANGELOG.md)

          🤖 *Generated with [Claude Code](https://claude.ai/code)*
          EOF

          # Create release using gh CLI
          if [ "$IS_PRERELEASE" = "true" ]; then
            PRERELEASE_FLAG="--prerelease"
          else
            PRERELEASE_FLAG=""
          fi

          gh release create "v$VERSION" \
            --title "Release v$VERSION" \
            --notes-file release_body.md \
            $PRERELEASE_FLAG

          # Output release info for next jobs
          RELEASE_ID=$(gh release view "v$VERSION" --json id --jq '.id')
          echo "id=$RELEASE_ID" >> $GITHUB_OUTPUT
          echo "upload_url=https://uploads.github.com/repos/${{ github.repository }}/releases/$RELEASE_ID/assets{?name,label}" >> $GITHUB_OUTPUT

  build-and-verify:
    name: Build and Verify
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Build project
        run: npm run build

      # Database is already built and committed during development
      # Rebuilding here causes segfault due to memory pressure (exit code 139)
      - name: Verify database exists
        run: |
          if [ ! -f "data/nodes.db" ]; then
            echo "❌ Error: data/nodes.db not found"
            echo "Please run 'npm run rebuild' locally and commit the database"
            exit 1
          fi
          echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"

      # Skip tests - they already passed in PR before merge
      # Running them again on the same commit adds no safety, only time (~6-7 min)

      - name: Run type checking
        run: npm run typecheck

  publish-npm:
    name: Publish to NPM
    runs-on: ubuntu-latest
    needs: [detect-version-change, build-and-verify, create-release]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'
          registry-url: 'https://registry.npmjs.org'

      - name: Install dependencies
        run: npm ci

      - name: Build project
        run: npm run build

      # Database is already built and committed during development
      - name: Verify database exists
        run: |
          if [ ! -f "data/nodes.db" ]; then
            echo "❌ Error: data/nodes.db not found"
            exit 1
          fi
          echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"

      - name: Sync runtime version
        run: npm run sync:runtime-version

      - name: Prepare package for publishing
        run: |
          # Create publish directory
          PUBLISH_DIR="npm-publish-temp"
          rm -rf $PUBLISH_DIR
          mkdir -p $PUBLISH_DIR

          # Copy necessary files
          cp -r dist $PUBLISH_DIR/
          cp -r data $PUBLISH_DIR/
          cp README.md $PUBLISH_DIR/
          cp LICENSE $PUBLISH_DIR/
          cp .env.example $PUBLISH_DIR/

          # Use runtime package.json as base
          cp package.runtime.json $PUBLISH_DIR/package.json

          cd $PUBLISH_DIR

          # Update package.json with complete metadata
          node -e "
          const pkg = require('./package.json');
          pkg.name = 'n8n-mcp';
          pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
          pkg.main = 'dist/index.js';
          pkg.types = 'dist/index.d.ts';
          pkg.exports = {
            '.': {
              types: './dist/index.d.ts',
              require: './dist/index.js',
              import: './dist/index.js'
            }
          };
          pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
          pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
          pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
          pkg.author = 'Romuald Czlonkowski @ www.aiadvisors.pl/en';
          pkg.license = 'MIT';
          pkg.bugs = { url: 'https://github.com/czlonkowski/n8n-mcp/issues' };
          pkg.homepage = 'https://github.com/czlonkowski/n8n-mcp#readme';
          pkg.files = ['dist/**/*', 'data/nodes.db', '.env.example', 'README.md', 'LICENSE'];
          delete pkg.private;
          require('fs').writeFileSync('./package.json', JSON.stringify(pkg, null, 2));
          "

          echo "Package prepared for publishing:"
          echo "Name: $(node -e "console.log(require('./package.json').name)")"
          echo "Version: $(node -e "console.log(require('./package.json').version)")"

      - name: Publish to NPM with retry
        uses: nick-invision/retry@v2
        with:
          timeout_minutes: 5
          max_attempts: 3
          command: |
            cd npm-publish-temp
            npm publish --access public
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

      - name: Clean up
        if: always()
        run: rm -rf npm-publish-temp

  build-docker:
    name: Build and Push Docker Images
    runs-on: ubuntu-latest
    needs: [detect-version-change, build-and-verify]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          lfs: true

      - name: Check disk space
        run: |
          echo "Disk usage before Docker build:"
          df -h

          # Check available space (require at least 2GB)
          AVAILABLE_GB=$(df / --output=avail --block-size=1G | tail -1)
          if [ "$AVAILABLE_GB" -lt 2 ]; then
            echo "❌ Insufficient disk space: ${AVAILABLE_GB}GB available, 2GB required"
            exit 1
          fi
          echo "✅ Sufficient disk space: ${AVAILABLE_GB}GB available"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata for standard image
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}},value=v${{ needs.detect-version-change.outputs.new-version }}
            type=semver,pattern={{major}}.{{minor}},value=v${{ needs.detect-version-change.outputs.new-version }}
            type=semver,pattern={{major}},value=v${{ needs.detect-version-change.outputs.new-version }}
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push standard Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Verify multi-arch manifest for latest tag
        run: |
          echo "Verifying multi-arch manifest for latest tag..."

          # Retry with exponential backoff (registry propagation can take time)
          MAX_ATTEMPTS=5
          ATTEMPT=1
          WAIT_TIME=2

          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."

            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)

            # Check for both platforms
            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
              echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
              echo "$MANIFEST"
              exit 0
            fi

            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
              sleep $WAIT_TIME
              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
            fi

            ATTEMPT=$((ATTEMPT + 1))
          done

          echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
          echo "$MANIFEST"
          exit 1

      - name: Verify multi-arch manifest for version tag
        run: |
          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          echo "Verifying multi-arch manifest for version tag :$VERSION (without 'v' prefix)..."

          # Retry with exponential backoff (registry propagation can take time)
          MAX_ATTEMPTS=5
          ATTEMPT=1
          WAIT_TIME=2

          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."

            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:$VERSION 2>&1 || true)

            # Check for both platforms
            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
              echo "✅ Multi-arch manifest verified for $VERSION: both amd64 and arm64 present"
              echo "$MANIFEST"
              exit 0
            fi

            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
              sleep $WAIT_TIME
              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
            fi

            ATTEMPT=$((ATTEMPT + 1))
          done

          echo "❌ ERROR: Multi-arch manifest incomplete for version $VERSION after $MAX_ATTEMPTS attempts!"
          echo "$MANIFEST"
          exit 1

      - name: Extract metadata for Railway image
        id: meta-railway
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-railway
          tags: |
            type=semver,pattern={{version}},value=v${{ needs.detect-version-change.outputs.new-version }}
            type=semver,pattern={{major}}.{{minor}},value=v${{ needs.detect-version-change.outputs.new-version }}
            type=semver,pattern={{major}},value=v${{ needs.detect-version-change.outputs.new-version }}
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Railway Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile.railway
          platforms: linux/amd64
          push: true
          tags: ${{ steps.meta-railway.outputs.tags }}
          labels: ${{ steps.meta-railway.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

  update-documentation:
    name: Update Documentation
    runs-on: ubuntu-latest
    needs: [detect-version-change, create-release, publish-npm, build-docker]
    if: needs.detect-version-change.outputs.version-changed == 'true' && !failure()
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Update version badges in README
        run: |
          VERSION="${{ needs.detect-version-change.outputs.new-version }}"

          # Update README version badges
          if [ -f "README.md" ]; then
            # Update npm version badge
            sed -i.bak "s|npm/v/n8n-mcp/[^)]*|npm/v/n8n-mcp/$VERSION|g" README.md

            # Update any other version references
            sed -i.bak "s|version-[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*|version-$VERSION|g" README.md

            # Clean up backup file
            rm -f README.md.bak

            echo "✅ Updated version badges in README.md to $VERSION"
          fi

      - name: Commit documentation updates
        env:
          VERSION: ${{ needs.detect-version-change.outputs.new-version }}
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          if git diff --quiet; then
            echo "No documentation changes to commit"
          else
            git add README.md
            git commit -m "docs: update version badges to v${VERSION}"
            git push
            echo "✅ Committed documentation updates"
          fi

  notify-completion:
    name: Notify Release Completion
    runs-on: ubuntu-latest
    needs: [detect-version-change, create-release, publish-npm, build-docker, update-documentation]
    if: always() && needs.detect-version-change.outputs.version-changed == 'true'
    steps:
      - name: Create release summary
        run: |
          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          RELEASE_URL="https://github.com/${{ github.repository }}/releases/tag/v$VERSION"

          echo "## 🎉 Release v$VERSION Published Successfully!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### ✅ Completed Tasks:" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          # Check job statuses
          if [ "${{ needs.create-release.result }}" = "success" ]; then
            echo "- ✅ GitHub Release created: [$RELEASE_URL]($RELEASE_URL)" >> $GITHUB_STEP_SUMMARY
          else
            echo "- ❌ GitHub Release creation failed" >> $GITHUB_STEP_SUMMARY
          fi

          if [ "${{ needs.publish-npm.result }}" = "success" ]; then
            echo "- ✅ NPM package published: [npmjs.com/package/n8n-mcp](https://www.npmjs.com/package/n8n-mcp)" >> $GITHUB_STEP_SUMMARY
          else
            echo "- ❌ NPM publishing failed" >> $GITHUB_STEP_SUMMARY
          fi

          if [ "${{ needs.build-docker.result }}" = "success" ]; then
            echo "- ✅ Docker images built and pushed" >> $GITHUB_STEP_SUMMARY
            echo "  - Standard: \`ghcr.io/czlonkowski/n8n-mcp:v$VERSION\`" >> $GITHUB_STEP_SUMMARY
            echo "  - Railway: \`ghcr.io/czlonkowski/n8n-mcp-railway:v$VERSION\`" >> $GITHUB_STEP_SUMMARY
          else
            echo "- ❌ Docker image building failed" >> $GITHUB_STEP_SUMMARY
          fi

          if [ "${{ needs.update-documentation.result }}" = "success" ]; then
            echo "- ✅ Documentation updated" >> $GITHUB_STEP_SUMMARY
          else
            echo "- ⚠️ Documentation update skipped or failed" >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📦 Installation:" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
          echo "# NPM" >> $GITHUB_STEP_SUMMARY
          echo "npx n8n-mcp" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "# Docker" >> $GITHUB_STEP_SUMMARY
          echo "docker run -p 3000:3000 ghcr.io/czlonkowski/n8n-mcp:v$VERSION" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`" >> $GITHUB_STEP_SUMMARY

          echo "🎉 Release automation completed for v$VERSION!"
353 .github/workflows/test.yml (vendored, new file)
@@ -0,0 +1,353 @@
name: Test Suite
on:
  push:
    branches: [main, feat/comprehensive-testing-suite]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  pull_request:
    branches: [main]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'

permissions:
  contents: read
  issues: write
  pull-requests: write
  checks: write

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15  # Increased from 10 to accommodate larger database with community nodes
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      # Verify test environment setup
      - name: Verify test environment
        run: |
          echo "Current directory: $(pwd)"
          echo "Checking for .env.test file:"
          ls -la .env.test || echo ".env.test not found!"
          echo "First few lines of .env.test:"
          head -5 .env.test || echo "Cannot read .env.test"

      # Run unit tests first (without MSW)
      - name: Run unit tests with coverage
        run: npm run test:unit -- --coverage --coverage.thresholds.lines=0 --coverage.thresholds.functions=0 --coverage.thresholds.branches=0 --coverage.thresholds.statements=0 --reporter=default --reporter=junit
        env:
          CI: true

      # Run integration tests separately (with MSW setup)
      - name: Run integration tests
        run: npm run test:integration -- --reporter=default --reporter=junit
        env:
          CI: true
          N8N_API_URL: ${{ secrets.N8N_API_URL }}
          N8N_API_KEY: ${{ secrets.N8N_API_KEY }}
          N8N_TEST_WEBHOOK_GET_URL: ${{ secrets.N8N_TEST_WEBHOOK_GET_URL }}
          N8N_TEST_WEBHOOK_POST_URL: ${{ secrets.N8N_TEST_WEBHOOK_POST_URL }}
          N8N_TEST_WEBHOOK_PUT_URL: ${{ secrets.N8N_TEST_WEBHOOK_PUT_URL }}
          N8N_TEST_WEBHOOK_DELETE_URL: ${{ secrets.N8N_TEST_WEBHOOK_DELETE_URL }}

      # Generate test summary
      - name: Generate test summary
        if: always()
        run: node scripts/generate-test-summary.js

      # Generate detailed reports
      - name: Generate detailed reports
        if: always()
        run: node scripts/generate-detailed-reports.js

      # Upload test results artifacts
      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-${{ github.run_number }}-${{ github.run_attempt }}
          path: |
            test-results/
            test-summary.md
            test-reports/
          retention-days: 30
          if-no-files-found: warn

      # Upload coverage artifacts
      - name: Upload coverage reports
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: coverage-${{ github.run_number }}-${{ github.run_attempt }}
          path: |
            coverage/
          retention-days: 30
          if-no-files-found: warn

      # Upload coverage to Codecov
      - name: Upload coverage to Codecov
        if: always()
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage/lcov.info
          flags: unittests
          name: codecov-umbrella
          fail_ci_if_error: false
          verbose: true

      # Run linting
      - name: Run linting
        run: npm run lint

      # Run type checking
      - name: Run type checking
        run: npm run typecheck

      # Run benchmarks
      - name: Run benchmarks
        id: benchmarks
        run: npm run benchmark:ci
        continue-on-error: true

      # Upload benchmark results
      - name: Upload benchmark results
        if: always() && steps.benchmarks.outcome != 'skipped'
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}
          path: |
            benchmark-results.json
          retention-days: 30
          if-no-files-found: warn

      # Create test report comment for PRs
      - name: Create test report comment
        if: github.event_name == 'pull_request' && always()
        uses: actions/github-script@v7
        continue-on-error: true
        with:
          script: |
            const fs = require('fs');
            let summary = '## Test Results\n\nTest summary generation failed.';

            try {
              if (fs.existsSync('test-summary.md')) {
                summary = fs.readFileSync('test-summary.md', 'utf8');
              }
            } catch (error) {
              console.error('Error reading test summary:', error);
            }

            try {
              // Find existing comment
              const { data: comments } = await github.rest.issues.listComments({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
              });

              const botComment = comments.find(comment =>
                comment.user.type === 'Bot' &&
                comment.body.includes('## Test Results')
              );

              if (botComment) {
                // Update existing comment
                await github.rest.issues.updateComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  comment_id: botComment.id,
                  body: summary
                });
              } else {
                // Create new comment
                await github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: context.issue.number,
                  body: summary
                });
              }
            } catch (error) {
              console.error('Failed to create/update PR comment:', error.message);
              console.log('This is likely due to insufficient permissions for external PRs.');
              console.log('Test results have been saved to the job summary instead.');
            }

      # Generate job summary
      - name: Generate job summary
        if: always()
        run: |
          echo "# Test Run Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          if [ -f test-summary.md ]; then
            cat test-summary.md >> $GITHUB_STEP_SUMMARY
          else
            echo "Test summary generation failed." >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "## 📥 Download Artifacts" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "- [Test Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
          echo "- [Coverage Report](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
          echo "- [Benchmark Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY

      # Store test metadata
      - name: Store test metadata
        if: always()
        run: |
          cat > test-metadata.json << EOF
          {
            "run_id": "${{ github.run_id }}",
            "run_number": "${{ github.run_number }}",
            "run_attempt": "${{ github.run_attempt }}",
            "sha": "${{ github.sha }}",
            "ref": "${{ github.ref }}",
            "event_name": "${{ github.event_name }}",
            "repository": "${{ github.repository }}",
            "actor": "${{ github.actor }}",
            "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
            "node_version": "$(node --version)",
            "npm_version": "$(npm --version)"
          }
          EOF

      - name: Upload test metadata
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-metadata-${{ github.run_number }}-${{ github.run_attempt }}
          path: test-metadata.json
          retention-days: 30

  # Separate job to process and publish test results
  publish-results:
    needs: test
    runs-on: ubuntu-latest
    if: always()
    permissions:
      checks: write
      pull-requests: write
    steps:
      - uses: actions/checkout@v4

      # Download all artifacts
      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: artifacts

      # Publish test results as checks
      - name: Publish test results
        uses: dorny/test-reporter@v1
        if: always()
        continue-on-error: true
        with:
          name: Test Results
          path: 'artifacts/test-results-*/test-results/junit.xml'
          reporter: java-junit
          fail-on-error: false
          fail-on-empty: false

      # Create a combined artifact with all results
      - name: Create combined results artifact
        if: always()
        run: |
          mkdir -p combined-results
          cp -r artifacts/* combined-results/ 2>/dev/null || true

          # Create index file (unquoted EOF so $(date ...) expands in the footer)
          cat > combined-results/index.html << EOF
          <!DOCTYPE html>
          <html>
          <head>
            <title>n8n-mcp Test Results</title>
            <style>
              body { font-family: Arial, sans-serif; margin: 40px; }
              h1 { color: #333; }
              .section { margin: 20px 0; padding: 20px; border: 1px solid #ddd; border-radius: 5px; }
              a { color: #0066cc; text-decoration: none; }
              a:hover { text-decoration: underline; }
            </style>
          </head>
          <body>
            <h1>n8n-mcp Test Results</h1>
            <div class="section">
              <h2>Test Reports</h2>
              <ul>
                <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.html">📊 Detailed HTML Report</a></li>
                <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/html/index.html">📈 Vitest HTML Report</a></li>
                <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.md">📄 Markdown Report</a></li>
                <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-summary.md">📝 PR Summary</a></li>
                <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/junit.xml">🔧 JUnit XML</a></li>
                <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/results.json">🔢 JSON Results</a></li>
                <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.json">📊 Full JSON Report</a></li>
              </ul>
            </div>
            <div class="section">
              <h2>Coverage Reports</h2>
              <ul>
                <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/html/index.html">HTML Coverage Report</a></li>
                <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/lcov.info">LCOV Report</a></li>
                <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/coverage-summary.json">Coverage Summary JSON</a></li>
              </ul>
            </div>
            <div class="section">
              <h2>Benchmark Results</h2>
              <ul>
                <li><a href="benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}/benchmark-results.json">Benchmark Results JSON</a></li>
              </ul>
            </div>
            <div class="section">
              <h2>Metadata</h2>
              <ul>
                <li><a href="test-metadata-${{ github.run_number }}-${{ github.run_attempt }}/test-metadata.json">Test Run Metadata</a></li>
              </ul>
            </div>
            <div class="section">
              <p><em>Generated at $(date -u +%Y-%m-%dT%H:%M:%SZ)</em></p>
              <p><em>Run: #${{ github.run_number }} | SHA: ${{ github.sha }}</em></p>
            </div>
          </body>
          </html>
          EOF

      - name: Upload combined results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: all-test-results-${{ github.run_number }}
          path: combined-results/
          retention-days: 90
34 .gitignore (vendored)
@@ -39,6 +39,26 @@ logs/
# Testing
coverage/
.nyc_output/
test-results/
test-reports/
test-summary.md
test-metadata.json
benchmark-results.json
benchmark-results*.json
benchmark-summary.json
coverage-report.json
benchmark-comparison.md
benchmark-comparison.json
benchmark-current.json
benchmark-baseline.json
tests/data/*.db
tests/fixtures/*.tmp
tests/test-results/
.test-dbs/
junit.xml
*.test.db
test-*.db
.vitest/

# TypeScript
*.tsbuildinfo
@@ -69,11 +89,19 @@ docker-compose.override.yml
temp/
tmp/

# Batch processing error files (may contain API tokens from templates)
docs/batch_*.jsonl
**/batch_*_error.jsonl

# Local documentation and analysis files
docs/local/

# Database files
# Database files - nodes.db is now tracked directly
# data/*.db
data/*.db-journal
data/*.db.bak
data/*.db.backup
!data/.gitkeep
!data/nodes.db

@@ -106,3 +134,9 @@ n8n-mcp-wrapper.sh

# Package tarballs
*.tgz

# MCP configuration files
.mcp.json

# Telemetry configuration (user-specific)
~/.n8n-mcp/

209 ANALYSIS_QUICK_REFERENCE.md (new file)
@@ -0,0 +1,209 @@
# N8N-MCP Validation Analysis: Quick Reference

**Analysis Date**: November 8, 2025 | **Data Period**: 90 days | **Sample Size**: 29,218 events

---

## The Core Finding

**Validation is working perfectly. Guidance is the problem.**

- 29,218 validation events successfully prevented bad deployments
- 100% of agents fix errors same-day (proving feedback works)
- 12.6% error rate for advanced users (who attempt complex workflows)
- High error volume = high usage, not a broken system

---

## Top 3 Problem Areas (75% of errors)

| Area | Errors | Root Cause | Quick Fix |
|------|--------|-----------|-----------|
| **Workflow Structure** | 1,268 (26%) | JSON malformation | Better error messages with examples |
| **Connections** | 676 (14%) | Syntax unintuitive | Create connections guide with diagrams |
| **Required Fields** | 378 (8%) | Not marked upfront | Add "⚠️ REQUIRED" to tool responses |

---

## Problem Nodes (By Frequency)

```
Webhook/Trigger ......... 127 failures (40 users)
Slack ................... 73 failures (2 users)
AI Agent ................ 36 failures (20 users)
OpenAI .................. 35 failures (8 users)
HTTP Request ............ 31 failures (13 users)
```

---

## Top 5 Validation Errors

1. **"Duplicate node ID: undefined"** (179)
   - Fix: Point to exact location + show example format

2. **"Duplicate node name: undefined"** (61)
   - Fix: Related to structural issues, same solution as #1

3. **"Single-node workflows only valid for webhooks"** (58)
   - Fix: Create webhook guide explaining rule

4. **"responseNode requires onError: continueRegularOutput"** (57)
   - Fix: Same guide + inline error context

5. **"Required property X cannot be empty"** (25)
   - Fix: Mark required fields before validation

---

## Success Indicators

✓ **Agents learn from errors**: 100% same-day correction rate
✓ **Validation catches issues**: Prevents bad deployments
✓ **Feedback is clear**: Quick fixes show error messages work
✓ **No systemic failures**: No "unfixable" errors

---

## What Works Well

- Error messages lead to immediate corrections
- Agents retry and succeed same-day
- Validation prevents broken workflows
- 9,021 users actively using the system

---

## What Needs Improvement

1. Required fields not marked in tool responses
2. Error messages don't show valid options for enums (see the sketch below)
3. Workflow structure documentation lacks examples
4. Connection syntax unintuitive/undocumented
5. Some error messages too generic

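As a sketch of what fixing items 1 and 2 could look like in practice, here is a hypothetical enriched validation error; the shape and field names are illustrative, not the current tool output:

```typescript
// Hypothetical enriched validation error: exact location, valid enum
// options, and upfront required-field markers let an agent self-correct.
const improvedError = {
  code: 'INVALID_ENUM_VALUE',
  message: "Invalid value 'GETS' for parameter 'method' on node 'HTTP Request'",
  location: { nodeName: 'HTTP Request', parameter: 'method' },
  validOptions: ['GET', 'POST', 'PUT', 'PATCH', 'DELETE'],
  requiredFields: ['url', 'method'], // surfaced as "⚠️ REQUIRED" before validation
};
```
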
---

## Implementation Plan

### Phase 1 (2 weeks): Quick Wins
- Enhanced error messages (location + example)
- Required field markers in tools
- Webhook configuration guide
- **Expected Impact**: 25-30% failure reduction

### Phase 2 (2 weeks): Documentation
- Enum value suggestions in validation
- Workflow connections guide
- Error handler configuration guide
- AI Agent validation improvements
- **Expected Impact**: Additional 15-20% reduction

### Phase 3 (2 weeks): Advanced Features
- Improved search with config hints
- Node type fuzzy matching
- KPI tracking setup
- Test coverage
- **Expected Impact**: Additional 10-15% reduction

**Total Impact**: 50-65% failure reduction (target: 6-7% error rate)

---

## Key Metrics

| Metric | Current | Target | Timeline |
|--------|---------|--------|----------|
| Validation failure rate | 12.6% | 6-7% | 6 weeks |
| First-attempt success | ~77% | 85%+ | 6 weeks |
| Retry success | 100% | 100% | N/A |
| Webhook failures | 127 | <30 | Week 2 |
| Connection errors | 676 | <270 | Week 4 |

---

## Files Delivered

1. **VALIDATION_ANALYSIS_REPORT.md** (27KB)
   - Complete analysis with 16 SQL queries
   - Detailed findings by category
   - 8 actionable recommendations

2. **VALIDATION_ANALYSIS_SUMMARY.md** (13KB)
   - Executive summary (one page)
   - Key metrics scorecard
   - Top recommendations with ROI

3. **IMPLEMENTATION_ROADMAP.md** (4.3KB)
   - 6-week implementation plan
   - Phase-by-phase breakdown
   - Code locations and effort estimates

4. **ANALYSIS_QUICK_REFERENCE.md** (this file)
   - Quick lookup reference
   - Top problems at a glance
   - Decision-making summary

---

## Next Steps

1. **Week 1**: Review analysis + get team approval
2. **Week 2**: Start Phase 1 (error messages + markers)
3. **Week 4**: Deploy Phase 1 + start Phase 2
4. **Week 6**: Deploy Phase 2 + start Phase 3
5. **Week 8**: Deploy Phase 3 + measure impact
6. **Week 9+**: Monitor KPIs + iterate

---

## Key Recommendations Priority

### HIGH (Do First - Week 1-2)
1. Enhance structure error messages
2. Add required field markers to tools
3. Create webhook configuration guide

### MEDIUM (Do Next - Week 3-4)
4. Add enum suggestions to validation responses
5. Create workflow connections guide
6. Add AI Agent node validation

### LOW (Do Later - Week 5-6)
7. Enhance search with config hints
8. Build fuzzy node matcher
9. Set up KPI tracking

---

## Discussion Points

**Q: Why don't we just weaken validation?**
A: Validation prevents 29,218 bad deployments. That's its job. We improve guidance instead.

**Q: Are agents really learning from errors?**
A: Yes, 100% same-day recovery across 661 user-date pairs with errors.

**Q: Why do documentation readers have higher error rates?**
A: They attempt more complex workflows (6.8x more attempts). Their success rate is still 87.4%.

**Q: Which node needs the most help?**
A: Webhook/Trigger configuration (127 failures). Most urgent fix.

**Q: Can we hit a 50% reduction in 6 weeks?**
A: Yes, the analysis shows a 50-65% reduction is achievable with these changes.

---

## Contact & Questions

For detailed information:
- Full analysis: `VALIDATION_ANALYSIS_REPORT.md`
- Executive summary: `VALIDATION_ANALYSIS_SUMMARY.md`
- Implementation plan: `IMPLEMENTATION_ROADMAP.md`

---

**Report Status**: Complete and Ready for Action
**Confidence Level**: High (9,021 users, 29,218 events, comprehensive analysis)
**Generated**: November 8, 2025
167 CHANGELOG.md (new file)
@@ -0,0 +1,167 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [2.32.1] - 2026-01-08

### Fixed

- **Community node case sensitivity bug**: Fixed `extractNodeNameFromPackage` to use lowercase node names, matching n8n's community node convention (e.g., `chatwoot` instead of `Chatwoot`). This resolves validation failures for community nodes with incorrect casing.
- **Case-insensitive node lookup**: Added a fallback in `getNode` to handle case differences between stored and requested node types for better robustness (sketched below).

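A minimal sketch of that fallback, assuming a simple map-backed repository; the function name and body here are illustrative, not the actual `node-repository.ts` code:

```typescript
// Try an exact node-type match first; fall back to a case-insensitive
// scan so 'nodes-base.Chatwoot' still resolves to 'nodes-base.chatwoot'.
function getNodeWithFallback<T>(repo: Map<string, T>, nodeType: string): T | undefined {
  const exact = repo.get(nodeType);
  if (exact !== undefined) return exact;

  const lower = nodeType.toLowerCase();
  for (const [key, value] of repo) {
    if (key.toLowerCase() === lower) return value; // case-insensitive match
  }
  return undefined;
}
```
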
## [2.32.0] - 2026-01-07

### Added

**Community Nodes Support (Issues #23, #490)**

Added comprehensive support for n8n community nodes, expanding the node database from 537 core nodes to 1,084 total nodes (537 core + 547 community).

**New Features:**
- **547 community nodes** indexed (301 verified + 246 popular npm packages)
- **`source` filter** for `search_nodes`: Filter by `all`, `core`, `community`, or `verified`
- **Community metadata** in search results: `isCommunity`, `isVerified`, `authorName`, `npmDownloads`
- **Full schema support** for verified community nodes (no additional parsing needed)

**Data Sources:**
- Verified nodes fetched from the n8n Strapi API (`api.n8n.io/api/community-nodes`)
- Popular npm packages from the npm registry (keyword: `n8n-community-node-package`)

**New CLI Commands:**
```bash
npm run fetch:community           # Full rebuild (verified + top 100 npm)
npm run fetch:community:verified  # Verified nodes only (fast)
npm run fetch:community:update    # Incremental update (skip existing)
```

**Example Usage:**
```javascript
// Search only community nodes
search_nodes({query: "scraping", source: "community"})

// Search verified community nodes
search_nodes({query: "pdf", source: "verified"})

// Results include community metadata
{
  nodeType: "n8n-nodes-brightdata.brightData",
  displayName: "BrightData",
  isCommunity: true,
  isVerified: true,
  authorName: "brightdata.com",
  npmDownloads: 1234
}
```

**Files Added:**
- `src/community/community-node-service.ts` - Business logic for syncing community nodes
- `src/community/community-node-fetcher.ts` - API integration for Strapi and npm
- `src/scripts/fetch-community-nodes.ts` - CLI script for fetching community nodes

**Files Modified:**
- `src/database/schema.sql` - Added community columns and indexes
- `src/database/node-repository.ts` - Extended for community node fields
- `src/mcp/tools.ts` - Added `source` parameter to `search_nodes`
- `src/mcp/server.ts` - Added source filtering and community metadata to results
- `src/mcp/tool-docs/discovery/search-nodes.ts` - Updated documentation

### Fixed

**Dynamic AI Tool Nodes Not Recognized by Validator (Issue #522)**

Fixed a validator false positive where dynamically generated AI Tool nodes like `googleDriveTool` and `googleSheetsTool` were incorrectly reported as "unknown node type".

**Root Cause:** n8n creates Tool variants at runtime when ANY node is connected to an AI Agent's tool slot (e.g., `googleDrive` → `googleDriveTool`). These dynamic nodes don't exist in npm packages, so the MCP database couldn't discover them during rebuild.

**Solution:** Added validation-time inference that checks whether the base node exists when a `*Tool` node type is not found. If the base node exists, the Tool variant is treated as valid with an informative warning.

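A rough sketch of that inference step (simplified; the real logic lives in `workflow-validator.ts`, and the set of known types comes from the node database):

```typescript
// If a `*Tool` node type is unknown, check whether its base node exists.
// A known base node makes the Tool variant valid, with a warning instead
// of a hard "unknown node type" error.
function resolveNodeType(knownTypes: Set<string>, nodeType: string) {
  if (knownTypes.has(nodeType)) return { valid: true };

  if (nodeType.endsWith('Tool')) {
    const base = nodeType.slice(0, -'Tool'.length); // googleDriveTool -> googleDrive
    if (knownTypes.has(base)) {
      return { valid: true, warning: 'INFERRED_TOOL_VARIANT' };
    }
  }
  return { valid: false, error: `Unknown node type: ${nodeType}` };
}
```
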
**Changes:**
- `workflow-validator.ts`: Added inference logic for dynamic Tool variants
- `node-similarity-service.ts`: Added high-confidence (98%) suggestion for valid Tool variants
- Added 7 new unit tests for inferred tool variant functionality

**Behavior:**
- `googleDriveTool` with existing `googleDrive` → Warning: `INFERRED_TOOL_VARIANT`
- `googleSheetsTool` with existing `googleSheets` → Warning: `INFERRED_TOOL_VARIANT`
- `unknownNodeTool` without base node → Error: "Unknown node type"
- `supabaseTool` (in database) → Uses database record (no inference)

## [2.31.8] - 2026-01-07

### Deprecated

**USE_FIXED_HTTP Environment Variable (Issue #524)**

The `USE_FIXED_HTTP=true` environment variable is now deprecated. The fixed HTTP implementation does not support the SSE (Server-Sent Events) streaming required by clients like OpenAI Codex.

**What changed:**
- `SingleSessionHTTPServer` is now the default HTTP implementation
- Removed `USE_FIXED_HTTP` from Docker, Railway, and documentation examples
- Added deprecation warnings when `USE_FIXED_HTTP=true` is detected
- Renamed the npm script to `start:http:fixed:deprecated`

**Migration:** Simply unset `USE_FIXED_HTTP` or remove it from your environment. The `SingleSessionHTTPServer` supports both JSON-RPC and SSE streaming automatically.

**Why this matters:**
- OpenAI Codex and other SSE clients now work correctly
- The server properly handles `Accept: text/event-stream` headers
- Returns the correct `Content-Type: text/event-stream` for SSE requests

The deprecated implementation will be removed in a future major version.

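For illustration, this is roughly what an SSE-capable client request looks like; the `/mcp` endpoint path is an assumption here, only the port matches the documented Docker examples:

```typescript
// Ask the server for an SSE stream; SingleSessionHTTPServer is expected
// to answer streaming requests with Content-Type: text/event-stream.
const res = await fetch('http://localhost:3000/mcp', { // endpoint path assumed
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Accept: 'application/json, text/event-stream', // signals SSE support
  },
  body: JSON.stringify({ jsonrpc: '2.0', id: 1, method: 'tools/list' }),
});
console.log(res.headers.get('content-type')); // text/event-stream for SSE clients
```
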
## [2.31.7] - 2026-01-06

### Changed

- Updated n8n from 2.1.5 to 2.2.3
- Updated n8n-core from 2.1.4 to 2.2.2
- Updated n8n-workflow from 2.1.1 to 2.2.2
- Updated @n8n/n8n-nodes-langchain from 2.1.4 to 2.2.2
- Rebuilt node database with 540 nodes (434 from n8n-nodes-base, 106 from @n8n/n8n-nodes-langchain)

## [2.31.6] - 2026-01-03

### Changed

**Dependencies Update**

- Updated n8n from 2.1.4 to 2.1.5
- Updated n8n-core from 2.1.3 to 2.1.4
- Updated @n8n/n8n-nodes-langchain from 2.1.3 to 2.1.4
- Rebuilt node database with 540 nodes (434 from n8n-nodes-base, 106 from @n8n/n8n-nodes-langchain)

## [2.31.5] - 2026-01-02

### Added

**MCP Tool Annotations (PR #512)**

Added MCP tool annotations to all 20 tools following the [MCP specification](https://spec.modelcontextprotocol.io/specification/2025-03-26/server/tools/#annotations). These annotations help AI assistants understand tool behavior and capabilities; a sketch follows the lists below.

**Annotations added:**
- `title`: Human-readable name for each tool
- `readOnlyHint`: True for tools that don't modify state (11 tools)
- `destructiveHint`: True for delete operations (3 tools)
- `idempotentHint`: True for operations that produce the same result when called repeatedly (14 tools)
- `openWorldHint`: True for tools accessing the external n8n API (13 tools)

**Documentation tools** (7): All marked `readOnlyHint=true`, `idempotentHint=true`
- `tools_documentation`, `search_nodes`, `get_node`, `validate_node`, `get_template`, `search_templates`, `validate_workflow`

**Management tools** (13): All marked `openWorldHint=true`
- Read-only: `n8n_get_workflow`, `n8n_list_workflows`, `n8n_validate_workflow`, `n8n_health_check`
- Idempotent updates: `n8n_update_full_workflow`, `n8n_update_partial_workflow`, `n8n_autofix_workflow`
- Destructive: `n8n_delete_workflow`, `n8n_executions` (delete action), `n8n_workflow_versions` (delete/truncate)

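A sketch of how such an annotated tool definition can look; the annotation values for `search_nodes` follow the lists above, while the surrounding object shape is illustrative rather than the exact `tools.ts` code:

```typescript
// Illustrative MCP tool definition carrying behavior annotations.
const searchNodesTool = {
  name: 'search_nodes',
  description: 'Search the n8n node database',
  inputSchema: { type: 'object', properties: { query: { type: 'string' } } },
  annotations: {
    title: 'Search Nodes',
    readOnlyHint: true,    // documentation tool: does not modify state
    idempotentHint: true,  // same query, same result
    destructiveHint: false,
    openWorldHint: false,  // served from the local database, not the n8n API
  },
};
```
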
## [2.31.4] - 2026-01-02

### Fixed

**Workflow Data Mangled During Serialization: snake_case Conversion (Issue #517)**

Fixed a critical bug where workflow mutation data was corrupted during serialization to Supabase, making 98.9% of collected workflow data invalid for n8n API operations.

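As an illustration of the failure mode (the objects are hypothetical; the field names are typical n8n workflow keys):

```typescript
// What the n8n API expects: camelCase keys on workflow data...
const original = { name: 'HTTP Request', typeVersion: 4.2, webhookId: 'abc-123' };

// ...versus what reached Supabase after the buggy snake_case conversion,
// which the n8n API then rejects.
const corrupted = { name: 'HTTP Request', type_version: 4.2, webhook_id: 'abc-123' };
```
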
237 CLAUDE.md
@@ -6,15 +6,232 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
|
||||
n8n-mcp is a comprehensive documentation and knowledge server that provides AI assistants with complete access to n8n node information through the Model Context Protocol (MCP). It serves as a bridge between n8n's workflow automation platform and AI models, enabling them to understand and work with n8n nodes effectively.
|
||||
|
||||
## ✅ Latest Updates (v2.7.6)
|
||||
### Current Architecture:
|
||||
```
src/
├── loaders/
│   └── node-loader.ts             # NPM package loader for both packages
├── parsers/
│   ├── node-parser.ts             # Enhanced parser with version support
│   └── property-extractor.ts      # Dedicated property/operation extraction
├── mappers/
│   └── docs-mapper.ts             # Documentation mapping with fixes
├── database/
│   ├── schema.sql                 # SQLite schema
│   ├── node-repository.ts         # Data access layer
│   └── database-adapter.ts        # Universal database adapter (NEW in v2.3)
├── services/
│   ├── property-filter.ts         # Filters properties to essentials (NEW in v2.4)
│   ├── example-generator.ts       # Generates working examples (NEW in v2.4)
│   ├── task-templates.ts          # Pre-configured node settings (NEW in v2.4)
│   ├── config-validator.ts        # Configuration validation (NEW in v2.4)
│   ├── enhanced-config-validator.ts   # Operation-aware validation (NEW in v2.4.2)
│   ├── node-specific-validators.ts    # Node-specific validation logic (NEW in v2.4.2)
│   ├── property-dependencies.ts   # Dependency analysis (NEW in v2.4)
│   ├── type-structure-service.ts  # Type structure validation (NEW in v2.22.21)
│   ├── expression-validator.ts    # n8n expression syntax validation (NEW in v2.5.0)
│   └── workflow-validator.ts      # Complete workflow validation (NEW in v2.5.0)
├── types/
│   ├── type-structures.ts         # Type structure definitions (NEW in v2.22.21)
│   ├── instance-context.ts        # Multi-tenant instance configuration
│   └── session-state.ts           # Session persistence types (NEW in v2.24.1)
├── constants/
│   └── type-structures.ts         # 22 complete type structures (NEW in v2.22.21)
├── templates/
│   ├── template-fetcher.ts        # Fetches templates from n8n.io API (NEW in v2.4.1)
│   ├── template-repository.ts     # Template database operations (NEW in v2.4.1)
│   └── template-service.ts        # Template business logic (NEW in v2.4.1)
├── scripts/
│   ├── rebuild.ts                 # Database rebuild with validation
│   ├── validate.ts                # Node validation
│   ├── test-nodes.ts              # Critical node tests
│   ├── test-essentials.ts         # Test new essentials tools (NEW in v2.4)
│   ├── test-enhanced-validation.ts    # Test enhanced validation (NEW in v2.4.2)
│   ├── test-structure-validation.ts   # Test type structure validation (NEW in v2.22.21)
│   ├── test-workflow-validation.ts    # Test workflow validation (NEW in v2.5.0)
│   ├── test-ai-workflow-validation.ts # Test AI workflow validation (NEW in v2.5.1)
│   ├── test-mcp-tools.ts          # Test MCP tool enhancements (NEW in v2.5.1)
│   ├── test-n8n-validate-workflow.ts  # Test n8n_validate_workflow tool (NEW in v2.6.3)
│   ├── test-typeversion-validation.ts # Test typeVersion validation (NEW in v2.6.1)
│   ├── test-workflow-diff.ts      # Test workflow diff engine (NEW in v2.7.0)
│   ├── test-tools-documentation.ts    # Test tools documentation (NEW in v2.7.3)
│   ├── fetch-templates.ts         # Fetch workflow templates from n8n.io (NEW in v2.4.1)
│   └── test-templates.ts          # Test template functionality (NEW in v2.4.1)
├── mcp/
│   ├── server.ts                  # MCP server with enhanced tools
│   ├── tools.ts                   # Tool definitions including new essentials
│   ├── tools-documentation.ts     # Tool documentation system (NEW in v2.7.3)
│   └── index.ts                   # Main entry point with mode selection
├── utils/
│   ├── console-manager.ts         # Console output isolation (NEW in v2.3.1)
│   └── logger.ts                  # Logging utility with HTTP awareness
├── http-server-single-session.ts  # Single-session HTTP server (NEW in v2.3.1)
│                                  # Session persistence API (NEW in v2.24.1)
├── mcp-engine.ts                  # Clean API for service integration (NEW in v2.3.1)
│                                  # Session persistence wrappers (NEW in v2.24.1)
└── index.ts                       # Library exports
```

### Update (v2.7.8) - npm Publishing Process:
- ✅ **NEW: npm Publishing Steps**
- To publish v2.7.8 to npm:

```bash
npm run prepare:publish
cd npm-publish-temp
npm publish --otp=YOUR_OTP_CODE
```

## Common Development Commands

[Rest of the existing file content remains the same]

```bash
# Build and Setup
npm run build          # Build TypeScript (always run after changes)
npm run rebuild        # Rebuild node database from n8n packages
npm run validate       # Validate all node data in database

# Testing
npm test               # Run all tests
npm run test:unit      # Run unit tests only
npm run test:integration   # Run integration tests
npm run test:coverage  # Run tests with coverage report
npm run test:watch     # Run tests in watch mode
npm run test:structure-validation   # Test type structure validation (Phase 3)

# Run a single test file
npm test -- tests/unit/services/property-filter.test.ts

# Linting and Type Checking
npm run lint           # Check TypeScript types (alias for typecheck)
npm run typecheck      # Check TypeScript types

# Running the Server
npm start              # Start MCP server in stdio mode
npm run start:http     # Start MCP server in HTTP mode
npm run dev            # Build, rebuild database, and validate
npm run dev:http       # Run HTTP server with auto-reload

# Update n8n Dependencies
npm run update:n8n:check   # Check for n8n updates (dry run)
npm run update:n8n         # Update n8n packages to latest

# Database Management
npm run db:rebuild     # Rebuild database from scratch
npm run migrate:fts5   # Migrate to FTS5 search (if needed)

# Template Management
npm run fetch:templates    # Fetch latest workflow templates from n8n.io
npm run test:templates     # Test template functionality
```

## High-Level Architecture

### Core Components

1. **MCP Server** (`mcp/server.ts`)
   - Implements Model Context Protocol for AI assistants
   - Provides tools for searching, validating, and managing n8n nodes
   - Supports both stdio (Claude Desktop) and HTTP modes

2. **Database Layer** (`database/`)
   - SQLite database storing all n8n node information
   - Universal adapter pattern supporting both better-sqlite3 and sql.js
   - Full-text search capabilities with FTS5

3. **Node Processing Pipeline**
   - **Loader** (`loaders/node-loader.ts`): Loads nodes from n8n packages
   - **Parser** (`parsers/node-parser.ts`): Extracts node metadata and structure
   - **Property Extractor** (`parsers/property-extractor.ts`): Deep property analysis
   - **Docs Mapper** (`mappers/docs-mapper.ts`): Maps external documentation

4. **Service Layer** (`services/`)
   - **Property Filter**: Reduces node properties to AI-friendly essentials
   - **Config Validator**: Multi-profile validation system
   - **Type Structure Service**: Validates complex type structures (filter, resourceMapper, etc.)
   - **Expression Validator**: Validates n8n expression syntax
   - **Workflow Validator**: Complete workflow structure validation

5. **Template System** (`templates/`)
   - Fetches and stores workflow templates from n8n.io
   - Provides pre-built workflow examples
   - Supports template search and validation

### Key Design Patterns

1. **Repository Pattern**: All database operations go through repository classes
2. **Service Layer**: Business logic separated from data access
3. **Validation Profiles**: Different validation strictness levels (minimal, runtime, ai-friendly, strict)
4. **Diff-Based Updates**: Efficient workflow updates using operation diffs (see the sketch after this list)

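To make the diff-based pattern concrete, here is a hedged sketch of what a partial-update payload can look like. The `updateNode` operation name and the `changes` field are hypothetical placeholders, not the tool's confirmed schema; check the `n8n_update_partial_workflow` tool documentation for the real operation format.

```bash
# Hypothetical shape of a diff-based update (operation and field names are illustrative):
cat > /tmp/diff-ops.json <<'EOF'
{
  "id": "workflow-id",
  "operations": [
    { "type": "updateNode", "nodeName": "Webhook", "changes": { "parameters.path": "new-path" } }
  ]
}
EOF
# Sending only the changed fields instead of the full workflow JSON is what saves
# the bulk of the tokens on large workflows.
```
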
### MCP Tools Architecture

The MCP server exposes tools in several categories:

1. **Discovery Tools**: Finding and exploring nodes
2. **Configuration Tools**: Getting node details and examples
3. **Validation Tools**: Validating configurations before deployment
4. **Workflow Tools**: Complete workflow validation
5. **Management Tools**: Creating and updating workflows (requires API config)

## Memories and Notes for Development

### Development Workflow Reminders
- When you make changes to the MCP server, you need to ask the user to reload it before you test
- When the user asks to review issues, you should use the GH CLI to get the issue and all the comments
- When the task can be divided into separate subtasks, you should spawn separate sub-agents to handle them in parallel
- Use the best sub-agent for the task as per their descriptions

### Testing Best Practices
- Always run `npm run build` before testing changes
- Use `npm run dev` to rebuild the database after package updates
- Check coverage with `npm run test:coverage`
- Integration tests require a clean database state

### Common Pitfalls
- The MCP server needs to be reloaded in Claude Desktop after changes
- HTTP mode requires proper CORS and auth token configuration
- Database rebuilds can take 2-3 minutes due to n8n package size
- Always validate workflows before deployment to n8n

### Performance Considerations
- Use `get_node_essentials()` instead of `get_node_info()` for faster responses
- Batch validation operations when possible
- The diff-based update system saves 80-90% of tokens on workflow updates

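For example, fetching the trimmed-down essentials over the standard MCP `tools/call` method. The tool name and `nodeType` argument come from this project's docs; the endpoint, port, and auth header are the same assumptions as in the `tools/list` sketch earlier:

```bash
# Hedged sketch: call the lighter tool over the HTTP transport.
curl -s http://localhost:3000/mcp \
  -H "Authorization: Bearer $AUTH_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Accept: application/json, text/event-stream" \
  -d '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"get_node_essentials","arguments":{"nodeType":"nodes-base.webhook"}}}'
```
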
### Agent Interaction Guidelines
- Sub-agents are not allowed to spawn further sub-agents
- When you use sub-agents, do not allow them to commit and push. That should be done by you

### Development Best Practices
- Run typecheck and lint after every code change

### Session Persistence Feature (v2.24.1)

**Location:**
- Types: `src/types/session-state.ts`
- Implementation: `src/http-server-single-session.ts` (lines 698-702, 1444-1584)
- Wrapper: `src/mcp-engine.ts` (lines 123-169)
- Tests: `tests/unit/http-server/session-persistence.test.ts`, `tests/unit/mcp-engine/session-persistence.test.ts`

**Key Features:**
- **Export/Restore API**: `exportSessionState()` and `restoreSessionState()` methods
- **Multi-tenant support**: Enables zero-downtime deployments for SaaS platforms
- **Security-first**: API keys are exported as plaintext - downstream consumers MUST encrypt them
- **Dormant sessions**: Restored sessions recreate transports on first request
- **Automatic expiration**: Respects the `sessionTimeout` setting (default 30 min)
- **MAX_SESSIONS limit**: Caps at 100 concurrent sessions (configurable via the N8N_MCP_MAX_SESSIONS env var)

**Important Implementation Notes:**
- Only exports sessions with a valid n8nApiUrl and n8nApiKey in context
- Skips expired sessions during both export and restore
- Uses `validateInstanceContext()` for data integrity checks
- Handles null/invalid sessions gracefully with warnings
- Session metadata (timestamps) and context (credentials) are persisted
- Transport and server objects are NOT persisted (recreated on demand)

**Testing:**
- 22 unit tests covering export, restore, edge cases, and round-trip cycles
- Tests use current timestamps to avoid expiration issues
- Integration with multi-tenant backends is documented in README.md

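A minimal round-trip sketch, assuming the built `dist/mcp-engine.js` exposes an engine class with the two methods named above; the export name and constructor here are assumptions, so check `src/mcp-engine.ts` for the real API before using this:

```bash
node -e '
const { MCPEngine } = require("./dist/mcp-engine.js");  // assumed export name
(async () => {
  const engine = new MCPEngine();                        // assumed constructor
  const state = await engine.exportSessionState();       // plaintext API keys: encrypt before storing!
  // ...persist state, deploy the new instance, then:
  await engine.restoreSessionState(state);               // sessions stay dormant until first request
})().catch(console.error);
'
```
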
# important-instruction-reminders

Do what has been asked; nothing more, nothing less.
NEVER create files unless they're absolutely necessary for achieving your goal.
ALWAYS prefer editing an existing file to creating a new one.
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.
- When you make changes to the MCP server, you need to ask the user to reload it before you test
- When the user asks to review issues, you should use the GH CLI to get the issue and all the comments
- When the task can be divided into separate subtasks, you should spawn separate sub-agents to handle them in parallel
- Use the best sub-agent for the task as per their descriptions
- Do not use hyperbolic or dramatic language in comments and documentation
- Add to every commit and PR: Conceived by Romuald Członkowski - and then link to www.aiadvisors.pl/en. Don't add it in conversations

Dockerfile (55 changed lines)
@@ -2,39 +2,45 @@
# Ultra-optimized Dockerfile - minimal runtime dependencies (no n8n packages)

# Stage 1: Builder (TypeScript compilation only)
-FROM node:20-alpine AS builder
+FROM node:22-alpine AS builder
WORKDIR /app

-# Copy tsconfig for TypeScript compilation
-COPY tsconfig.json ./
+# Copy tsconfig files for TypeScript compilation
+COPY tsconfig*.json ./

# Create minimal package.json and install ONLY build dependencies
+# Note: openai and zod are needed for TypeScript compilation of template metadata modules
RUN --mount=type=cache,target=/root/.npm \
    echo '{}' > package.json && \
    npm install --no-save typescript@^5.8.3 @types/node@^22.15.30 @types/express@^5.0.3 \
-    @modelcontextprotocol/sdk@^1.12.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
-    n8n-workflow@^1.96.0 uuid@^11.0.5 @types/uuid@^10.0.0
+    @modelcontextprotocol/sdk@1.20.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
+    n8n-workflow@^1.96.0 uuid@^11.0.5 @types/uuid@^10.0.0 \
+    openai@^4.77.0 zod@3.24.1 lru-cache@^11.2.1 @supabase/supabase-js@^2.57.4

# Copy source and build
COPY src ./src
+# Note: src/n8n contains TypeScript types needed for compilation
+# These will be compiled but not included in runtime
-RUN npx tsc
+RUN npx tsc -p tsconfig.build.json

# Stage 2: Runtime (minimal dependencies)
-FROM node:20-alpine AS runtime
+FROM node:22-alpine AS runtime
WORKDIR /app

# Install only essential runtime tools
-RUN apk add --no-cache curl && \
+RUN apk add --no-cache curl su-exec && \
    rm -rf /var/cache/apk/*

# Copy runtime-only package.json
COPY package.runtime.json package.json

-# Install runtime dependencies with cache mount
+# Install runtime dependencies with better-sqlite3 compilation
+# Build tools (python3, make, g++) are installed, used for compilation, then removed
+# This enables native SQLite (better-sqlite3) instead of sql.js, preventing memory leaks
RUN --mount=type=cache,target=/root/.npm \
-    npm install --production --no-audit --no-fund
+    apk add --no-cache python3 make g++ && \
+    npm install --production --no-audit --no-fund && \
+    apk del python3 make g++

# Copy built application
COPY --from=builder /app/dist ./dist
@@ -45,9 +51,11 @@ COPY data/nodes.db ./data/
COPY src/database/schema-optimized.sql ./src/database/
COPY .env.example ./

-# Copy entrypoint script
+# Copy entrypoint script, config parser, and n8n-mcp command
COPY docker/docker-entrypoint.sh /usr/local/bin/
-RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+COPY docker/parse-config.js /app/docker/
+COPY docker/n8n-mcp /usr/local/bin/
+RUN chmod +x /usr/local/bin/docker-entrypoint.sh /usr/local/bin/n8n-mcp

# Add container labels
LABEL org.opencontainers.image.source="https://github.com/czlonkowski/n8n-mcp"
@@ -55,9 +63,13 @@ LABEL org.opencontainers.image.description="n8n MCP Server - Runtime Only"
LABEL org.opencontainers.image.licenses="MIT"
LABEL org.opencontainers.image.title="n8n-mcp"

-# Create non-root user
-RUN addgroup -g 1001 -S nodejs && \
-    adduser -S nodejs -u 1001 && \
+# Create non-root user with unpredictable UID/GID
+# Using a hash of the build time to generate unpredictable IDs
+RUN BUILD_HASH=$(date +%s | sha256sum | head -c 8) && \
+    UID=$((10000 + 0x${BUILD_HASH} % 50000)) && \
+    GID=$((10000 + 0x${BUILD_HASH} % 50000)) && \
+    addgroup -g ${GID} -S nodejs && \
+    adduser -S nodejs -u ${UID} -G nodejs && \
    chown -R nodejs:nodejs /app

# Switch to non-root user
@@ -66,13 +78,20 @@ USER nodejs
# Set Docker environment flag
ENV IS_DOCKER=true

-# Expose HTTP port
+# Telemetry: Anonymous usage statistics are ENABLED by default
+# To opt-out, uncomment the following line:
+# ENV N8N_MCP_TELEMETRY_DISABLED=true
+
+# Expose HTTP port (default 3000, configurable via PORT environment variable at runtime)
EXPOSE 3000

+# Set stop signal to SIGTERM (default, but explicit is better)
+STOPSIGNAL SIGTERM
+
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-    CMD curl -f http://127.0.0.1:3000/health || exit 1
+    CMD sh -c 'curl -f http://127.0.0.1:${PORT:-3000}/health || exit 1'

# Optimized entrypoint
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
-CMD ["node", "dist/mcp/index.js"]
+CMD ["node", "dist/mcp/index.js"]

Dockerfile.railway (new file, 93 lines)
@@ -0,0 +1,93 @@
# syntax=docker/dockerfile:1.7
# Railway-compatible Dockerfile for n8n-mcp

# --- Stage 1: Builder ---
FROM node:22-alpine AS builder
WORKDIR /app

# Install system dependencies for native modules
RUN apk add --no-cache python3 make g++ && \
    rm -rf /var/cache/apk/*

# Copy package files and tsconfig files
COPY package*.json tsconfig*.json ./

# Install all dependencies (including devDependencies for build)
RUN npm ci --no-audit --no-fund

# Copy source code
COPY src ./src

# Build the application
RUN npm run build

# --- Stage 2: Runtime ---
FROM node:22-alpine AS runtime
WORKDIR /app

# Install runtime dependencies
RUN apk add --no-cache curl && \
    rm -rf /var/cache/apk/*

# Copy runtime-only package.json
COPY package.runtime.json package.json

# Install production dependencies with temporary build tools
# Build tools (python3, make, g++) enable better-sqlite3 compilation (native SQLite)
# They are removed after installation to reduce image size and attack surface
RUN apk add --no-cache python3 make g++ && \
    npm install --production --no-audit --no-fund && \
    npm cache clean --force && \
    apk del python3 make g++

# Copy built application from builder stage
COPY --from=builder /app/dist ./dist

# Copy necessary data and configuration files
COPY data/ ./data/
COPY src/database/schema-optimized.sql ./src/database/schema-optimized.sql
COPY .env.example ./

# Copy entrypoint script
COPY docker/docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

# Create data directory if it doesn't exist and set permissions
RUN mkdir -p ./data && \
    chmod 755 ./data

# Add metadata labels
LABEL org.opencontainers.image.source="https://github.com/czlonkowski/n8n-mcp"
LABEL org.opencontainers.image.description="n8n MCP Server - Integration between n8n workflow automation and Model Context Protocol"
LABEL org.opencontainers.image.licenses="MIT"
LABEL org.opencontainers.image.title="n8n-mcp"
LABEL org.opencontainers.image.version="2.7.13"

# Create non-root user for security
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001 && \
    chown -R nodejs:nodejs /app
USER nodejs

# Set Railway-optimized environment variables
ENV AUTH_TOKEN="REPLACE_THIS_AUTH_TOKEN_32_CHARS_MIN_abcdefgh"
ENV NODE_ENV=production
ENV IS_DOCKER=true
ENV MCP_MODE=http
# NOTE: USE_FIXED_HTTP is deprecated. SingleSessionHTTPServer is now the default.
# See: https://github.com/czlonkowski/n8n-mcp/issues/524
ENV LOG_LEVEL=info
ENV TRUST_PROXY=1
ENV HOST=0.0.0.0
ENV CORS_ORIGIN="*"

# Expose port (Railway will set PORT automatically)
EXPOSE 3000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://127.0.0.1:${PORT:-3000}/health || exit 1

# Optimized entrypoint (identical to main Dockerfile)
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["node", "dist/mcp/index.js", "--http"]

@@ -1,5 +1,5 @@
# Quick test Dockerfile using pre-built files
-FROM node:20-alpine
+FROM node:22-alpine

WORKDIR /app

MEMORY_N8N_UPDATE.md
@@ -1,17 +1,131 @@
# n8n Update Process - Quick Reference

## Quick Steps to Update n8n
## ⚡ Recommended Fast Workflow (2025-11-04)

When there's a new n8n version available, follow these steps:
**CRITICAL FIRST STEP**: Check existing releases to avoid version conflicts!

```bash
# 1. CHECK EXISTING RELEASES FIRST (prevents version conflicts!)
gh release list | head -5
# Look at the latest version - your new version must be higher!

# 2. Switch to main and pull
git checkout main && git pull

# 3. Check for updates (dry run)
npm run update:n8n:check

# 4. Run update and skip tests (we'll test in CI)
yes y | npm run update:n8n

# 5. Create feature branch
git checkout -b update/n8n-X.X.X

# 6. Update version in package.json (must be HIGHER than latest release!)
# Edit: "version": "2.XX.X" (not the version from the release list!)

# 7. Update CHANGELOG.md
# - Change version number to match package.json
# - Update date to today
# - Update dependency versions

# 8. Update README badge
# Edit line 8: Change n8n version badge to new n8n version

# 9. Commit and push
git add -A
git commit -m "chore: update n8n to X.X.X and bump version to 2.XX.X

- Updated n8n from X.X.X to X.X.X
- Updated n8n-core from X.X.X to X.X.X
- Updated n8n-workflow from X.X.X to X.X.X
- Updated @n8n/n8n-nodes-langchain from X.X.X to X.X.X
- Rebuilt node database with XXX nodes (XXX from n8n-nodes-base, XXX from @n8n/n8n-nodes-langchain)
- Updated README badge with new n8n version
- Updated CHANGELOG with dependency changes

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>"

git push -u origin update/n8n-X.X.X

# 10. Create PR
gh pr create --title "chore: update n8n to X.X.X" --body "Updates n8n and all related dependencies to the latest versions..."

# 11. After PR is merged, verify release triggered
gh release list | head -1
# If the new version appears, you're done!
# If not, the version might have already been released - bump version again and create new PR
```

### Why This Workflow?

✅ **Fast**: Skip local tests (2-3 min saved) - CI runs them anyway
✅ **Safe**: Unit tests in CI verify compatibility
✅ **Clean**: All changes in one PR with proper tracking
✅ **Automatic**: Release workflow triggers on merge if version is new

### Common Issues

**Problem**: Release workflow doesn't trigger after merge
**Cause**: Version number was already released (check `gh release list`)
**Solution**: Create new PR bumping version by one patch number

**Problem**: Integration tests fail in CI with "unauthorized"
**Cause**: n8n test instance credentials expired (infrastructure issue)
**Solution**: Ignore if unit tests pass - this is not a code problem

**Problem**: CI takes 8+ minutes
**Reason**: Integration tests need live n8n instance (slow)
**Normal**: Unit tests (~2 min) + integration tests (~6 min) = ~8 min total

## Quick One-Command Update

For a complete update with tests and publish preparation:

```bash
npm run update:all
```

This single command will:
1. ✅ Check for n8n updates and ask for confirmation
2. ✅ Update all n8n dependencies to latest compatible versions
3. ✅ Run all 1,182 tests (933 unit + 249 integration)
4. ✅ Validate critical nodes
5. ✅ Build the project
6. ✅ Bump the version
7. ✅ Update README badges
8. ✅ Prepare everything for npm publish
9. ✅ Create a comprehensive commit

## Manual Steps (if needed)

### Quick Steps to Update n8n

```bash
# 1. Update n8n dependencies automatically
npm run update:n8n

# 2. Validate the update
# 2. Run tests
npm test

# 3. Validate the update
npm run validate

# 3. Commit and push
# 4. Build
npm run build

# 5. Bump version
npm version patch

# 6. Update README badges manually
# - Update version badge
# - Update n8n version badge

# 7. Commit and push
git add -A
git commit -m "chore: update n8n to vX.X.X
```

@@ -21,6 +135,7 @@ git commit -m "chore: update n8n to vX.X.X
- Updated @n8n/n8n-nodes-langchain from X.X.X to X.X.X
- Rebuilt node database with XXX nodes
- Sanitized XXX workflow templates (if present)
- All 1,182 tests passing (933 unit, 249 integration)
- All validation tests passing

🤖 Generated with [Claude Code](https://claude.ai/code)
@@ -31,8 +146,21 @@ git push origin main

## What the Commands Do

### `npm run update:all`
This comprehensive command:
1. Checks current branch and git status
2. Shows current versions and checks for updates
3. Updates all n8n dependencies to compatible versions
4. **Runs the complete test suite** (NEW!)
5. Validates critical nodes
6. Builds the project
7. Bumps the patch version
8. Updates version badges in README
9. Creates a detailed commit with all changes
10. Provides next steps for GitHub release and npm publish

### `npm run update:n8n`
This single command:
This command:
1. Checks for the latest n8n version
2. Updates n8n and all its required dependencies (n8n-core, n8n-workflow, @n8n/n8n-nodes-langchain)
3. Runs `npm install` to update package-lock.json
@@ -45,13 +173,22 @@ This single command:
- Shows database statistics
- Confirms everything is working correctly

### `npm test`
- Runs all 1,182 tests
- Unit tests: 933 tests across 30 files
- Integration tests: 249 tests across 14 files
- Must pass before publishing!

## Important Notes

1. **Always run on main branch** - Make sure you're on main and it's clean
2. **The update script is smart** - It automatically syncs all n8n dependencies to compatible versions
3. **Database rebuild is automatic** - The update script handles this for you
4. **Template sanitization is automatic** - Any API tokens in workflow templates are replaced with placeholders
5. **Docker image builds automatically** - Pushing to GitHub triggers the workflow
1. **ALWAYS check existing releases first** - Use `gh release list` to see what versions are already released. Your new version must be higher!
2. **Release workflow only triggers on version CHANGE** - If you merge a PR with an already-released version (e.g., 2.22.8), the workflow won't run. You'll need to bump to a new version (e.g., 2.22.9) and create another PR.
3. **Integration test failures in CI are usually infrastructure issues** - If unit tests pass but integration tests fail with "unauthorized", this is typically because the test n8n instance credentials need updating. The code itself is fine.
4. **Skip local tests - let CI handle them** - Running tests locally adds 2-3 minutes with no benefit since CI runs them anyway. The fast workflow skips local tests.
5. **The update script is smart** - It automatically syncs all n8n dependencies to compatible versions
6. **Database rebuild is automatic** - The update script handles this for you
7. **Template sanitization is automatic** - Any API tokens in workflow templates are replaced with placeholders
8. **Docker image builds automatically** - Pushing to GitHub triggers the workflow

## GitHub Push Protection

@@ -62,12 +199,34 @@ As of July 2025, GitHub's push protection may block database pushes if they cont
3. If push is still blocked, use the GitHub web interface to review and allow the push

## Time Estimate
- Total time: ~3-5 minutes
- Most time is spent on `npm install` and database rebuild
- The actual commands take seconds to run

### Fast Workflow (Recommended)
- Local work: ~2-3 minutes
  - npm install and database rebuild: ~2-3 minutes
  - File edits (CHANGELOG, README, package.json): ~30 seconds
  - Git operations (commit, push, create PR): ~30 seconds
- CI testing after PR creation: ~8-10 minutes (runs automatically)
  - Unit tests: ~2 minutes
  - Integration tests: ~6 minutes (may fail with infrastructure issues - ignore if unit tests pass)
  - Other checks: ~1 minute

**Total hands-on time: ~3 minutes** (then wait for CI)

### Full Workflow with Local Tests
- Total time: ~5-7 minutes
- Test suite: ~2.5 minutes
- npm install and database rebuild: ~2-3 minutes
- The rest: seconds

**Note**: The fast workflow is recommended since CI runs the same tests anyway.

## Troubleshooting

If tests fail:
1. Check the test output for specific failures
2. Run `npm run test:unit` or `npm run test:integration` separately
3. Fix any issues before proceeding with the update

If validation fails:
1. Check the error message - usually it's a node type reference issue
2. The update script handles most compatibility issues automatically
@@ -79,4 +238,23 @@ To see what would be updated without making changes:

```bash
npm run update:n8n:check
```

This shows you the available updates without modifying anything.

## Publishing to npm

After updating:
```bash
# Prepare for publish (runs tests automatically)
npm run prepare:publish

# Follow the instructions to publish with OTP
cd npm-publish-temp
npm publish --otp=YOUR_OTP_CODE
```

## Creating a GitHub Release

After pushing:
```bash
gh release create vX.X.X --title "vX.X.X" --notes "Updated n8n to vX.X.X"
```

MEMORY_TEMPLATE_UPDATE.md (new file, 336 lines)
@@ -0,0 +1,336 @@
# Template Update Process - Quick Reference

## Overview

The n8n-mcp project maintains a database of workflow templates from n8n.io. This guide explains how to update the template database incrementally without rebuilding from scratch.

## Current Database State

As of the last update:
- **2,598 templates** in database
- Templates from the last 12 months
- Latest template: September 12, 2025

## Quick Commands

### Incremental Update (Recommended)
```bash
# Build if needed
npm run build

# Fetch only NEW templates (5-10 minutes)
npm run fetch:templates:update
```

### Full Rebuild (Rare)
```bash
# Rebuild entire database from scratch (30-40 minutes)
npm run fetch:templates
```

## How It Works

### Incremental Update Mode (`--update`)

The incremental update is **smart and efficient**:

1. **Loads existing template IDs** from database (~2,598 templates)
2. **Fetches template list** from n8n.io API (all templates from last 12 months)
3. **Filters** to find only NEW templates not in database
4. **Fetches details** for new templates only (saves time and API calls)
5. **Saves** new templates to database (existing ones untouched)
6. **Rebuilds FTS5** search index for new templates

### Key Benefits

✅ **Non-destructive**: All existing templates preserved
✅ **Fast**: Only fetches new templates (5-10 min vs 30-40 min)
✅ **API friendly**: Reduces load on n8n.io API
✅ **Safe**: Preserves AI-generated metadata
✅ **Smart**: Automatically skips duplicates

## Performance Comparison

| Mode | Templates Fetched | Time | Use Case |
|------|-------------------|------|----------|
| **Update** | Only new (~50-200) | 5-10 min | Regular updates |
| **Rebuild** | All (~8000+) | 30-40 min | Initial setup or corruption |

## Command Options

### Basic Update
```bash
npm run fetch:templates:update
```

### Full Rebuild
```bash
npm run fetch:templates
```

### With Metadata Generation
```bash
# Update templates and generate AI metadata
npm run fetch:templates -- --update --generate-metadata

# Or just generate metadata for existing templates
npm run fetch:templates -- --metadata-only
```

### Help
```bash
npm run fetch:templates -- --help
```

## Update Frequency

Recommended update schedule:
- **Weekly**: Run incremental update to get latest templates
- **Monthly**: Review database statistics
- **As needed**: Rebuild only if database corruption suspected

## Template Filtering

The fetcher automatically filters templates:
- ✅ **Includes**: Templates from last 12 months
- ✅ **Includes**: Templates with >10 views
- ❌ **Excludes**: Templates with ≤10 views (too niche)
- ❌ **Excludes**: Templates older than 12 months

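A quick way to confirm these filters against your local copy, assuming the `views` and `created_at` columns described under Database Schema below:

```bash
# Oldest template date and lowest view count currently stored:
sqlite3 data/nodes.db "SELECT MIN(views) AS min_views, MIN(created_at) AS oldest FROM templates"
```
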
## Workflow

### Regular Update Workflow

```bash
# 1. Check current state
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# 2. Build project (if code changed)
npm run build

# 3. Run incremental update
npm run fetch:templates:update

# 4. Verify new templates added
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"
```

### After n8n Dependency Update

When you update n8n dependencies, templates remain compatible:
```bash
# 1. Update n8n (from MEMORY_N8N_UPDATE.md)
npm run update:all

# 2. Fetch new templates incrementally
npm run fetch:templates:update

# 3. Check how many templates were added
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# 4. Generate AI metadata for new templates (optional, requires OPENAI_API_KEY)
npm run fetch:templates -- --metadata-only

# 5. IMPORTANT: Sanitize templates before pushing database
npm run build
npm run sanitize:templates
```

Templates are independent of n8n version - they're just workflow JSON data.

**CRITICAL**: Always run `npm run sanitize:templates` before pushing the database to remove API tokens from template workflows.

**Note**: New templates fetched via `--update` mode will NOT have AI-generated metadata by default. You need to run `--metadata-only` separately to generate metadata for templates that don't have it yet.

## Troubleshooting

### No New Templates Found

This is normal! It means:
- All recent templates are already in your database
- n8n.io hasn't published many new templates recently
- Your database is up to date

```bash
📊 Update mode: 0 new templates to fetch (skipping 2598 existing)
✅ All templates already have metadata
```

### API Rate Limiting

If you hit rate limits:
- The fetcher includes built-in delays (150ms between requests)
- Wait a few minutes and try again
- Use `--update` mode instead of full rebuild

### Database Corruption

If you suspect corruption:
```bash
# Full rebuild from scratch
npm run fetch:templates

# This will:
# - Drop and recreate templates table
# - Fetch all templates fresh
# - Rebuild search indexes
```

## Database Schema

Templates are stored with:
- Basic info (id, name, description, author, views, created_at)
- Node types used (JSON array)
- Complete workflow (gzip compressed, base64 encoded)
- AI-generated metadata (optional, requires OpenAI API key)
- FTS5 search index for fast text search

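Because workflows are stored gzip-compressed and base64-encoded, a stored row can be decoded from the shell. A sketch using a hypothetical column name (`workflow_json` here; check the actual schema before relying on it):

```bash
# Peek at the first 200 bytes of one decompressed workflow (column name is assumed):
sqlite3 data/nodes.db "SELECT workflow_json FROM templates LIMIT 1" | base64 -d | gunzip | head -c 200
```
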
## Metadata Generation

Generate AI metadata for templates:
```bash
# Requires OPENAI_API_KEY in .env
export OPENAI_API_KEY="sk-..."

# Generate for templates without metadata (recommended after incremental update)
npm run fetch:templates -- --metadata-only

# Generate during template fetch (slower, but automatic)
npm run fetch:templates:update -- --generate-metadata
```

**Important**: Incremental updates (`--update`) do NOT generate metadata by default. After running `npm run fetch:templates:update`, you'll have new templates without metadata. Run `--metadata-only` separately to generate metadata for them.

### Check Metadata Coverage

```bash
# See how many templates have metadata
sqlite3 data/nodes.db "SELECT
  COUNT(*) as total,
  SUM(CASE WHEN metadata_json IS NOT NULL THEN 1 ELSE 0 END) as with_metadata,
  SUM(CASE WHEN metadata_json IS NULL THEN 1 ELSE 0 END) as without_metadata
FROM templates"

# See recent templates without metadata
sqlite3 data/nodes.db "SELECT id, name, created_at
FROM templates
WHERE metadata_json IS NULL
ORDER BY created_at DESC
LIMIT 10"
```

Metadata includes:
- Categories
- Complexity level (simple/medium/complex)
- Use cases
- Estimated setup time
- Required services
- Key features
- Target audience

### Metadata Generation Troubleshooting

If metadata generation fails:

1. **Check error file**: Errors are saved to `temp/batch/batch_*_error.jsonl`
2. **Common issues**:
   - `"Unsupported value: 'temperature'"` - Model doesn't support custom temperature
   - `"Invalid request"` - Check OPENAI_API_KEY is valid
   - Model availability issues
3. **Model**: Uses `gpt-5-mini-2025-08-07` by default
4. **Token limit**: 3000 tokens per request for detailed metadata

The system will automatically:
- Process error files and assign default metadata to failed templates
- Save error details for debugging
- Continue processing even if some templates fail

**Example error handling**:
```bash
# If you see: "No output file available for batch job"
# Check: temp/batch/batch_*_error.jsonl for error details
# The system now automatically processes errors and generates default metadata
```

## Environment Variables

Optional configuration:
```bash
# OpenAI for metadata generation
OPENAI_API_KEY=sk-...
OPENAI_MODEL=gpt-4o-mini   # Default model
OPENAI_BATCH_SIZE=50       # Batch size for metadata generation

# Metadata generation limits
METADATA_LIMIT=100         # Max templates to process (0 = all)
```

## Statistics

After update, check stats:
```bash
# Template count
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# Most recent template
sqlite3 data/nodes.db "SELECT MAX(created_at) FROM templates"

# Templates by view count
sqlite3 data/nodes.db "SELECT COUNT(*),
  CASE
    WHEN views < 50 THEN '<50'
    WHEN views < 100 THEN '50-100'
    WHEN views < 500 THEN '100-500'
    ELSE '500+'
  END as view_range
FROM templates GROUP BY view_range"
```

## Integration with n8n-mcp

Templates are available through MCP tools:
- `list_templates`: List all templates
- `get_template`: Get specific template with workflow
- `search_templates`: Search by keyword
- `list_node_templates`: Templates using specific nodes
- `get_templates_for_task`: Templates for common tasks
- `search_templates_by_metadata`: Advanced filtering

See `npm run test:templates` for usage examples.

## Time Estimates

Typical incremental update:
- Loading existing IDs: 1-2 seconds
- Fetching template list: 2-3 minutes
- Filtering new templates: instant
- Fetching details for 100 new templates: ~15 seconds (0.15s each)
- Saving and indexing: 5-10 seconds
- **Total: 3-5 minutes**

Full rebuild:
- Fetching 8000+ templates: 25-30 minutes
- Saving and indexing: 5-10 minutes
- **Total: 30-40 minutes**

## Best Practices

1. **Use incremental updates** for regular maintenance
2. **Rebuild only when necessary** (corruption, major changes)
3. **Generate metadata incrementally** to avoid OpenAI costs
4. **Monitor template count** to verify updates are working
5. **Keep the database backed up** before major operations

## Next Steps

After updating templates:
1. Test template search: `npm run test:templates`
2. Verify MCP tools work: Test in Claude Desktop
3. Check statistics in database
4. Commit changes if desired (database changes)

## Related Documentation

- `MEMORY_N8N_UPDATE.md` - Updating n8n dependencies
- `CLAUDE.md` - Project overview and architecture
- `README.md` - User documentation

P0-R3-TEST-PLAN.md (new file, 484 lines)
@@ -0,0 +1,484 @@
# P0-R3 Feature Test Coverage Plan

## Executive Summary

This document outlines comprehensive test coverage for the P0-R3 feature (Template-based Configuration Examples). The feature adds real-world configuration examples from popular templates to node search and essentials tools.

**Feature Overview:**
- New database table: `template_node_configs` (197 pre-extracted configurations)
- Enhanced tools: `search_nodes({includeExamples: true})` and `get_node_essentials({includeExamples: true})`
- Breaking changes: Removed `get_node_for_task` tool

## Test Files Created

### Unit Tests

#### 1. `/tests/unit/scripts/fetch-templates-extraction.test.ts` ✅
**Purpose:** Test template extraction logic from `fetch-templates.ts`

**Coverage:**
- `extractNodeConfigs()` - 90%+ coverage
  - Valid workflows with multiple nodes
  - Empty workflows
  - Malformed compressed data
  - Invalid JSON
  - Nodes without parameters
  - Sticky note filtering
  - Credential handling
  - Expression detection
  - Special characters
  - Large workflows (100 nodes)

- `detectExpressions()` - 100% coverage
  - `={{...}}` syntax detection
  - `$json` references
  - `$node` references
  - Nested objects
  - Arrays
  - Null/undefined handling
  - Multiple expression types

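For reference, the kind of strings the detector is expected to flag, in standard n8n expression syntax (illustrative values):

```bash
# Example n8n expressions the extractor should flag:
echo '={{ $json.email }}'                  # item data reference
echo '={{ $node["Webhook"].json.body }}'   # cross-node reference
echo 'plain text'                          # no expression - should not be flagged
```
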
**Test Count:** 27 tests
**Expected Coverage:** 92%+

---

#### 2. `/tests/unit/mcp/search-nodes-examples.test.ts` ✅
**Purpose:** Test `search_nodes` tool with includeExamples parameter

**Coverage:**
- includeExamples parameter behavior
  - false: no examples returned
  - undefined: no examples returned (default)
  - true: examples returned
- Example data structure validation
- Top 2 limit enforcement
- Backward compatibility
- Performance (<100ms)
- Error handling (malformed JSON, database errors)
- searchNodesLIKE integration
- searchNodesFTS integration

**Test Count:** 12 tests
**Expected Coverage:** 85%+

---

#### 3. `/tests/unit/mcp/get-node-essentials-examples.test.ts` ✅
**Purpose:** Test `get_node_essentials` tool with includeExamples parameter

**Coverage:**
- includeExamples parameter behavior
- Full metadata structure
  - configuration object
  - source (template, views, complexity)
  - useCases (limited to 2)
  - metadata (hasCredentials, hasExpressions)
- Cache key differentiation
- Backward compatibility
- Performance (<100ms)
- Error handling
- Top 3 limit enforcement

**Test Count:** 13 tests
**Expected Coverage:** 88%+

---

### Integration Tests

#### 4. `/tests/integration/database/template-node-configs.test.ts` ✅
**Purpose:** Test database schema, migrations, and operations

**Coverage:**
- Schema validation
  - Table creation
  - All columns present
  - Correct types and constraints
  - CHECK constraint on complexity
- Indexes
  - idx_config_node_type_rank
  - idx_config_complexity
  - idx_config_auth
- View: ranked_node_configs
  - Top 5 per node_type
  - Correct ordering
- Foreign key constraints
  - CASCADE delete
  - Referential integrity
- Data operations
  - INSERT with all fields
  - Nullable fields
  - Rank updates
  - Delete rank > 10
- Performance
  - 1000 records < 10ms queries
- Migration idempotency

**Test Count:** 19 tests
**Expected Coverage:** 95%+

---

#### 5. `/tests/integration/mcp/template-examples-e2e.test.ts` ✅
**Purpose:** End-to-end integration testing

**Coverage:**
- Direct SQL queries
  - Top 2 examples for search_nodes
  - Top 3 examples with metadata for get_node_essentials
- Data structure validation
  - Valid JSON in all fields
  - Credentials when has_credentials=1
- Ranked view functionality
- Performance with 100+ configs
  - Query performance < 5ms
  - Complexity filtering
- Edge cases
  - Non-existent node types
  - Long parameters_json (100 params)
  - Special characters (Unicode, emojis, symbols)
- Data integrity
  - Foreign key constraints
  - Cascade deletes

**Test Count:** 14 tests
**Expected Coverage:** 90%+

---

### Test Fixtures

#### 6. `/tests/fixtures/template-configs.ts` ✅
**Purpose:** Reusable test data

**Provides:**
- `sampleConfigs`: 7 realistic node configurations
  - simpleWebhook
  - webhookWithAuth
  - httpRequestBasic
  - httpRequestWithExpressions
  - slackMessage
  - codeNodeTransform
  - codeNodeWithExpressions

- `sampleWorkflows`: 3 complete workflows
  - webhookToSlack
  - apiWorkflow
  - complexWorkflow

- **Helper Functions:**
  - `compressWorkflow()` - Compress to base64
  - `createTemplateMetadata()` - Generate metadata
  - `createConfigBatch()` - Batch create configs
  - `getConfigByComplexity()` - Filter by complexity
  - `getConfigsWithExpressions()` - Filter with expressions
  - `getConfigsWithCredentials()` - Filter with credentials
  - `createInsertStatement()` - SQL insert helper

---

## Existing Tests Requiring Updates

### High Priority

#### 1. `tests/unit/mcp/parameter-validation.test.ts`
**Line 480:** Remove `get_node_for_task` from legacyValidationTools array

```typescript
// REMOVE THIS:
{ name: 'get_node_for_task', args: {}, expected: 'Missing required parameters for get_node_for_task: task' },
```

**Status:** ⚠️ BREAKING CHANGE - Tool removed

---

#### 2. `tests/unit/mcp/tools.test.ts`
**Update:** Remove `get_node_for_task` from templates category

```typescript
// BEFORE:
templates: ['list_tasks', 'get_node_for_task', 'search_templates', ...]

// AFTER:
templates: ['list_tasks', 'search_templates', ...]
```

**Add:** Tests for new includeExamples parameter in tool definitions

```typescript
it('should have includeExamples parameter in search_nodes', () => {
  const searchNodesTool = tools.find(t => t.name === 'search_nodes');
  expect(searchNodesTool.inputSchema.properties.includeExamples).toBeDefined();
  expect(searchNodesTool.inputSchema.properties.includeExamples.type).toBe('boolean');
  expect(searchNodesTool.inputSchema.properties.includeExamples.default).toBe(false);
});

it('should have includeExamples parameter in get_node_essentials', () => {
  const essentialsTool = tools.find(t => t.name === 'get_node_essentials');
  expect(essentialsTool.inputSchema.properties.includeExamples).toBeDefined();
});
```

**Status:** ⚠️ REQUIRED UPDATE

---

#### 3. `tests/integration/mcp-protocol/session-management.test.ts`
**Remove:** Test case calling `get_node_for_task` with invalid task

```typescript
// REMOVE THIS TEST:
client.callTool({ name: 'get_node_for_task', arguments: { task: 'invalid_task' } }).catch(e => e)
```

**Status:** ⚠️ BREAKING CHANGE

---

#### 4. `tests/integration/mcp-protocol/tool-invocation.test.ts`
**Remove:** Entire `get_node_for_task` describe block

**Add:** Tests for new includeExamples functionality

```typescript
describe('search_nodes with includeExamples', () => {
  it('should return examples when includeExamples is true', async () => {
    const response = await client.callTool({
      name: 'search_nodes',
      arguments: { query: 'webhook', includeExamples: true }
    });

    expect(response.results).toBeDefined();
    // Examples may or may not be present depending on database
  });

  it('should not return examples when includeExamples is false', async () => {
    const response = await client.callTool({
      name: 'search_nodes',
      arguments: { query: 'webhook', includeExamples: false }
    });

    expect(response.results).toBeDefined();
    response.results.forEach(node => {
      expect(node.examples).toBeUndefined();
    });
  });
});

describe('get_node_essentials with includeExamples', () => {
  it('should return examples with metadata when includeExamples is true', async () => {
    const response = await client.callTool({
      name: 'get_node_essentials',
      arguments: { nodeType: 'nodes-base.webhook', includeExamples: true }
    });

    expect(response.nodeType).toBeDefined();
    // Examples may or may not be present depending on database
  });
});
```

**Status:** ⚠️ REQUIRED UPDATE

---

### Medium Priority

#### 5. `tests/unit/services/task-templates.test.ts`
**Status:** ✅ No changes needed (TaskTemplates marked as deprecated but not removed)

**Note:** TaskTemplates remains for backward compatibility. Tests should continue to pass.

---

## Test Execution Plan

### Phase 1: Unit Tests
```bash
# Run new unit tests
npm test tests/unit/scripts/fetch-templates-extraction.test.ts
npm test tests/unit/mcp/search-nodes-examples.test.ts
npm test tests/unit/mcp/get-node-essentials-examples.test.ts

# Expected: All pass, 52 tests
```

### Phase 2: Integration Tests
```bash
# Run new integration tests
npm test tests/integration/database/template-node-configs.test.ts
npm test tests/integration/mcp/template-examples-e2e.test.ts

# Expected: All pass, 33 tests
```

### Phase 3: Update Existing Tests
```bash
# Update files as outlined above, then run:
npm test tests/unit/mcp/parameter-validation.test.ts
npm test tests/unit/mcp/tools.test.ts
npm test tests/integration/mcp-protocol/session-management.test.ts
npm test tests/integration/mcp-protocol/tool-invocation.test.ts

# Expected: All pass after updates
```

### Phase 4: Full Test Suite
```bash
# Run all tests
npm test

# Run with coverage
npm run test:coverage

# Expected coverage improvements:
# - src/scripts/fetch-templates.ts: +20% (60% → 80%)
# - src/mcp/server.ts: +5% (75% → 80%)
# - Overall project: +2% (current → current+2%)
```

---

## Coverage Expectations

### New Code Coverage

| File | Function | Target | Tests |
|------|----------|--------|-------|
| fetch-templates.ts | extractNodeConfigs | 95% | 15 tests |
| fetch-templates.ts | detectExpressions | 100% | 12 tests |
| server.ts | searchNodes (with examples) | 90% | 8 tests |
| server.ts | getNodeEssentials (with examples) | 90% | 10 tests |
| Database migration | template_node_configs | 100% | 19 tests |

### Overall Coverage Goals

- **Unit Tests:** 90%+ coverage for new code
- **Integration Tests:** All happy paths + critical error paths
- **E2E Tests:** Complete feature workflows
- **Performance:** All queries <10ms (database), <100ms (MCP)

---

## Test Infrastructure
|
||||
|
||||
### Dependencies Required
|
||||
All dependencies already present in `package.json`:
|
||||
- vitest (test runner)
|
||||
- better-sqlite3 (database)
|
||||
- @vitest/coverage-v8 (coverage)
|
||||
|
||||
### Test Utilities Used
|
||||
- TestDatabase helper (from existing test utils)
|
||||
- createTestDatabaseAdapter (from existing test utils)
|
||||
- Standard vitest matchers
|
||||
|
||||
### No New Dependencies Required ✅
|
||||
|
||||
---
|
||||
|
||||
## Regression Prevention
|
||||
|
||||
### Critical Paths Protected
|
||||
|
||||
1. **Backward Compatibility**
|
||||
- Tools work without includeExamples parameter
|
||||
- Existing workflows unchanged
|
||||
- Cache keys differentiated
|
||||
|
||||
2. **Performance**
|
||||
- No degradation when includeExamples=false
|
||||
- Indexed queries <10ms
|
||||
- Example fetch errors don't break responses
|
||||
|
||||
3. **Data Integrity**
|
||||
- Foreign key constraints enforced
|
||||
- JSON validation in all fields
|
||||
- Rank calculations correct
|
||||
|
||||
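The backward-compatibility path is the one most worth pinning in an automated test. Below is a minimal vitest sketch of that check; the `searchNodes` stub and its response shape are illustrative assumptions standing in for the real tool handler, not the project's actual API.

```typescript
import { describe, it, expect } from 'vitest';

// Hypothetical stand-in for the real search_nodes handler (imagine it imported
// from src/mcp/server); the response shape here is an assumption.
async function searchNodes(args: { query: string; includeExamples?: boolean }) {
  const base = { results: [{ nodeType: 'nodes-base.webhook' }] };
  return args.includeExamples
    ? { ...base, examples: [{ templateName: 'Webhook starter', config: {} }] }
    : base;
}

describe('searchNodes backward compatibility', () => {
  it('omitting includeExamples behaves exactly like the old tool', async () => {
    const legacy = await searchNodes({ query: 'webhook' });
    expect(legacy).not.toHaveProperty('examples'); // old callers see no new fields

    const enriched = await searchNodes({ query: 'webhook', includeExamples: true });
    expect(enriched).toHaveProperty('examples'); // opt-in callers get examples
  });
});
```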
---

## CI/CD Integration

### GitHub Actions Updates
No changes required; the existing test commands will pick up the new tests:

```yaml
- run: npm test
- run: npm run test:coverage
```

### Coverage Thresholds
Current thresholds are maintained. Expected improvements:
- Lines: +2%
- Functions: +3%
- Branches: +2%

---

## Manual Testing Checklist

### Pre-Deployment Verification

- [ ] Run `npm run rebuild` - verify the migration applies cleanly
- [ ] Run `npm run fetch:templates --extract-only` - verify extraction works
- [ ] Check the database: `SELECT COUNT(*) FROM template_node_configs` - should be ~197
- [ ] Test MCP tool: `search_nodes({query: "webhook", includeExamples: true})`
- [ ] Test MCP tool: `get_node_essentials({nodeType: "nodes-base.webhook", includeExamples: true})`
- [ ] Verify backward compatibility: tools work without the includeExamples parameter
- [ ] Performance test: querying 100 nodes with examples takes < 200ms

---

## Rollback Plan

If issues are detected:

1. **Database Rollback:**

   ```sql
   DROP TABLE IF EXISTS template_node_configs;
   DROP VIEW IF EXISTS ranked_node_configs;
   ```

2. **Code Rollback:**
   - Revert server.ts changes
   - Revert tools.ts changes
   - Restore the get_node_for_task tool (if critical)

3. **Test Rollback:**
   - Revert parameter-validation.test.ts
   - Revert tools.test.ts
   - Revert tool-invocation.test.ts

---

## Success Metrics

### Test Metrics
- ✅ 85+ new tests added
- ✅ 0 tests failing after updates
- ✅ Coverage increase of 2%+
- ✅ All performance tests pass

### Feature Metrics
- ✅ 197 template configs extracted
- ✅ Top 2/3 examples returned correctly
- ✅ Query performance <10ms
- ✅ No backward-compatibility breaks

---

## Conclusion

This test plan provides **comprehensive coverage** for the P0-R3 feature, with:
- **85+ new tests** across the unit, integration, and E2E levels
- **Complete coverage** of extraction, storage, and retrieval
- **Backward compatibility** protection
- **Performance validation** (<10ms queries)
- **A clear migration path** for existing tests

**All test files are ready for execution.** Update the 4 existing test files as outlined, then run the full test suite.

**Estimated Total Implementation Time:** 2-3 hours for updating the existing tests, plus validation

73  PRIVACY.md  (new file)
@@ -0,0 +1,73 @@

# Privacy Policy for n8n-mcp Telemetry

## Overview
n8n-mcp collects anonymous usage statistics to help improve the tool. This data collection is designed to respect user privacy while providing valuable insights into how the tool is used.

## What We Collect
- **Anonymous User ID**: A hashed identifier derived from your machine characteristics (no personal information)
- **Tool Usage**: Which MCP tools are used and their performance metrics
- **Workflow Patterns**: Sanitized workflow structures (all sensitive data removed)
- **Error Types**: Categories of errors encountered (no error messages with user data)
- **System Information**: Platform, architecture, Node.js version, and n8n-mcp version

## What We DON'T Collect
- Personal information or usernames
- API keys, tokens, or credentials
- URLs, endpoints, or hostnames
- Email addresses or contact information
- File paths or directory structures
- Actual workflow data or parameters
- Database connection strings
- Any authentication information

## Data Sanitization
All collected data undergoes automatic sanitization:
- URLs are replaced with `[URL]` or `[REDACTED]`
- Long alphanumeric strings (potential keys) are replaced with `[KEY]`
- Email addresses are replaced with `[EMAIL]`
- Authentication-related fields are completely removed
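To make those rules concrete, here is a minimal TypeScript sketch of such a sanitizer. The regexes, the field list, and the function names are illustrative assumptions, not the project's actual implementation.

```typescript
// Field names treated as authentication-related; dropped entirely (assumption).
const AUTH_FIELDS = ['apikey', 'token', 'password', 'authorization', 'credential', 'secret'];

function sanitizeString(value: string): string {
  return value
    .replace(/https?:\/\/[^\s"']+/g, '[URL]')         // URLs and endpoints
    .replace(/[\w.+-]+@[\w-]+\.[\w.-]+/g, '[EMAIL]')  // email addresses
    .replace(/\b[A-Za-z0-9_-]{32,}\b/g, '[KEY]');     // long strings that look like keys
}

function sanitizeRecord(data: Record<string, unknown>): Record<string, unknown> {
  const clean: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(data)) {
    // Authentication-related fields are removed completely, not masked.
    if (AUTH_FIELDS.some((f) => key.toLowerCase().includes(f))) continue;
    clean[key] = typeof value === 'string' ? sanitizeString(value) : value;
  }
  return clean;
}

// sanitizeRecord({ url: 'https://acme.test/hook', apiKey: 'abc' })
// → { url: '[URL]' }
```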
## Data Storage
- Data is stored securely using Supabase
- Anonymous users have write-only access (they cannot read data back)
- Row Level Security (RLS) policies prevent data access by anonymous users

## Opt-Out
You can disable telemetry at any time:
```bash
npx n8n-mcp telemetry disable
```

To re-enable:
```bash
npx n8n-mcp telemetry enable
```

To check status:
```bash
npx n8n-mcp telemetry status
```

## Data Usage
Collected data is used solely to:
- Understand which features are most used
- Identify common error patterns
- Improve tool performance and reliability
- Guide development priorities
- Train machine learning models for workflow generation

All ML training uses sanitized, anonymized data only.
You can opt out at any time with `npx n8n-mcp telemetry disable`.

## Data Retention
- Data is retained for analysis purposes
- No personal identification is possible from the collected data

## Changes to This Policy
We may update this privacy policy from time to time. Updates will be reflected in this document.

## Contact
For questions about telemetry or privacy, please open an issue on GitHub:
https://github.com/czlonkowski/n8n-mcp/issues

Last updated: 2025-11-06

318  README_ANALYSIS.md  (new file)
@@ -0,0 +1,318 @@

# N8N-MCP Validation Analysis: Complete Report

**Date**: November 8, 2025
**Dataset**: 29,218 validation events | 9,021 unique users | 90 days
**Status**: Complete and ready for action

---

## Analysis Documents

### 1. ANALYSIS_QUICK_REFERENCE.md (5.8KB)
**Best for**: Quick decisions, meetings, slide presentations

START HERE if you want the key points in 5 minutes.

**Contains**:
- One-paragraph core finding
- Top 3 problem areas with root causes
- 5 most common errors
- Implementation plan summary
- Key metrics & targets
- FAQ section

---

### 2. VALIDATION_ANALYSIS_SUMMARY.md (13KB)
**Best for**: Executive stakeholders, team leads, decision makers

Read this for a comprehensive but concise overview.

**Contains**:
- One-page executive summary
- Health scorecard with key metrics
- Detailed problem-area breakdown
- Error category distribution
- Agent behavior insights
- Tool usage patterns
- Documentation impact findings
- Top 5 recommendations with ROI estimates
- 50-65% improvement projection

---

### 3. VALIDATION_ANALYSIS_REPORT.md (27KB)
**Best for**: Technical deep dives, implementation planning, root cause analysis

Complete reference document with all findings.

**Contains**:
- All 16 SQL queries (reproducible)
- Node-specific difficulty ranking (top 20)
- Top 25 unique validation error messages
- Error categorization with root causes
- Tool usage patterns before failures
- Search query analysis
- Documentation effectiveness study
- Retry success rate analysis
- Property-level difficulty matrix
- 8 detailed recommendations with implementation guides
- Phase-by-phase action items
- KPI tracking setup
- Complete appendix with error message reference

---

### 4. IMPLEMENTATION_ROADMAP.md (4.3KB)
**Best for**: Project managers, development team, sprint planning

Actionable roadmap for the next 6 weeks.

**Contains**:
- Phase 1-3 breakdown (2 weeks each)
- Specific file locations to modify
- Effort estimates per task
- Success criteria for each phase
- Expected impact projections
- Code examples (before/after)
- Key changes documentation

---

## Reading Paths

### Path A: Decision Maker (30 minutes)
1. Read: ANALYSIS_QUICK_REFERENCE.md
2. Review: Key metrics in VALIDATION_ANALYSIS_SUMMARY.md
3. Decision: Approve IMPLEMENTATION_ROADMAP.md

### Path B: Product Manager (1 hour)
1. Read: VALIDATION_ANALYSIS_SUMMARY.md
2. Skim: Top recommendations in VALIDATION_ANALYSIS_REPORT.md
3. Review: IMPLEMENTATION_ROADMAP.md
4. Check: Success metrics and timelines

### Path C: Technical Lead (2-3 hours)
1. Read: ANALYSIS_QUICK_REFERENCE.md
2. Deep-dive: VALIDATION_ANALYSIS_REPORT.md
3. Study: IMPLEMENTATION_ROADMAP.md
4. Review: Code examples and SQL queries
5. Plan: Ticket creation and sprint allocation

### Path D: Developer (3-4 hours)
1. Skim: ANALYSIS_QUICK_REFERENCE.md for context
2. Read: VALIDATION_ANALYSIS_REPORT.md sections 3-8
3. Study: IMPLEMENTATION_ROADMAP.md thoroughly
4. Review: All code locations and examples
5. Plan: First task implementation

---

## Key Findings Overview

### The Core Insight
Validation failures are NOT a sign of breakage; they are evidence the system is working. The 29,218 validation events prevented bad deployments. The real challenge is the GUIDANCE GAPS that cause first-attempt failures.

### Success Evidence
- 100% same-day error recovery rate
- 100% retry success rate
- All agents fix errors when given feedback
- Zero "unfixable" errors

### Problem Areas (75% of errors)
1. **Workflow structure** (26%) - malformed JSON
2. **Connections** (14%) - unintuitive syntax
3. **Required fields** (8%) - not marked upfront

### Most Problematic Nodes
- Webhook/Trigger (127 failures)
- Slack (73 failures)
- AI Agent (36 failures)
- OpenAI (35 failures)
- HTTP Request (31 failures)

### Solution Strategy
- Phase 1: Better error messages + required-field markers (25-30% reduction)
- Phase 2: Documentation + validation improvements (additional 15-20%)
- Phase 3: Advanced features + monitoring (additional 10-15%)
- **Target**: 50-65% total failure reduction in 6 weeks

---

## Critical Numbers

```
Validation Events ............. 29,218
Unique Users .................. 9,021
Data Quality .................. 100% (all marked as errors)

Current Metrics:
  Error Rate (doc users) ....... 12.6%
  Error Rate (non-doc users) ... 10.8%
  First-attempt success ........ ~77%
  Retry success ................ 100%
  Same-day recovery ............ 100%

Target Metrics (after 6 weeks):
  Error Rate ................... 6-7% (-50%)
  First-attempt success ........ 85%+
  Retry success ................ 100%
  Implementation effort ........ 60-80 hours
```

---

## Implementation Timeline

```
Week 1-2: Phase 1 (error messages, field markers, webhook guide)
          Expected: 25-30% failure reduction

Week 3-4: Phase 2 (enum suggestions, connection guide, AI validation)
          Expected: additional 15-20% reduction

Week 5-6: Phase 3 (search improvements, fuzzy matching, KPI setup)
          Expected: additional 10-15% reduction

Target: 50-65% total reduction by Week 6
```

---

## How to Use These Documents

### For Review & Approval
1. Start with ANALYSIS_QUICK_REFERENCE.md
2. Check key metrics in VALIDATION_ANALYSIS_SUMMARY.md
3. Review IMPLEMENTATION_ROADMAP.md for feasibility
4. Decision: Approve phases 1-3

### For Team Planning
1. Read IMPLEMENTATION_ROADMAP.md
2. Create GitHub issues from each task
3. Assign based on effort estimates
4. Schedule sprints for phases 1-3

### For Development
1. Review specific recommendations in VALIDATION_ANALYSIS_REPORT.md
2. Find code locations in IMPLEMENTATION_ROADMAP.md
3. Study code examples (before/after)
4. Implement and test

### For Measurement
1. Record baseline metrics (current state)
2. Deploy Phase 1 and measure impact
3. Use KPI queries from VALIDATION_ANALYSIS_REPORT.md
4. Adjust strategy based on actual results

---

## Key Recommendations (Priority Order)

### IMMEDIATE (Week 1-2)
1. **Enhance error messages** - Add location + examples
2. **Mark required fields** - Add "⚠️ REQUIRED" to tools
3. **Create webhook guide** - Document configuration rules

### HIGH (Week 3-4)
4. **Add enum suggestions** - Show valid values in errors
5. **Create connections guide** - Document syntax + examples
6. **Add AI Agent validation** - Detect missing LLM connections

### MEDIUM (Week 5-6)
7. **Improve search results** - Add configuration hints
8. **Build fuzzy matcher** - Suggest similar node types (a sketch follows this list)
9. **Set up KPI tracking** - Monitor improvement
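Recommendation 8 is straightforward to prototype. Below is a minimal sketch of the idea, using plain edit distance to suggest close node-type names on a failed lookup; the candidate list, threshold, and function names are illustrative assumptions, not the planned implementation.

```typescript
// Classic dynamic-programming Levenshtein distance.
function editDistance(a: string, b: string): number {
  const dp: number[][] = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
  );
  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      dp[i][j] = Math.min(
        dp[i - 1][j] + 1,                                  // deletion
        dp[i][j - 1] + 1,                                  // insertion
        dp[i - 1][j - 1] + (a[i - 1] === b[j - 1] ? 0 : 1) // substitution
      );
    }
  }
  return dp[a.length][b.length];
}

// Suggest known node types within a small edit distance of the failed input.
function suggestNodeTypes(input: string, known: string[], maxDistance = 3): string[] {
  return known
    .map((name) => ({ name, d: editDistance(input.toLowerCase(), name.toLowerCase()) }))
    .filter(({ d }) => d > 0 && d <= maxDistance)
    .sort((x, y) => x.d - y.d)
    .map(({ name }) => name);
}

// suggestNodeTypes('nodes-base.webook', ['nodes-base.webhook', 'nodes-base.slack'])
// → ['nodes-base.webhook']
```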
---

## Questions & Answers

**Q: Why so many validation failures?**
A: High usage (9,021 users, complex workflows). The system is working: it is preventing bad deployments.

**Q: Shouldn't we just allow invalid configurations?**
A: No. Validation prevented 29,218 broken workflows from deploying; we improve the guidance instead.

**Q: Do agents actually learn from errors?**
A: Yes. The 100% same-day recovery rate shows that the feedback loop works.

**Q: Can we really reduce failures by 50-65%?**
A: Yes. The analysis shows these specific improvements target the actual root causes.

**Q: How long will this take?**
A: 60-80 developer-hours across 6 weeks. Work can start immediately.

**Q: What's the biggest win?**
A: Marking required fields (378 errors) + better structure messages (1,268 errors).

---

## Next Steps

1. **This Week**: Review all documents and get approval
2. **Week 1**: Create GitHub issues from IMPLEMENTATION_ROADMAP.md
3. **Week 2**: Assign to the team, start Phase 1
4. **Week 4**: Deploy Phase 1, start Phase 2
5. **Week 6**: Deploy Phase 2, start Phase 3
6. **Week 8**: Deploy Phase 3, begin monitoring
7. **Week 9+**: Review metrics, iterate

---

## File Structure

```
/Users/romualdczlonkowski/Pliki/n8n-mcp/n8n-mcp/
├── ANALYSIS_QUICK_REFERENCE.md ........... Quick lookup (5.8KB)
├── VALIDATION_ANALYSIS_SUMMARY.md ........ Executive summary (13KB)
├── VALIDATION_ANALYSIS_REPORT.md ......... Complete analysis (27KB)
├── IMPLEMENTATION_ROADMAP.md ............. Action plan (4.3KB)
└── README_ANALYSIS.md .................... This file
```

**Total Documentation**: 50KB of analysis, recommendations, and implementation guidance

---

## Contact & Support

For specific questions:
- **Why?** → See VALIDATION_ANALYSIS_REPORT.md, sections 2-8
- **How?** → See IMPLEMENTATION_ROADMAP.md for code locations
- **When?** → See IMPLEMENTATION_ROADMAP.md for the timeline
- **Metrics?** → See the key metrics section of VALIDATION_ANALYSIS_SUMMARY.md

---

## Metadata

| Item | Value |
|------|-------|
| Analysis Date | November 8, 2025 |
| Data Period | Sept 26 - Nov 8, 2025 (90 days) |
| Sample Size | 29,218 validation events |
| Users Analyzed | 9,021 unique users |
| SQL Queries | 16 comprehensive queries |
| Confidence Level | HIGH |
| Status | Complete & Ready for Implementation |

---

## Analysis Methodology

1. **Data Collection**: Extracted all validation_details events from PostgreSQL
2. **Categorization**: Grouped errors by type, node, and message pattern
3. **Pattern Analysis**: Identified root causes for each error category
4. **User Behavior**: Tracked tool usage before/after failures
5. **Recovery Analysis**: Measured success rates and correction time
6. **Recommendation Development**: Mapped solutions to specific problems
7. **Impact Projection**: Estimated the improvement from each solution
8. **Roadmap Creation**: Phased implementation plan with effort estimates

**Data Quality**: 100% of validation events properly categorized; no data loss or corruption

---

**Analysis Complete** | **Ready for Review** | **Awaiting Approval to Proceed**

41  _config.yml  (new file)
@@ -0,0 +1,41 @@

# Jekyll configuration for GitHub Pages
# This is only used for serving benchmark results

# Only process benchmark-related files
include:
  - index.html
  - benchmarks/

# Exclude everything else to prevent Liquid syntax errors
exclude:
  - "*.md"
  - "*.json"
  - "*.ts"
  - "*.js"
  - "*.yml"
  - src/
  - tests/
  - docs/
  - scripts/
  - dist/
  - node_modules/
  - package.json
  - package-lock.json
  - tsconfig.json
  - README.md
  - CHANGELOG.md
  - LICENSE
  - Dockerfile*
  - docker-compose*
  - .github/
  - .vscode/
  - .claude/
  - deploy/
  - examples/
  - data/

# Disable Jekyll processing for files we don't want processed
plugins: []

# Use simple theme
theme: null

53  codecov.yml  (new file)
@@ -0,0 +1,53 @@

codecov:
  require_ci_to_pass: yes

coverage:
  precision: 2
  round: down
  range: "70...100"

  status:
    project:
      default:
        target: 80%
        threshold: 1%
        base: auto
        if_not_found: success
        if_ci_failed: error
        informational: false
        only_pulls: false
    patch:
      default:
        target: 80%
        threshold: 1%
        base: auto
        if_not_found: success
        if_ci_failed: error
        informational: true
        only_pulls: false

parsers:
  gcov:
    branch_detection:
      conditional: yes
      loop: yes
      method: no
      macro: no

comment:
  layout: "reach,diff,flags,files,footer"
  behavior: default
  require_changes: false
  require_base: false
  require_head: true

ignore:
  - "node_modules/**/*"
  - "dist/**/*"
  - "tests/**/*"
  - "scripts/**/*"
  - "**/*.test.ts"
  - "**/*.spec.ts"
  - "src/mcp/index.ts"
  - "src/http-server.ts"
  - "src/http-server-single-session.ts"

13  coverage.json  (new file; diff suppressed because one or more lines are too long)

BIN  data/nodes.db  (binary file not shown)

0  data/templates.db  (new empty file)

232  deploy/quick-deploy-n8n.sh  (new executable file)
@@ -0,0 +1,232 @@

#!/bin/bash
# Quick deployment script for n8n + n8n-mcp stack

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Default values
COMPOSE_FILE="docker-compose.n8n.yml"
ENV_FILE=".env"
ENV_EXAMPLE=".env.n8n.example"

# Function to print colored output
print_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Function to generate random token
generate_token() {
    openssl rand -hex 32
}

# Function to check prerequisites
check_prerequisites() {
    print_info "Checking prerequisites..."

    # Check Docker
    if ! command -v docker &> /dev/null; then
        print_error "Docker is not installed. Please install Docker first."
        exit 1
    fi

    # Check Docker Compose
    if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
        print_error "Docker Compose is not installed. Please install Docker Compose first."
        exit 1
    fi

    # Check openssl for token generation
    if ! command -v openssl &> /dev/null; then
        print_error "OpenSSL is not installed. Please install OpenSSL first."
        exit 1
    fi

    print_info "All prerequisites are installed."
}

# Function to setup environment
setup_environment() {
    print_info "Setting up environment..."

    # Check if .env exists
    if [ -f "$ENV_FILE" ]; then
        print_warn ".env file already exists. Backing up to .env.backup"
        cp "$ENV_FILE" ".env.backup"
    fi

    # Copy example env file
    if [ -f "$ENV_EXAMPLE" ]; then
        cp "$ENV_EXAMPLE" "$ENV_FILE"
        print_info "Created .env file from example"
    else
        print_error ".env.n8n.example file not found!"
        exit 1
    fi

    # Generate encryption key
    ENCRYPTION_KEY=$(generate_token)
    if [[ "$OSTYPE" == "darwin"* ]]; then
        sed -i '' "s/N8N_ENCRYPTION_KEY=/N8N_ENCRYPTION_KEY=$ENCRYPTION_KEY/" "$ENV_FILE"
    else
        sed -i "s/N8N_ENCRYPTION_KEY=/N8N_ENCRYPTION_KEY=$ENCRYPTION_KEY/" "$ENV_FILE"
    fi
    print_info "Generated n8n encryption key"

    # Generate MCP auth token
    MCP_TOKEN=$(generate_token)
    if [[ "$OSTYPE" == "darwin"* ]]; then
        sed -i '' "s/MCP_AUTH_TOKEN=/MCP_AUTH_TOKEN=$MCP_TOKEN/" "$ENV_FILE"
    else
        sed -i "s/MCP_AUTH_TOKEN=/MCP_AUTH_TOKEN=$MCP_TOKEN/" "$ENV_FILE"
    fi
    print_info "Generated MCP authentication token"

    print_warn "Please update the following in the .env file:"
    print_warn "  - N8N_BASIC_AUTH_PASSWORD (current: changeme)"
    print_warn "  - N8N_API_KEY (get from the n8n UI after first start)"
}

# Function to build images
build_images() {
    print_info "Building n8n-mcp image..."

    if docker compose version &> /dev/null; then
        docker compose -f "$COMPOSE_FILE" build
    else
        docker-compose -f "$COMPOSE_FILE" build
    fi

    print_info "Image built successfully"
}

# Function to start services
start_services() {
    print_info "Starting services..."

    if docker compose version &> /dev/null; then
        docker compose -f "$COMPOSE_FILE" up -d
    else
        docker-compose -f "$COMPOSE_FILE" up -d
    fi

    print_info "Services started"
}

# Function to show status
show_status() {
    print_info "Checking service status..."

    if docker compose version &> /dev/null; then
        docker compose -f "$COMPOSE_FILE" ps
    else
        docker-compose -f "$COMPOSE_FILE" ps
    fi

    echo ""
    print_info "Services are starting up. This may take a minute..."
    print_info "n8n will be available at: http://localhost:5678"
    print_info "n8n-mcp will be available at: http://localhost:3000"
    echo ""
    print_warn "Next steps:"
    print_warn "1. Access n8n at http://localhost:5678"
    print_warn "2. Log in with admin/changeme (or your custom password)"
    print_warn "3. Go to Settings > n8n API > Create API Key"
    print_warn "4. Update N8N_API_KEY in the .env file"
    print_warn "5. Restart n8n-mcp: docker-compose -f $COMPOSE_FILE restart n8n-mcp"
}

# Function to stop services
stop_services() {
    print_info "Stopping services..."

    if docker compose version &> /dev/null; then
        docker compose -f "$COMPOSE_FILE" down
    else
        docker-compose -f "$COMPOSE_FILE" down
    fi

    print_info "Services stopped"
}

# Function to view logs
view_logs() {
    SERVICE=$1

    if [ -z "$SERVICE" ]; then
        if docker compose version &> /dev/null; then
            docker compose -f "$COMPOSE_FILE" logs -f
        else
            docker-compose -f "$COMPOSE_FILE" logs -f
        fi
    else
        if docker compose version &> /dev/null; then
            docker compose -f "$COMPOSE_FILE" logs -f "$SERVICE"
        else
            docker-compose -f "$COMPOSE_FILE" logs -f "$SERVICE"
        fi
    fi
}

# Main script
case "${1:-help}" in
    setup)
        check_prerequisites
        setup_environment
        build_images
        start_services
        show_status
        ;;
    start)
        start_services
        show_status
        ;;
    stop)
        stop_services
        ;;
    restart)
        stop_services
        start_services
        show_status
        ;;
    status)
        show_status
        ;;
    logs)
        view_logs "${2}"
        ;;
    build)
        build_images
        ;;
    *)
        echo "n8n-mcp Quick Deploy Script"
        echo ""
        echo "Usage: $0 {setup|start|stop|restart|status|logs|build}"
        echo ""
        echo "Commands:"
        echo "  setup   - Initial setup: create .env, build images, and start services"
        echo "  start   - Start all services"
        echo "  stop    - Stop all services"
        echo "  restart - Restart all services"
        echo "  status  - Show service status"
        echo "  logs    - View logs (optionally specify service: logs n8n-mcp)"
        echo "  build   - Build/rebuild images"
        echo ""
        echo "Examples:"
        echo "  $0 setup          # First time setup"
        echo "  $0 logs n8n-mcp   # View n8n-mcp logs"
        echo "  $0 restart        # Restart all services"
        ;;
esac

15  dist/config/n8n-api.d.ts  (new vendored file)
@@ -0,0 +1,15 @@

export declare function getN8nApiConfig(): {
    baseUrl: string;
    apiKey: string;
    timeout: number;
    maxRetries: number;
} | null;
export declare function isN8nApiConfigured(): boolean;
export declare function getN8nApiConfigFromContext(context: {
    n8nApiUrl?: string;
    n8nApiKey?: string;
    n8nApiTimeout?: number;
    n8nApiMaxRetries?: number;
}): N8nApiConfig | null;
export type N8nApiConfig = NonNullable<ReturnType<typeof getN8nApiConfig>>;
//# sourceMappingURL=n8n-api.d.ts.map

1  dist/config/n8n-api.d.ts.map  (new vendored file)
@@ -0,0 +1 @@

{"version":3,"file":"n8n-api.d.ts","sourceRoot":"","sources":["../../src/config/n8n-api.ts"],"names":[],"mappings":"AAgBA,wBAAgB,eAAe;;;;;SA0B9B;AAGD,wBAAgB,kBAAkB,IAAI,OAAO,CAG5C;AAMD,wBAAgB,0BAA0B,CAAC,OAAO,EAAE;IAClD,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC3B,GAAG,YAAY,GAAG,IAAI,CAWtB;AAGD,MAAM,MAAM,YAAY,GAAG,WAAW,CAAC,UAAU,CAAC,OAAO,eAAe,CAAC,CAAC,CAAC"}

53  dist/config/n8n-api.js  (new vendored file)
@@ -0,0 +1,53 @@

"use strict";
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getN8nApiConfig = getN8nApiConfig;
|
||||
exports.isN8nApiConfigured = isN8nApiConfigured;
|
||||
exports.getN8nApiConfigFromContext = getN8nApiConfigFromContext;
|
||||
const zod_1 = require("zod");
|
||||
const dotenv_1 = __importDefault(require("dotenv"));
|
||||
const n8nApiConfigSchema = zod_1.z.object({
|
||||
N8N_API_URL: zod_1.z.string().url().optional(),
|
||||
N8N_API_KEY: zod_1.z.string().min(1).optional(),
|
||||
N8N_API_TIMEOUT: zod_1.z.coerce.number().positive().default(30000),
|
||||
N8N_API_MAX_RETRIES: zod_1.z.coerce.number().positive().default(3),
|
||||
});
|
||||
let envLoaded = false;
|
||||
function getN8nApiConfig() {
|
||||
if (!envLoaded) {
|
||||
dotenv_1.default.config();
|
||||
envLoaded = true;
|
||||
}
|
||||
const result = n8nApiConfigSchema.safeParse(process.env);
|
||||
if (!result.success) {
|
||||
return null;
|
||||
}
|
||||
const config = result.data;
|
||||
if (!config.N8N_API_URL || !config.N8N_API_KEY) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
baseUrl: config.N8N_API_URL,
|
||||
apiKey: config.N8N_API_KEY,
|
||||
timeout: config.N8N_API_TIMEOUT,
|
||||
maxRetries: config.N8N_API_MAX_RETRIES,
|
||||
};
|
||||
}
|
||||
function isN8nApiConfigured() {
|
||||
const config = getN8nApiConfig();
|
||||
return config !== null;
|
||||
}
|
||||
function getN8nApiConfigFromContext(context) {
|
||||
if (!context.n8nApiUrl || !context.n8nApiKey) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
baseUrl: context.n8nApiUrl,
|
||||
apiKey: context.n8nApiKey,
|
||||
timeout: context.n8nApiTimeout ?? 30000,
|
||||
maxRetries: context.n8nApiMaxRetries ?? 3,
|
||||
};
|
||||
}
|
||||
//# sourceMappingURL=n8n-api.js.map

1  dist/config/n8n-api.js.map  (new vendored file)
@@ -0,0 +1 @@

{"version":3,"file":"n8n-api.js","sourceRoot":"","sources":["../../src/config/n8n-api.ts"],"names":[],"mappings":";;;;;AAgBA,0CA0BC;AAGD,gDAGC;AAMD,gEAgBC;AAtED,6BAAwB;AACxB,oDAA4B;AAI5B,MAAM,kBAAkB,GAAG,OAAC,CAAC,MAAM,CAAC;IAClC,WAAW,EAAE,OAAC,CAAC,MAAM,EAAE,CAAC,GAAG,EAAE,CAAC,QAAQ,EAAE;IACxC,WAAW,EAAE,OAAC,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE;IACzC,eAAe,EAAE,OAAC,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,QAAQ,EAAE,CAAC,OAAO,CAAC,KAAK,CAAC;IAC5D,mBAAmB,EAAE,OAAC,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,QAAQ,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC;CAC7D,CAAC,CAAC;AAGH,IAAI,SAAS,GAAG,KAAK,CAAC;AAGtB,SAAgB,eAAe;IAE7B,IAAI,CAAC,SAAS,EAAE,CAAC;QACf,gBAAM,CAAC,MAAM,EAAE,CAAC;QAChB,SAAS,GAAG,IAAI,CAAC;IACnB,CAAC;IAED,MAAM,MAAM,GAAG,kBAAkB,CAAC,SAAS,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;IAEzD,IAAI,CAAC,MAAM,CAAC,OAAO,EAAE,CAAC;QACpB,OAAO,IAAI,CAAC;IACd,CAAC;IAED,MAAM,MAAM,GAAG,MAAM,CAAC,IAAI,CAAC;IAG3B,IAAI,CAAC,MAAM,CAAC,WAAW,IAAI,CAAC,MAAM,CAAC,WAAW,EAAE,CAAC;QAC/C,OAAO,IAAI,CAAC;IACd,CAAC;IAED,OAAO;QACL,OAAO,EAAE,MAAM,CAAC,WAAW;QAC3B,MAAM,EAAE,MAAM,CAAC,WAAW;QAC1B,OAAO,EAAE,MAAM,CAAC,eAAe;QAC/B,UAAU,EAAE,MAAM,CAAC,mBAAmB;KACvC,CAAC;AACJ,CAAC;AAGD,SAAgB,kBAAkB;IAChC,MAAM,MAAM,GAAG,eAAe,EAAE,CAAC;IACjC,OAAO,MAAM,KAAK,IAAI,CAAC;AACzB,CAAC;AAMD,SAAgB,0BAA0B,CAAC,OAK1C;IACC,IAAI,CAAC,OAAO,CAAC,SAAS,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,CAAC;QAC7C,OAAO,IAAI,CAAC;IACd,CAAC;IAED,OAAO;QACL,OAAO,EAAE,OAAO,CAAC,SAAS;QAC1B,MAAM,EAAE,OAAO,CAAC,SAAS;QACzB,OAAO,EAAE,OAAO,CAAC,aAAa,IAAI,KAAK;QACvC,UAAU,EAAE,OAAO,CAAC,gBAAgB,IAAI,CAAC;KAC1C,CAAC;AACJ,CAAC"}

123  dist/constants/type-structures.d.ts  (new vendored file)
@@ -0,0 +1,123 @@

import type { NodePropertyTypes } from 'n8n-workflow';
import type { TypeStructure } from '../types/type-structures';
export declare const TYPE_STRUCTURES: Record<NodePropertyTypes, TypeStructure>;
export declare const COMPLEX_TYPE_EXAMPLES: {
    collection: {
        basic: {
            name: string;
            email: string;
        };
        nested: {
            user: {
                firstName: string;
                lastName: string;
            };
            preferences: {
                theme: string;
                notifications: boolean;
            };
        };
        withExpressions: {
            id: string;
            timestamp: string;
            data: string;
        };
    };
    fixedCollection: {
        httpHeaders: {
            headers: {
                name: string;
                value: string;
            }[];
        };
        queryParameters: {
            queryParameters: {
                name: string;
                value: string;
            }[];
        };
        multipleCollections: {
            headers: {
                name: string;
                value: string;
            }[];
            queryParameters: {
                name: string;
                value: string;
            }[];
        };
    };
    filter: {
        simple: {
            conditions: {
                id: string;
                leftValue: string;
                operator: {
                    type: string;
                    operation: string;
                };
                rightValue: string;
            }[];
            combinator: string;
        };
        complex: {
            conditions: ({
                id: string;
                leftValue: string;
                operator: {
                    type: string;
                    operation: string;
                };
                rightValue: number;
            } | {
                id: string;
                leftValue: string;
                operator: {
                    type: string;
                    operation: string;
                };
                rightValue: string;
            })[];
            combinator: string;
        };
    };
    resourceMapper: {
        autoMap: {
            mappingMode: string;
            value: {};
        };
        manual: {
            mappingMode: string;
            value: {
                firstName: string;
                lastName: string;
                email: string;
                status: string;
            };
        };
    };
    assignmentCollection: {
        basic: {
            assignments: {
                id: string;
                name: string;
                value: string;
                type: string;
            }[];
        };
        multiple: {
            assignments: ({
                id: string;
                name: string;
                value: string;
                type: string;
            } | {
                id: string;
                name: string;
                value: boolean;
                type: string;
            })[];
        };
    };
};
//# sourceMappingURL=type-structures.d.ts.map

1  dist/constants/type-structures.d.ts.map  (new vendored file)
@@ -0,0 +1 @@

{"version":3,"file":"type-structures.d.ts","sourceRoot":"","sources":["../../src/constants/type-structures.ts"],"names":[],"mappings":"AAaA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,cAAc,CAAC;AACtD,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,0BAA0B,CAAC;AAe9D,eAAO,MAAM,eAAe,EAAE,MAAM,CAAC,iBAAiB,EAAE,aAAa,CAilBpE,CAAC;AAUF,eAAO,MAAM,qBAAqB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA4GjC,CAAC"}

654  dist/constants/type-structures.js  (new vendored file)
@@ -0,0 +1,654 @@

"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.COMPLEX_TYPE_EXAMPLES = exports.TYPE_STRUCTURES = void 0;
|
||||
exports.TYPE_STRUCTURES = {
|
||||
string: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A text value that can contain any characters',
|
||||
example: 'Hello World',
|
||||
examples: ['', 'A simple text', '{{ $json.name }}', 'https://example.com'],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: ['Most common property type', 'Supports n8n expressions'],
|
||||
},
|
||||
number: {
|
||||
type: 'primitive',
|
||||
jsType: 'number',
|
||||
description: 'A numeric value (integer or decimal)',
|
||||
example: 42,
|
||||
examples: [0, -10, 3.14, 100],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: ['Can be constrained with min/max in typeOptions'],
|
||||
},
|
||||
boolean: {
|
||||
type: 'primitive',
|
||||
jsType: 'boolean',
|
||||
description: 'A true/false toggle value',
|
||||
example: true,
|
||||
examples: [true, false],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: ['Rendered as checkbox in n8n UI'],
|
||||
},
|
||||
dateTime: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A date and time value in ISO 8601 format',
|
||||
example: '2024-01-20T10:30:00Z',
|
||||
examples: [
|
||||
'2024-01-20T10:30:00Z',
|
||||
'2024-01-20',
|
||||
'{{ $now }}',
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
pattern: '^\\d{4}-\\d{2}-\\d{2}(T\\d{2}:\\d{2}:\\d{2}(\\.\\d{3})?Z?)?$',
|
||||
},
|
||||
notes: ['Accepts ISO 8601 format', 'Can use n8n date expressions'],
|
||||
},
|
||||
color: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A color value in hex format',
|
||||
example: '#FF5733',
|
||||
examples: ['#FF5733', '#000000', '#FFFFFF', '{{ $json.color }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
pattern: '^#[0-9A-Fa-f]{6}$',
|
||||
},
|
||||
notes: ['Must be 6-digit hex color', 'Rendered with color picker in UI'],
|
||||
},
|
||||
json: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A JSON string that can be parsed into any structure',
|
||||
example: '{"key": "value", "nested": {"data": 123}}',
|
||||
examples: [
|
||||
'{}',
|
||||
'{"name": "John", "age": 30}',
|
||||
'[1, 2, 3]',
|
||||
'{{ $json }}',
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: ['Must be valid JSON when parsed', 'Often used for custom payloads'],
|
||||
},
|
||||
options: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'Single selection from a list of predefined options',
|
||||
example: 'option1',
|
||||
examples: ['GET', 'POST', 'channelMessage', 'update'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Value must match one of the defined option values',
|
||||
'Rendered as dropdown in UI',
|
||||
'Options defined in property.options array',
|
||||
],
|
||||
},
|
||||
multiOptions: {
|
||||
type: 'array',
|
||||
jsType: 'array',
|
||||
description: 'Multiple selections from a list of predefined options',
|
||||
structure: {
|
||||
items: {
|
||||
type: 'string',
|
||||
description: 'Selected option value',
|
||||
},
|
||||
},
|
||||
example: ['option1', 'option2'],
|
||||
examples: [[], ['GET', 'POST'], ['read', 'write', 'delete']],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Array of option values',
|
||||
'Each value must exist in property.options',
|
||||
'Rendered as multi-select dropdown',
|
||||
],
|
||||
},
|
||||
collection: {
|
||||
type: 'collection',
|
||||
jsType: 'object',
|
||||
description: 'A group of related properties with dynamic values',
|
||||
structure: {
|
||||
properties: {
|
||||
'<propertyName>': {
|
||||
type: 'any',
|
||||
description: 'Any nested property from the collection definition',
|
||||
},
|
||||
},
|
||||
flexible: true,
|
||||
},
|
||||
example: {
|
||||
name: 'John Doe',
|
||||
email: 'john@example.com',
|
||||
age: 30,
|
||||
},
|
||||
examples: [
|
||||
{},
|
||||
{ key1: 'value1', key2: 123 },
|
||||
{ nested: { deep: { value: true } } },
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Properties defined in property.values array',
|
||||
'Each property can be any type',
|
||||
'UI renders as expandable section',
|
||||
],
|
||||
},
|
||||
fixedCollection: {
|
||||
type: 'collection',
|
||||
jsType: 'object',
|
||||
description: 'A collection with predefined groups of properties',
|
||||
structure: {
|
||||
properties: {
|
||||
'<collectionName>': {
|
||||
type: 'array',
|
||||
description: 'Array of collection items',
|
||||
items: {
|
||||
type: 'object',
|
||||
description: 'Collection item with defined properties',
|
||||
},
|
||||
},
|
||||
},
|
||||
required: [],
|
||||
},
|
||||
example: {
|
||||
headers: [
|
||||
{ name: 'Content-Type', value: 'application/json' },
|
||||
{ name: 'Authorization', value: 'Bearer token' },
|
||||
],
|
||||
},
|
||||
examples: [
|
||||
{},
|
||||
{ queryParameters: [{ name: 'id', value: '123' }] },
|
||||
{
|
||||
headers: [{ name: 'Accept', value: '*/*' }],
|
||||
queryParameters: [{ name: 'limit', value: '10' }],
|
||||
},
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Each collection has predefined structure',
|
||||
'Often used for headers, parameters, etc.',
|
||||
'Supports multiple values per collection',
|
||||
],
|
||||
},
|
||||
resourceLocator: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'A flexible way to specify a resource by ID, name, URL, or list',
|
||||
structure: {
|
||||
properties: {
|
||||
mode: {
|
||||
type: 'string',
|
||||
description: 'How the resource is specified',
|
||||
enum: ['id', 'url', 'list'],
|
||||
required: true,
|
||||
},
|
||||
value: {
|
||||
type: 'string',
|
||||
description: 'The resource identifier',
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['mode', 'value'],
|
||||
},
|
||||
example: {
|
||||
mode: 'id',
|
||||
value: 'abc123',
|
||||
},
|
||||
examples: [
|
||||
{ mode: 'url', value: 'https://example.com/resource/123' },
|
||||
{ mode: 'list', value: 'item-from-dropdown' },
|
||||
{ mode: 'id', value: '{{ $json.resourceId }}' },
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Provides flexible resource selection',
|
||||
'Mode determines how value is interpreted',
|
||||
'UI adapts based on selected mode',
|
||||
],
|
||||
},
|
||||
resourceMapper: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Maps input data fields to resource fields with transformation options',
|
||||
structure: {
|
||||
properties: {
|
||||
mappingMode: {
|
||||
type: 'string',
|
||||
description: 'How fields are mapped',
|
||||
enum: ['defineBelow', 'autoMapInputData'],
|
||||
},
|
||||
value: {
|
||||
type: 'object',
|
||||
description: 'Field mappings',
|
||||
properties: {
|
||||
'<fieldName>': {
|
||||
type: 'string',
|
||||
description: 'Expression or value for this field',
|
||||
},
|
||||
},
|
||||
flexible: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
example: {
|
||||
mappingMode: 'defineBelow',
|
||||
value: {
|
||||
name: '{{ $json.fullName }}',
|
||||
email: '{{ $json.emailAddress }}',
|
||||
status: 'active',
|
||||
},
|
||||
},
|
||||
examples: [
|
||||
{ mappingMode: 'autoMapInputData', value: {} },
|
||||
{
|
||||
mappingMode: 'defineBelow',
|
||||
value: { id: '{{ $json.userId }}', name: '{{ $json.name }}' },
|
||||
},
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Complex mapping with UI assistance',
|
||||
'Can auto-map or manually define',
|
||||
'Supports field transformations',
|
||||
],
|
||||
},
|
||||
filter: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Defines conditions for filtering data with boolean logic',
|
||||
structure: {
|
||||
properties: {
|
||||
conditions: {
|
||||
type: 'array',
|
||||
description: 'Array of filter conditions',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'Unique condition identifier',
|
||||
required: true,
|
||||
},
|
||||
leftValue: {
|
||||
type: 'any',
|
||||
description: 'Left side of comparison',
|
||||
},
|
||||
operator: {
|
||||
type: 'object',
|
||||
description: 'Comparison operator',
|
||||
required: true,
|
||||
properties: {
|
||||
type: {
|
||||
type: 'string',
|
||||
enum: ['string', 'number', 'boolean', 'dateTime', 'array', 'object'],
|
||||
required: true,
|
||||
},
|
||||
operation: {
|
||||
type: 'string',
|
||||
description: 'Operation to perform',
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
rightValue: {
|
||||
type: 'any',
|
||||
description: 'Right side of comparison',
|
||||
},
|
||||
},
|
||||
},
|
||||
required: true,
|
||||
},
|
||||
combinator: {
|
||||
type: 'string',
|
||||
description: 'How to combine conditions',
|
||||
enum: ['and', 'or'],
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['conditions', 'combinator'],
|
||||
},
|
||||
example: {
|
||||
conditions: [
|
||||
{
|
||||
id: 'abc-123',
|
||||
leftValue: '{{ $json.status }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'active',
|
||||
},
|
||||
],
|
||||
combinator: 'and',
|
||||
},
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Advanced filtering UI in n8n',
|
||||
'Supports complex boolean logic',
|
||||
'Operations vary by data type',
|
||||
],
|
||||
},
|
||||
assignmentCollection: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Defines variable assignments with expressions',
|
||||
structure: {
|
||||
properties: {
|
||||
assignments: {
|
||||
type: 'array',
|
||||
description: 'Array of variable assignments',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'Unique assignment identifier',
|
||||
required: true,
|
||||
},
|
||||
name: {
|
||||
type: 'string',
|
||||
description: 'Variable name',
|
||||
required: true,
|
||||
},
|
||||
value: {
|
||||
type: 'any',
|
||||
description: 'Value to assign',
|
||||
required: true,
|
||||
},
|
||||
type: {
|
||||
type: 'string',
|
||||
description: 'Data type of the value',
|
||||
enum: ['string', 'number', 'boolean', 'array', 'object'],
|
||||
},
|
||||
},
|
||||
},
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['assignments'],
|
||||
},
|
||||
example: {
|
||||
assignments: [
|
||||
{
|
||||
id: 'abc-123',
|
||||
name: 'userName',
|
||||
value: '{{ $json.name }}',
|
||||
type: 'string',
|
||||
},
|
||||
{
|
||||
id: 'def-456',
|
||||
name: 'userAge',
|
||||
value: 30,
|
||||
type: 'number',
|
||||
},
|
||||
],
|
||||
},
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Used in Set node and similar',
|
||||
'Each assignment can use expressions',
|
||||
'Type helps with validation',
|
||||
],
|
||||
},
|
||||
credentials: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Reference to credential configuration',
|
||||
example: 'googleSheetsOAuth2Api',
|
||||
examples: ['httpBasicAuth', 'slackOAuth2Api', 'postgresApi'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'References credential type name',
|
||||
'Credential must be configured in n8n',
|
||||
'Type name matches credential definition',
|
||||
],
|
||||
},
|
||||
credentialsSelect: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Dropdown to select from available credentials',
|
||||
example: 'credential-id-123',
|
||||
examples: ['cred-abc', 'cred-def', '{{ $credentials.id }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'User selects from configured credentials',
|
||||
'Returns credential ID',
|
||||
'Used when multiple credential instances exist',
|
||||
],
|
||||
},
|
||||
hidden: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Hidden property not shown in UI (used for internal logic)',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Not rendered in UI',
|
||||
'Can store metadata or computed values',
|
||||
'Often used for version tracking',
|
||||
],
|
||||
},
|
||||
button: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Clickable button that triggers an action',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Triggers action when clicked',
|
||||
'Does not store a value',
|
||||
'Action defined in routing property',
|
||||
],
|
||||
},
|
||||
callout: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Informational message box (warning, info, success, error)',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Display-only, no value stored',
|
||||
'Used for warnings and hints',
|
||||
'Style controlled by typeOptions',
|
||||
],
|
||||
},
|
||||
notice: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Notice message displayed to user',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: ['Similar to callout', 'Display-only element', 'Provides contextual information'],
|
||||
},
|
||||
workflowSelector: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Dropdown to select another workflow',
|
||||
example: 'workflow-123',
|
||||
examples: ['wf-abc', '{{ $json.workflowId }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Selects from available workflows',
|
||||
'Returns workflow ID',
|
||||
'Used in Execute Workflow node',
|
||||
],
|
||||
},
|
||||
curlImport: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Import configuration from cURL command',
|
||||
example: 'curl -X GET https://api.example.com/data',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Parses cURL command to populate fields',
|
||||
'Used in HTTP Request node',
|
||||
'One-time import feature',
|
||||
],
|
||||
},
|
||||
};
|
||||
exports.COMPLEX_TYPE_EXAMPLES = {
|
||||
collection: {
|
||||
basic: {
|
||||
name: 'John Doe',
|
||||
email: 'john@example.com',
|
||||
},
|
||||
nested: {
|
||||
user: {
|
||||
firstName: 'Jane',
|
||||
lastName: 'Smith',
|
||||
},
|
||||
preferences: {
|
||||
theme: 'dark',
|
||||
notifications: true,
|
||||
},
|
||||
},
|
||||
withExpressions: {
|
||||
id: '{{ $json.userId }}',
|
||||
timestamp: '{{ $now }}',
|
||||
data: '{{ $json.payload }}',
|
||||
},
|
||||
},
|
||||
fixedCollection: {
|
||||
httpHeaders: {
|
||||
headers: [
|
||||
{ name: 'Content-Type', value: 'application/json' },
|
||||
{ name: 'Authorization', value: 'Bearer {{ $credentials.token }}' },
|
||||
],
|
||||
},
|
||||
queryParameters: {
|
||||
queryParameters: [
|
||||
{ name: 'page', value: '1' },
|
||||
{ name: 'limit', value: '100' },
|
||||
],
|
||||
},
|
||||
multipleCollections: {
|
||||
headers: [{ name: 'Accept', value: 'application/json' }],
|
||||
queryParameters: [{ name: 'filter', value: 'active' }],
|
||||
},
|
||||
},
|
||||
filter: {
|
||||
simple: {
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
leftValue: '{{ $json.status }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'active',
|
||||
},
|
||||
],
|
||||
combinator: 'and',
|
||||
},
|
||||
complex: {
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
leftValue: '{{ $json.age }}',
|
||||
operator: { type: 'number', operation: 'gt' },
|
||||
rightValue: 18,
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
leftValue: '{{ $json.country }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'US',
|
||||
},
|
||||
],
|
||||
combinator: 'and',
|
||||
},
|
||||
},
|
||||
resourceMapper: {
|
||||
autoMap: {
|
||||
mappingMode: 'autoMapInputData',
|
||||
value: {},
|
||||
},
|
||||
manual: {
|
||||
mappingMode: 'defineBelow',
|
||||
value: {
|
||||
firstName: '{{ $json.first_name }}',
|
||||
lastName: '{{ $json.last_name }}',
|
||||
email: '{{ $json.email_address }}',
|
||||
status: 'active',
|
||||
},
|
||||
},
|
||||
},
|
||||
assignmentCollection: {
|
||||
basic: {
|
||||
assignments: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'fullName',
|
||||
value: '{{ $json.firstName }} {{ $json.lastName }}',
|
||||
type: 'string',
|
||||
},
|
||||
],
|
||||
},
|
||||
multiple: {
|
||||
assignments: [
|
||||
{ id: '1', name: 'userName', value: '{{ $json.name }}', type: 'string' },
|
||||
{ id: '2', name: 'userAge', value: '{{ $json.age }}', type: 'number' },
|
||||
{ id: '3', name: 'isActive', value: true, type: 'boolean' },
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
//# sourceMappingURL=type-structures.js.map

1  dist/constants/type-structures.js.map  (new vendored file; diff suppressed because one or more lines are too long)

33  dist/database/database-adapter.d.ts  (new vendored file)
@@ -0,0 +1,33 @@

export interface DatabaseAdapter {
    prepare(sql: string): PreparedStatement;
    exec(sql: string): void;
    close(): void;
    pragma(key: string, value?: any): any;
    readonly inTransaction: boolean;
    transaction<T>(fn: () => T): T;
    checkFTS5Support(): boolean;
}
export interface PreparedStatement {
    run(...params: any[]): RunResult;
    get(...params: any[]): any;
    all(...params: any[]): any[];
    iterate(...params: any[]): IterableIterator<any>;
    pluck(toggle?: boolean): this;
    expand(toggle?: boolean): this;
    raw(toggle?: boolean): this;
    columns(): ColumnDefinition[];
    bind(...params: any[]): this;
}
export interface RunResult {
    changes: number;
    lastInsertRowid: number | bigint;
}
export interface ColumnDefinition {
    name: string;
    column: string | null;
    table: string | null;
    database: string | null;
    type: string | null;
}
export declare function createDatabaseAdapter(dbPath: string): Promise<DatabaseAdapter>;
//# sourceMappingURL=database-adapter.d.ts.map
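The interface above is enough to write against without looking at the implementation. Below is a hedged usage sketch built only from the declared signatures; the import path, table name, and SQL are illustrative assumptions, not anything from this diff.

```typescript
import { createDatabaseAdapter } from './database-adapter';

async function demo(): Promise<void> {
  // The factory falls back from better-sqlite3 to sql.js internally (see the .js file below).
  const db = await createDatabaseAdapter('./data/demo.db');

  db.exec('CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)');

  const insert = db.prepare('INSERT INTO notes (body) VALUES (?)');
  const { lastInsertRowid } = insert.run('hello');

  const row = db.prepare('SELECT body FROM notes WHERE id = ?').get(lastInsertRowid);
  console.log(row.body); // "hello"

  db.close();
}

demo().catch(console.error);
```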

1  dist/database/database-adapter.d.ts.map  (new vendored file)
@@ -0,0 +1 @@

{"version":3,"file":"database-adapter.d.ts","sourceRoot":"","sources":["../../src/database/database-adapter.ts"],"names":[],"mappings":"AAQA,MAAM,WAAW,eAAe;IAC9B,OAAO,CAAC,GAAG,EAAE,MAAM,GAAG,iBAAiB,CAAC;IACxC,IAAI,CAAC,GAAG,EAAE,MAAM,GAAG,IAAI,CAAC;IACxB,KAAK,IAAI,IAAI,CAAC;IACd,MAAM,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,GAAG,GAAG,GAAG,CAAC;IACtC,QAAQ,CAAC,aAAa,EAAE,OAAO,CAAC;IAChC,WAAW,CAAC,CAAC,EAAE,EAAE,EAAE,MAAM,CAAC,GAAG,CAAC,CAAC;IAC/B,gBAAgB,IAAI,OAAO,CAAC;CAC7B;AAED,MAAM,WAAW,iBAAiB;IAChC,GAAG,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,SAAS,CAAC;IACjC,GAAG,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,GAAG,CAAC;IAC3B,GAAG,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,GAAG,EAAE,CAAC;IAC7B,OAAO,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,gBAAgB,CAAC,GAAG,CAAC,CAAC;IACjD,KAAK,CAAC,MAAM,CAAC,EAAE,OAAO,GAAG,IAAI,CAAC;IAC9B,MAAM,CAAC,MAAM,CAAC,EAAE,OAAO,GAAG,IAAI,CAAC;IAC/B,GAAG,CAAC,MAAM,CAAC,EAAE,OAAO,GAAG,IAAI,CAAC;IAC5B,OAAO,IAAI,gBAAgB,EAAE,CAAC;IAC9B,IAAI,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,IAAI,CAAC;CAC9B;AAED,MAAM,WAAW,SAAS;IACxB,OAAO,EAAE,MAAM,CAAC;IAChB,eAAe,EAAE,MAAM,GAAG,MAAM,CAAC;CAClC;AAED,MAAM,WAAW,gBAAgB;IAC/B,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC;IACtB,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;IACrB,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;IACxB,IAAI,EAAE,MAAM,GAAG,IAAI,CAAC;CACrB;AAMD,wBAAsB,qBAAqB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,eAAe,CAAC,CAoDpF"}

420  dist/database/database-adapter.js  (new vendored file)
@@ -0,0 +1,420 @@

"use strict";
|
||||
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
var desc = Object.getOwnPropertyDescriptor(m, k);
|
||||
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
||||
desc = { enumerable: true, get: function() { return m[k]; } };
|
||||
}
|
||||
Object.defineProperty(o, k2, desc);
|
||||
}) : (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
o[k2] = m[k];
|
||||
}));
|
||||
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
||||
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
||||
}) : function(o, v) {
|
||||
o["default"] = v;
|
||||
});
|
||||
var __importStar = (this && this.__importStar) || (function () {
|
||||
var ownKeys = function(o) {
|
||||
ownKeys = Object.getOwnPropertyNames || function (o) {
|
||||
var ar = [];
|
||||
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
||||
return ar;
|
||||
};
|
||||
return ownKeys(o);
|
||||
};
|
||||
return function (mod) {
|
||||
if (mod && mod.__esModule) return mod;
|
||||
var result = {};
|
||||
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
||||
__setModuleDefault(result, mod);
|
||||
return result;
|
||||
};
|
||||
})();
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.createDatabaseAdapter = createDatabaseAdapter;
|
||||
const fs_1 = require("fs");
|
||||
const fsSync = __importStar(require("fs"));
|
||||
const path_1 = __importDefault(require("path"));
|
||||
const logger_1 = require("../utils/logger");
|
||||
async function createDatabaseAdapter(dbPath) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info(`Node.js version: ${process.version}`);
|
||||
}
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info(`Platform: ${process.platform} ${process.arch}`);
|
||||
}
|
||||
try {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info('Attempting to use better-sqlite3...');
|
||||
}
|
||||
const adapter = await createBetterSQLiteAdapter(dbPath);
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info('Successfully initialized better-sqlite3 adapter');
|
||||
}
|
||||
return adapter;
|
||||
}
|
||||
catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error);
|
||||
if (errorMessage.includes('NODE_MODULE_VERSION') || errorMessage.includes('was compiled against a different Node.js version')) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.warn(`Node.js version mismatch detected. Better-sqlite3 was compiled for a different Node.js version.`);
|
||||
}
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.warn(`Current Node.js version: ${process.version}`);
|
||||
}
|
||||
}
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.warn('Failed to initialize better-sqlite3, falling back to sql.js', error);
|
||||
}
|
||||
try {
|
||||
const adapter = await createSQLJSAdapter(dbPath);
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info('Successfully initialized sql.js adapter (pure JavaScript, no native dependencies)');
|
||||
}
|
||||
return adapter;
|
||||
}
|
||||
catch (sqlJsError) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.error('Failed to initialize sql.js adapter', sqlJsError);
|
||||
}
|
||||
throw new Error('Failed to initialize any database adapter');
|
||||
}
|
||||
}
|
||||
}
|
||||
async function createBetterSQLiteAdapter(dbPath) {
|
||||
try {
|
||||
const Database = require('better-sqlite3');
|
||||
const db = new Database(dbPath);
|
||||
return new BetterSQLiteAdapter(db);
|
||||
}
|
||||
catch (error) {
|
||||
throw new Error(`Failed to create better-sqlite3 adapter: ${error}`);
|
||||
}
|
||||
}
|
||||
async function createSQLJSAdapter(dbPath) {
|
||||
let initSqlJs;
|
||||
try {
|
||||
initSqlJs = require('sql.js');
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error('Failed to load sql.js module:', error);
|
||||
throw new Error('sql.js module not found. This might be an issue with npm package installation.');
|
||||
}
|
||||
const SQL = await initSqlJs({
|
||||
locateFile: (file) => {
|
||||
if (file.endsWith('.wasm')) {
|
||||
const possiblePaths = [
|
||||
path_1.default.join(__dirname, '../../node_modules/sql.js/dist/', file),
|
||||
path_1.default.join(__dirname, '../../../sql.js/dist/', file),
|
||||
path_1.default.join(process.cwd(), 'node_modules/sql.js/dist/', file),
|
||||
path_1.default.join(path_1.default.dirname(require.resolve('sql.js')), '../dist/', file)
|
||||
];
|
||||
for (const tryPath of possiblePaths) {
|
||||
if (fsSync.existsSync(tryPath)) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.debug(`Found WASM file at: ${tryPath}`);
|
||||
}
|
||||
return tryPath;
|
||||
}
|
||||
}
|
||||
try {
|
||||
const wasmPath = require.resolve('sql.js/dist/sql-wasm.wasm');
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.debug(`Found WASM file via require.resolve: ${wasmPath}`);
|
||||
}
|
||||
return wasmPath;
|
||||
}
|
||||
catch (e) {
|
||||
logger_1.logger.warn(`Could not find WASM file, using default path: ${file}`);
|
||||
return file;
|
||||
}
|
||||
}
|
||||
return file;
|
||||
}
|
||||
});
|
||||
let db;
|
||||
try {
|
||||
const data = await fs_1.promises.readFile(dbPath);
|
||||
db = new SQL.Database(new Uint8Array(data));
|
||||
logger_1.logger.info(`Loaded existing database from ${dbPath}`);
|
||||
}
|
||||
catch (error) {
|
||||
db = new SQL.Database();
|
||||
logger_1.logger.info(`Created new database at ${dbPath}`);
|
||||
}
|
||||
return new SQLJSAdapter(db, dbPath);
|
||||
}
|
||||
class BetterSQLiteAdapter {
|
||||
constructor(db) {
|
||||
this.db = db;
|
||||
}
|
||||
prepare(sql) {
|
||||
const stmt = this.db.prepare(sql);
|
||||
return new BetterSQLiteStatement(stmt);
|
||||
}
|
||||
exec(sql) {
|
||||
this.db.exec(sql);
|
||||
}
|
||||
close() {
|
||||
this.db.close();
|
||||
}
|
||||
pragma(key, value) {
|
||||
return this.db.pragma(key, value);
|
||||
}
|
||||
get inTransaction() {
|
||||
return this.db.inTransaction;
|
||||
}
|
||||
transaction(fn) {
|
||||
return this.db.transaction(fn)();
|
||||
}
|
||||
checkFTS5Support() {
|
||||
try {
|
||||
this.exec("CREATE VIRTUAL TABLE IF NOT EXISTS test_fts5 USING fts5(content);");
|
||||
this.exec("DROP TABLE IF EXISTS test_fts5;");
|
||||
return true;
|
||||
}
|
||||
catch (error) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
class SQLJSAdapter {
|
||||
constructor(db, dbPath) {
|
||||
this.db = db;
|
||||
this.dbPath = dbPath;
|
||||
this.saveTimer = null;
|
||||
this.closed = false;
|
||||
const envInterval = process.env.SQLJS_SAVE_INTERVAL_MS;
|
||||
this.saveIntervalMs = envInterval ? parseInt(envInterval, 10) : SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
|
||||
if (isNaN(this.saveIntervalMs) || this.saveIntervalMs < 100 || this.saveIntervalMs > 60000) {
|
||||
logger_1.logger.warn(`Invalid SQLJS_SAVE_INTERVAL_MS value: ${envInterval} (must be 100-60000ms), ` +
|
||||
`using default ${SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS}ms`);
|
||||
this.saveIntervalMs = SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
|
||||
}
|
||||
logger_1.logger.debug(`SQLJSAdapter initialized with save interval: ${this.saveIntervalMs}ms`);
|
||||
}
|
||||
prepare(sql) {
|
||||
const stmt = this.db.prepare(sql);
|
||||
return new SQLJSStatement(stmt, () => this.scheduleSave());
|
||||
}
|
||||
exec(sql) {
|
||||
this.db.exec(sql);
|
||||
this.scheduleSave();
|
||||
}
|
||||
close() {
|
||||
if (this.closed) {
|
||||
logger_1.logger.debug('SQLJSAdapter already closed, skipping');
|
||||
return;
|
||||
}
|
||||
this.saveToFile();
|
||||
if (this.saveTimer) {
|
||||
clearTimeout(this.saveTimer);
|
||||
this.saveTimer = null;
|
||||
}
|
||||
this.db.close();
|
||||
this.closed = true;
|
||||
}
|
||||
pragma(key, value) {
|
||||
if (key === 'journal_mode' && value === 'WAL') {
|
||||
return 'memory';
|
||||
}
|
||||
return null;
|
||||
}
|
||||
get inTransaction() {
|
||||
return false;
|
||||
}
|
||||
transaction(fn) {
|
||||
try {
|
||||
this.exec('BEGIN');
|
||||
const result = fn();
|
||||
this.exec('COMMIT');
|
||||
return result;
|
||||
}
|
||||
catch (error) {
|
||||
this.exec('ROLLBACK');
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
checkFTS5Support() {
|
||||
try {
|
||||
this.exec("CREATE VIRTUAL TABLE IF NOT EXISTS test_fts5 USING fts5(content);");
|
||||
this.exec("DROP TABLE IF EXISTS test_fts5;");
|
||||
return true;
|
||||
}
|
||||
catch (error) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
scheduleSave() {
|
||||
if (this.saveTimer) {
|
||||
clearTimeout(this.saveTimer);
|
||||
}
|
||||
this.saveTimer = setTimeout(() => {
|
||||
this.saveToFile();
|
||||
}, this.saveIntervalMs);
|
||||
}
|
||||
saveToFile() {
|
||||
try {
|
||||
const data = this.db.export();
|
||||
fsSync.writeFileSync(this.dbPath, data);
|
||||
logger_1.logger.debug(`Database saved to ${this.dbPath}`);
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error('Failed to save database', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS = 5000;
|
||||
class BetterSQLiteStatement {
|
||||
constructor(stmt) {
|
||||
this.stmt = stmt;
|
||||
}
|
||||
run(...params) {
|
||||
return this.stmt.run(...params);
|
||||
}
|
||||
get(...params) {
|
||||
return this.stmt.get(...params);
|
||||
}
|
||||
all(...params) {
|
||||
return this.stmt.all(...params);
|
||||
}
|
||||
iterate(...params) {
|
||||
return this.stmt.iterate(...params);
|
||||
}
|
||||
pluck(toggle) {
|
||||
this.stmt.pluck(toggle);
|
||||
return this;
|
||||
}
|
||||
expand(toggle) {
|
||||
this.stmt.expand(toggle);
|
||||
return this;
|
||||
}
|
||||
raw(toggle) {
|
||||
this.stmt.raw(toggle);
|
||||
return this;
|
||||
}
|
||||
columns() {
|
||||
return this.stmt.columns();
|
||||
}
|
||||
bind(...params) {
|
||||
this.stmt.bind(...params);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
class SQLJSStatement {
|
||||
constructor(stmt, onModify) {
|
||||
this.stmt = stmt;
|
||||
this.onModify = onModify;
|
||||
this.boundParams = null;
|
||||
}
|
||||
run(...params) {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
this.bindParams(params);
|
||||
if (this.boundParams) {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
this.stmt.run();
|
||||
this.onModify();
|
||||
return {
|
||||
changes: 1,
|
||||
lastInsertRowid: 0
|
||||
};
|
||||
}
|
||||
catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
get(...params) {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
this.bindParams(params);
|
||||
if (this.boundParams) {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
if (this.stmt.step()) {
|
||||
const result = this.stmt.getAsObject();
|
||||
this.stmt.reset();
|
||||
return this.convertIntegerColumns(result);
|
||||
}
|
||||
this.stmt.reset();
|
||||
return undefined;
|
||||
}
|
||||
catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
all(...params) {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
this.bindParams(params);
|
||||
if (this.boundParams) {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
const results = [];
|
||||
while (this.stmt.step()) {
|
||||
results.push(this.convertIntegerColumns(this.stmt.getAsObject()));
|
||||
}
|
||||
this.stmt.reset();
|
||||
return results;
|
||||
}
|
||||
catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
iterate(...params) {
|
||||
return this.all(...params)[Symbol.iterator]();
|
||||
}
|
||||
pluck(toggle) {
|
||||
return this;
|
||||
}
|
||||
expand(toggle) {
|
||||
return this;
|
||||
}
|
||||
raw(toggle) {
|
||||
return this;
|
||||
}
|
||||
columns() {
|
||||
return [];
|
||||
}
|
||||
bind(...params) {
|
||||
this.bindParams(params);
|
||||
return this;
|
||||
}
|
||||
bindParams(params) {
|
||||
if (params.length === 0) {
|
||||
this.boundParams = null;
|
||||
return;
|
||||
}
|
||||
if (params.length === 1 && typeof params[0] === 'object' && !Array.isArray(params[0]) && params[0] !== null) {
|
||||
this.boundParams = params[0];
|
||||
}
|
||||
else {
|
||||
this.boundParams = params.map(p => p === undefined ? null : p);
|
||||
}
|
||||
}
|
||||
convertIntegerColumns(row) {
|
||||
if (!row)
|
||||
return row;
|
||||
const integerColumns = ['is_ai_tool', 'is_trigger', 'is_webhook', 'is_versioned'];
|
||||
const converted = { ...row };
|
||||
for (const col of integerColumns) {
|
||||
if (col in converted && typeof converted[col] === 'string') {
|
||||
converted[col] = parseInt(converted[col], 10);
|
||||
}
|
||||
}
|
||||
return converted;
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=database-adapter.js.map
|
||||
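Two behavioral notes on the implementation above: the sql.js branch only reaches disk via the debounced saveToFile() (tunable through SQLJS_SAVE_INTERVAL_MS, clamped to 100-60000 ms), and its statement wrapper returns a fixed { changes: 1, lastInsertRowid: 0 }, so callers cannot rely on those fields under the fallback. transaction() keeps one contract across both backends; a minimal sketch, assuming a db obtained from createDatabaseAdapter and the example rows are illustrative:

const insert = db.prepare('INSERT INTO nodes (node_type, display_name) VALUES (?, ?)');
db.transaction(() => {
    insert.run('n8n-nodes-base.httpRequest', 'HTTP Request');
    insert.run('n8n-nodes-base.slack', 'Slack');
}); // on a throw, the sql.js branch issues ROLLBACK; better-sqlite3 uses its native transaction helper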
1
dist/database/database-adapter.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
95
dist/database/node-repository.d.ts
vendored
Normal file
@@ -0,0 +1,95 @@
import { DatabaseAdapter } from './database-adapter';
import { ParsedNode } from '../parsers/node-parser';
import { SQLiteStorageService } from '../services/sqlite-storage-service';
export declare class NodeRepository {
    private db;
    constructor(dbOrService: DatabaseAdapter | SQLiteStorageService);
    saveNode(node: ParsedNode): void;
    getNode(nodeType: string): any;
    getAITools(): any[];
    private safeJsonParse;
    upsertNode(node: ParsedNode): void;
    getNodeByType(nodeType: string): any;
    getNodesByCategory(category: string): any[];
    searchNodes(query: string, mode?: 'OR' | 'AND' | 'FUZZY', limit?: number): any[];
    getAllNodes(limit?: number): any[];
    getNodeCount(): number;
    getAIToolNodes(): any[];
    getToolVariant(baseNodeType: string): any | null;
    getBaseNodeForToolVariant(toolNodeType: string): any | null;
    getToolVariants(): any[];
    getToolVariantCount(): number;
    getNodesByPackage(packageName: string): any[];
    searchNodeProperties(nodeType: string, query: string, maxResults?: number): any[];
    private parseNodeRow;
    getNodeOperations(nodeType: string, resource?: string): any[];
    getNodeResources(nodeType: string): any[];
    getOperationsForResource(nodeType: string, resource: string): any[];
    getAllOperations(): Map<string, any[]>;
    getAllResources(): Map<string, any[]>;
    getNodePropertyDefaults(nodeType: string): Record<string, any>;
    getDefaultOperationForResource(nodeType: string, resource?: string): string | undefined;
    saveNodeVersion(versionData: {
        nodeType: string;
        version: string;
        packageName: string;
        displayName: string;
        description?: string;
        category?: string;
        isCurrentMax?: boolean;
        propertiesSchema?: any;
        operations?: any;
        credentialsRequired?: any;
        outputs?: any;
        minimumN8nVersion?: string;
        breakingChanges?: any[];
        deprecatedProperties?: string[];
        addedProperties?: string[];
        releasedAt?: Date;
    }): void;
    getNodeVersions(nodeType: string): any[];
    getLatestNodeVersion(nodeType: string): any | null;
    getNodeVersion(nodeType: string, version: string): any | null;
    savePropertyChange(changeData: {
        nodeType: string;
        fromVersion: string;
        toVersion: string;
        propertyName: string;
        changeType: 'added' | 'removed' | 'renamed' | 'type_changed' | 'requirement_changed' | 'default_changed';
        isBreaking?: boolean;
        oldValue?: string;
        newValue?: string;
        migrationHint?: string;
        autoMigratable?: boolean;
        migrationStrategy?: any;
        severity?: 'LOW' | 'MEDIUM' | 'HIGH';
    }): void;
    getPropertyChanges(nodeType: string, fromVersion: string, toVersion: string): any[];
    getBreakingChanges(nodeType: string, fromVersion: string, toVersion?: string): any[];
    getAutoMigratableChanges(nodeType: string, fromVersion: string, toVersion: string): any[];
    hasVersionUpgradePath(nodeType: string, fromVersion: string, toVersion: string): boolean;
    getVersionedNodesCount(): number;
    private parseNodeVersionRow;
    private parsePropertyChangeRow;
    createWorkflowVersion(data: {
        workflowId: string;
        versionNumber: number;
        workflowName: string;
        workflowSnapshot: any;
        trigger: 'partial_update' | 'full_update' | 'autofix';
        operations?: any[];
        fixTypes?: string[];
        metadata?: any;
    }): number;
    getWorkflowVersions(workflowId: string, limit?: number): any[];
    getWorkflowVersion(versionId: number): any | null;
    getLatestWorkflowVersion(workflowId: string): any | null;
    deleteWorkflowVersion(versionId: number): void;
    deleteWorkflowVersionsByWorkflowId(workflowId: string): number;
    pruneWorkflowVersions(workflowId: string, keepCount: number): number;
    truncateWorkflowVersions(): number;
    getWorkflowVersionCount(workflowId: string): number;
    getVersionStorageStats(): any;
    private parseWorkflowVersionRow;
}
//# sourceMappingURL=node-repository.d.ts.map
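A sketch of wiring the repository to an adapter; the node-type string is illustrative and the calls assume an async context:

import { createDatabaseAdapter } from './database/database-adapter';
import { NodeRepository } from './database/node-repository';

const repo = new NodeRepository(await createDatabaseAdapter('./data/nodes.db'));
const webhook = repo.getNode('n8n-nodes-base.webhook'); // type is normalized via NodeTypeNormalizer first
const aiTools = repo.getAITools();                      // rows where is_ai_tool = 1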
1
dist/database/node-repository.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"node-repository.d.ts","sourceRoot":"","sources":["../../src/database/node-repository.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,eAAe,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,EAAE,UAAU,EAAE,MAAM,wBAAwB,CAAC;AACpD,OAAO,EAAE,oBAAoB,EAAE,MAAM,oCAAoC,CAAC;AAG1E,qBAAa,cAAc;IACzB,OAAO,CAAC,EAAE,CAAkB;gBAEhB,WAAW,EAAE,eAAe,GAAG,oBAAoB;IAY/D,QAAQ,CAAC,IAAI,EAAE,UAAU,GAAG,IAAI;IAwChC,OAAO,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG;IA2B9B,UAAU,IAAI,GAAG,EAAE;IAgBnB,OAAO,CAAC,aAAa;IASrB,UAAU,CAAC,IAAI,EAAE,UAAU,GAAG,IAAI;IAIlC,aAAa,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG;IAIpC,kBAAkB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAqB3C,WAAW,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,GAAE,IAAI,GAAG,KAAK,GAAG,OAAc,EAAE,KAAK,GAAE,MAAW,GAAG,GAAG,EAAE;IAwC1F,WAAW,CAAC,KAAK,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IAUlC,YAAY,IAAI,MAAM;IAKtB,cAAc,IAAI,GAAG,EAAE;IAOvB,cAAc,CAAC,YAAY,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAYhD,yBAAyB,CAAC,YAAY,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAY3D,eAAe,IAAI,GAAG,EAAE;IAoBxB,mBAAmB,IAAI,MAAM;IAK7B,iBAAiB,CAAC,WAAW,EAAE,MAAM,GAAG,GAAG,EAAE;IAS7C,oBAAoB,CAAC,QAAQ,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,UAAU,GAAE,MAAW,GAAG,GAAG,EAAE;IAmCrF,OAAO,CAAC,YAAY;IA4BpB,iBAAiB,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IAmD7D,gBAAgB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAmBzC,wBAAwB,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAyBnE,gBAAgB,IAAI,GAAG,CAAC,MAAM,EAAE,GAAG,EAAE,CAAC;IAiBtC,eAAe,IAAI,GAAG,CAAC,MAAM,EAAE,GAAG,EAAE,CAAC;IAiBrC,uBAAuB,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC;IAwB9D,8BAA8B,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS;IAuDvF,eAAe,CAAC,WAAW,EAAE;QAC3B,QAAQ,EAAE,MAAM,CAAC;QACjB,OAAO,EAAE,MAAM,CAAC;QAChB,WAAW,EAAE,MAAM,CAAC;QACpB,WAAW,EAAE,MAAM,CAAC;QACpB,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,YAAY,CAAC,EAAE,OAAO,CAAC;QACvB,gBAAgB,CAAC,EAAE,GAAG,CAAC;QACvB,UAAU,CAAC,EAAE,GAAG,CAAC;QACjB,mBAAmB,CAAC,EAAE,GAAG,CAAC;QAC1B,OAAO,CAAC,EAAE,GAAG,CAAC;QACd,iBAAiB,CAAC,EAAE,MAAM,CAAC;QAC3B,eAAe,CAAC,EAAE,GAAG,EAAE,CAAC;QACxB,oBAAoB,CAAC,EAAE,MAAM,EAAE,CAAC;QAChC,eAAe,CAAC,EAAE,MAAM,EAAE,CAAC;QAC3B,UAAU,CAAC,EAAE,IAAI,CAAC;KACnB,GAAG,IAAI;IAkCR,eAAe,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAexC,oBAAoB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAgBlD,cAAc,CAAC,QAAQ,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAe7D,kBAAkB,CAAC,UAAU,EAAE;QAC7B,QAAQ,EAAE,MAAM,CAAC;QACjB,WAAW,EAAE,MAAM,CAAC;QACpB,SAAS,EAAE,MAAM,CAAC;QAClB,YAAY,EAAE,MAAM,CAAC;QACrB,UAAU,EAAE,OAAO,GAAG,SAAS,GAAG,SAAS,GAAG,cAAc,GAAG,qBAAqB,GAAG,iBAAiB,CAAC;QACzG,UAAU,CAAC,EAAE,OAAO,CAAC;QACrB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,aAAa,CAAC,EAAE,MAAM,CAAC;QACvB,cAAc,CAAC,EAAE,OAAO,CAAC;QACzB,iBAAiB,CAAC,EAAE,GAAG,CAAC;QACxB,QAAQ,CAAC,EAAE,KAAK,GAAG,QAAQ,GAAG,MAAM,CAAC;KACtC,GAAG,IAAI;IA4BR,kBAAkB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,GAAG,EAAE;IAgBnF,kBAAkB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IA4BpF,wBAAwB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,GAAG,EAAE;IAkBzF,qBAAqB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO;IAcxF,sBAAsB,IAAI,MAAM;IAWhC,OAAO,CAAC,mBAAmB;IA0B3B,OAAO,CAAC,sBAAsB;IA0B9B,qBAAqB,CAAC,IAAI,EAAE;QAC1B,UAAU,EAAE,MAAM,CAAC;QACnB,aAAa,EAAE,MAAM,CAAC;QACtB,YAAY,EAAE,MAAM,CAAC;QACrB,gBAAgB,EAAE,GAAG,CAAC;QACtB,OAAO,EAAE,gBAAgB,GAAG,aAAa,GAAG,SAAS,CAAC;QACtD,UAAU,CAAC,EAAE,GAAG,EAAE,CAAC;QACnB,QAAQ,CAAC,EAAE,MAAM,EAAE,CAAC;QACpB,QAAQ,CAAC,EAAE,GAAG,CAAC;KAChB,GAAG,MAAM;IAyBV,mBAAmB,CAAC,UAAU,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IAoB9D,k
BAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAYjD,wBAAwB,CAAC,UAAU,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAexD,qBAAqB,CAAC,SAAS,EAAE,MAAM,GAAG,IAAI;IAS9C,kCAAkC,CAAC,UAAU,EAAE,MAAM,GAAG,MAAM;IAY9D,qBAAqB,CAAC,UAAU,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,MAAM;IAiCpE,wBAAwB,IAAI,MAAM;IAWlC,uBAAuB,CAAC,UAAU,EAAE,MAAM,GAAG,MAAM;IAWnD,sBAAsB,IAAI,GAAG;IAwC7B,OAAO,CAAC,uBAAuB;CAchC"}
641
dist/database/node-repository.js
vendored
Normal file
@@ -0,0 +1,641 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.NodeRepository = void 0;
const sqlite_storage_service_1 = require("../services/sqlite-storage-service");
const node_type_normalizer_1 = require("../utils/node-type-normalizer");
class NodeRepository {
    constructor(dbOrService) {
        if (dbOrService instanceof sqlite_storage_service_1.SQLiteStorageService) {
            this.db = dbOrService.db;
            return;
        }
        this.db = dbOrService;
    }
    saveNode(node) {
        const stmt = this.db.prepare(`
      INSERT OR REPLACE INTO nodes (
        node_type, package_name, display_name, description,
        category, development_style, is_ai_tool, is_trigger,
        is_webhook, is_versioned, is_tool_variant, tool_variant_of,
        has_tool_variant, version, documentation,
        properties_schema, operations, credentials_required,
        outputs, output_names
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);
        stmt.run(node.nodeType, node.packageName, node.displayName, node.description, node.category, node.style, node.isAITool ? 1 : 0, node.isTrigger ? 1 : 0, node.isWebhook ? 1 : 0, node.isVersioned ? 1 : 0, node.isToolVariant ? 1 : 0, node.toolVariantOf || null, node.hasToolVariant ? 1 : 0, node.version, node.documentation || null, JSON.stringify(node.properties, null, 2), JSON.stringify(node.operations, null, 2), JSON.stringify(node.credentials, null, 2), node.outputs ? JSON.stringify(node.outputs, null, 2) : null, node.outputNames ? JSON.stringify(node.outputNames, null, 2) : null);
    }
    getNode(nodeType) {
        const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
        const row = this.db.prepare(`
      SELECT * FROM nodes WHERE node_type = ?
    `).get(normalizedType);
        if (!row && normalizedType !== nodeType) {
            const originalRow = this.db.prepare(`
        SELECT * FROM nodes WHERE node_type = ?
      `).get(nodeType);
            if (originalRow) {
                return this.parseNodeRow(originalRow);
            }
        }
        if (!row)
            return null;
        return this.parseNodeRow(row);
    }
    getAITools() {
        const rows = this.db.prepare(`
      SELECT node_type, display_name, description, package_name
      FROM nodes
      WHERE is_ai_tool = 1
      ORDER BY display_name
    `).all();
        return rows.map(row => ({
            nodeType: row.node_type,
            displayName: row.display_name,
            description: row.description,
            package: row.package_name
        }));
    }
    safeJsonParse(json, defaultValue) {
        try {
            return JSON.parse(json);
        }
        catch {
            return defaultValue;
        }
    }
    upsertNode(node) {
        this.saveNode(node);
    }
    getNodeByType(nodeType) {
        return this.getNode(nodeType);
    }
    getNodesByCategory(category) {
        const rows = this.db.prepare(`
      SELECT * FROM nodes WHERE category = ?
      ORDER BY display_name
    `).all(category);
        return rows.map(row => this.parseNodeRow(row));
    }
    searchNodes(query, mode = 'OR', limit = 20) {
        let sql = '';
        const params = [];
        if (mode === 'FUZZY') {
            sql = `
        SELECT * FROM nodes
        WHERE node_type LIKE ? OR display_name LIKE ? OR description LIKE ?
        ORDER BY display_name
        LIMIT ?
      `;
            const fuzzyQuery = `%${query}%`;
            params.push(fuzzyQuery, fuzzyQuery, fuzzyQuery, limit);
        }
        else {
            const words = query.split(/\s+/).filter(w => w.length > 0);
            const conditions = words.map(() => '(node_type LIKE ? OR display_name LIKE ? OR description LIKE ?)');
            const operator = mode === 'AND' ? ' AND ' : ' OR ';
            sql = `
        SELECT * FROM nodes
        WHERE ${conditions.join(operator)}
        ORDER BY display_name
        LIMIT ?
      `;
            for (const word of words) {
                const searchTerm = `%${word}%`;
                params.push(searchTerm, searchTerm, searchTerm);
            }
            params.push(limit);
        }
        const rows = this.db.prepare(sql).all(...params);
        return rows.map(row => this.parseNodeRow(row));
    }
    getAllNodes(limit) {
        let sql = 'SELECT * FROM nodes ORDER BY display_name';
        if (limit) {
            sql += ` LIMIT ${limit}`;
        }
        const rows = this.db.prepare(sql).all();
        return rows.map(row => this.parseNodeRow(row));
    }
    getNodeCount() {
        const result = this.db.prepare('SELECT COUNT(*) as count FROM nodes').get();
        return result.count;
    }
    getAIToolNodes() {
        return this.getAITools();
    }
    getToolVariant(baseNodeType) {
        if (!baseNodeType || typeof baseNodeType !== 'string' || !baseNodeType.includes('.')) {
            return null;
        }
        const toolNodeType = `${baseNodeType}Tool`;
        return this.getNode(toolNodeType);
    }
    getBaseNodeForToolVariant(toolNodeType) {
        const row = this.db.prepare(`
      SELECT tool_variant_of FROM nodes WHERE node_type = ?
    `).get(toolNodeType);
        if (!row?.tool_variant_of)
            return null;
        return this.getNode(row.tool_variant_of);
    }
    getToolVariants() {
        const rows = this.db.prepare(`
      SELECT node_type, display_name, description, package_name, tool_variant_of
      FROM nodes
      WHERE is_tool_variant = 1
      ORDER BY display_name
    `).all();
        return rows.map(row => ({
            nodeType: row.node_type,
            displayName: row.display_name,
            description: row.description,
            package: row.package_name,
            toolVariantOf: row.tool_variant_of
        }));
    }
    getToolVariantCount() {
        const result = this.db.prepare('SELECT COUNT(*) as count FROM nodes WHERE is_tool_variant = 1').get();
        return result.count;
    }
    getNodesByPackage(packageName) {
        const rows = this.db.prepare(`
      SELECT * FROM nodes WHERE package_name = ?
      ORDER BY display_name
    `).all(packageName);
        return rows.map(row => this.parseNodeRow(row));
    }
    searchNodeProperties(nodeType, query, maxResults = 20) {
        const node = this.getNode(nodeType);
        if (!node || !node.properties)
            return [];
        const results = [];
        const searchLower = query.toLowerCase();
        function searchProperties(properties, path = []) {
            for (const prop of properties) {
                if (results.length >= maxResults)
                    break;
                const currentPath = [...path, prop.name || prop.displayName];
                const pathString = currentPath.join('.');
                if (prop.name?.toLowerCase().includes(searchLower) ||
                    prop.displayName?.toLowerCase().includes(searchLower) ||
                    prop.description?.toLowerCase().includes(searchLower)) {
                    results.push({
                        path: pathString,
                        property: prop,
                        description: prop.description
                    });
                }
                if (prop.options) {
                    searchProperties(prop.options, currentPath);
                }
            }
        }
        searchProperties(node.properties);
        return results;
    }
    parseNodeRow(row) {
        return {
            nodeType: row.node_type,
            displayName: row.display_name,
            description: row.description,
            category: row.category,
            developmentStyle: row.development_style,
            package: row.package_name,
            isAITool: Number(row.is_ai_tool) === 1,
            isTrigger: Number(row.is_trigger) === 1,
            isWebhook: Number(row.is_webhook) === 1,
            isVersioned: Number(row.is_versioned) === 1,
            isToolVariant: Number(row.is_tool_variant) === 1,
            toolVariantOf: row.tool_variant_of || null,
            hasToolVariant: Number(row.has_tool_variant) === 1,
            version: row.version,
            properties: this.safeJsonParse(row.properties_schema, []),
            operations: this.safeJsonParse(row.operations, []),
            credentials: this.safeJsonParse(row.credentials_required, []),
            hasDocumentation: !!row.documentation,
            outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
            outputNames: row.output_names ? this.safeJsonParse(row.output_names, null) : null
        };
    }
    getNodeOperations(nodeType, resource) {
        const node = this.getNode(nodeType);
        if (!node)
            return [];
        const operations = [];
        if (node.operations) {
            if (Array.isArray(node.operations)) {
                operations.push(...node.operations);
            }
            else if (typeof node.operations === 'object') {
                if (resource && node.operations[resource]) {
                    return node.operations[resource];
                }
                else {
                    Object.values(node.operations).forEach(ops => {
                        if (Array.isArray(ops)) {
                            operations.push(...ops);
                        }
                    });
                }
            }
        }
        if (node.properties && Array.isArray(node.properties)) {
            for (const prop of node.properties) {
                if (prop.name === 'operation' && prop.options) {
                    if (resource && prop.displayOptions?.show?.resource) {
                        const allowedResources = Array.isArray(prop.displayOptions.show.resource)
                            ? prop.displayOptions.show.resource
                            : [prop.displayOptions.show.resource];
                        if (!allowedResources.includes(resource)) {
                            continue;
                        }
                    }
                    operations.push(...prop.options);
                }
            }
        }
        return operations;
    }
    getNodeResources(nodeType) {
        const node = this.getNode(nodeType);
        if (!node || !node.properties)
            return [];
        const resources = [];
        for (const prop of node.properties) {
            if (prop.name === 'resource' && prop.options) {
                resources.push(...prop.options);
            }
        }
        return resources;
    }
    getOperationsForResource(nodeType, resource) {
        const node = this.getNode(nodeType);
        if (!node || !node.properties)
            return [];
        const operations = [];
        for (const prop of node.properties) {
            if (prop.name === 'operation' && prop.displayOptions?.show?.resource) {
                const allowedResources = Array.isArray(prop.displayOptions.show.resource)
                    ? prop.displayOptions.show.resource
                    : [prop.displayOptions.show.resource];
                if (allowedResources.includes(resource) && prop.options) {
                    operations.push(...prop.options);
                }
            }
        }
        return operations;
    }
    getAllOperations() {
        const allOperations = new Map();
        const nodes = this.getAllNodes();
        for (const node of nodes) {
            const operations = this.getNodeOperations(node.nodeType);
            if (operations.length > 0) {
                allOperations.set(node.nodeType, operations);
            }
        }
        return allOperations;
    }
    getAllResources() {
        const allResources = new Map();
        const nodes = this.getAllNodes();
        for (const node of nodes) {
            const resources = this.getNodeResources(node.nodeType);
            if (resources.length > 0) {
                allResources.set(node.nodeType, resources);
            }
        }
        return allResources;
    }
    getNodePropertyDefaults(nodeType) {
        try {
            const node = this.getNode(nodeType);
            if (!node || !node.properties)
                return {};
            const defaults = {};
            for (const prop of node.properties) {
                if (prop.name && prop.default !== undefined) {
                    defaults[prop.name] = prop.default;
                }
            }
            return defaults;
        }
        catch (error) {
            console.error(`Error getting property defaults for ${nodeType}:`, error);
            return {};
        }
    }
    getDefaultOperationForResource(nodeType, resource) {
        try {
            const node = this.getNode(nodeType);
            if (!node || !node.properties)
                return undefined;
            for (const prop of node.properties) {
                if (prop.name === 'operation') {
                    if (resource && prop.displayOptions?.show?.resource) {
                        const resourceDep = prop.displayOptions.show.resource;
                        if (!Array.isArray(resourceDep) && typeof resourceDep !== 'string') {
                            continue;
                        }
                        const allowedResources = Array.isArray(resourceDep)
                            ? resourceDep
                            : [resourceDep];
                        if (!allowedResources.includes(resource)) {
                            continue;
                        }
                    }
                    if (prop.default !== undefined) {
                        return prop.default;
                    }
                    if (prop.options && Array.isArray(prop.options) && prop.options.length > 0) {
                        const firstOption = prop.options[0];
                        return typeof firstOption === 'string' ? firstOption : firstOption.value;
                    }
                }
            }
        }
        catch (error) {
            console.error(`Error getting default operation for ${nodeType}:`, error);
            return undefined;
        }
        return undefined;
    }
    saveNodeVersion(versionData) {
        const stmt = this.db.prepare(`
      INSERT OR REPLACE INTO node_versions (
        node_type, version, package_name, display_name, description,
        category, is_current_max, properties_schema, operations,
        credentials_required, outputs, minimum_n8n_version,
        breaking_changes, deprecated_properties, added_properties,
        released_at
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);
        stmt.run(versionData.nodeType, versionData.version, versionData.packageName, versionData.displayName, versionData.description || null, versionData.category || null, versionData.isCurrentMax ? 1 : 0, versionData.propertiesSchema ? JSON.stringify(versionData.propertiesSchema) : null, versionData.operations ? JSON.stringify(versionData.operations) : null, versionData.credentialsRequired ? JSON.stringify(versionData.credentialsRequired) : null, versionData.outputs ? JSON.stringify(versionData.outputs) : null, versionData.minimumN8nVersion || null, versionData.breakingChanges ? JSON.stringify(versionData.breakingChanges) : null, versionData.deprecatedProperties ? JSON.stringify(versionData.deprecatedProperties) : null, versionData.addedProperties ? JSON.stringify(versionData.addedProperties) : null, versionData.releasedAt || null);
    }
    getNodeVersions(nodeType) {
        const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
        const rows = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ?
      ORDER BY version DESC
    `).all(normalizedType);
        return rows.map(row => this.parseNodeVersionRow(row));
    }
    getLatestNodeVersion(nodeType) {
        const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
        const row = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ? AND is_current_max = 1
      LIMIT 1
    `).get(normalizedType);
        if (!row)
            return null;
        return this.parseNodeVersionRow(row);
    }
    getNodeVersion(nodeType, version) {
        const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
        const row = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ? AND version = ?
    `).get(normalizedType, version);
        if (!row)
            return null;
        return this.parseNodeVersionRow(row);
    }
    savePropertyChange(changeData) {
        const stmt = this.db.prepare(`
      INSERT INTO version_property_changes (
        node_type, from_version, to_version, property_name, change_type,
        is_breaking, old_value, new_value, migration_hint, auto_migratable,
        migration_strategy, severity
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);
        stmt.run(changeData.nodeType, changeData.fromVersion, changeData.toVersion, changeData.propertyName, changeData.changeType, changeData.isBreaking ? 1 : 0, changeData.oldValue || null, changeData.newValue || null, changeData.migrationHint || null, changeData.autoMigratable ? 1 : 0, changeData.migrationStrategy ? JSON.stringify(changeData.migrationStrategy) : null, changeData.severity || 'MEDIUM');
    }
    getPropertyChanges(nodeType, fromVersion, toVersion) {
        const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
        const rows = this.db.prepare(`
      SELECT * FROM version_property_changes
      WHERE node_type = ? AND from_version = ? AND to_version = ?
      ORDER BY severity DESC, property_name
    `).all(normalizedType, fromVersion, toVersion);
        return rows.map(row => this.parsePropertyChangeRow(row));
    }
    getBreakingChanges(nodeType, fromVersion, toVersion) {
        const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
        let sql = `
      SELECT * FROM version_property_changes
      WHERE node_type = ? AND is_breaking = 1
    `;
        const params = [normalizedType];
        if (toVersion) {
            sql += ` AND from_version >= ? AND to_version <= ?`;
            params.push(fromVersion, toVersion);
        }
        else {
            sql += ` AND from_version >= ?`;
            params.push(fromVersion);
        }
        sql += ` ORDER BY from_version, to_version, severity DESC`;
        const rows = this.db.prepare(sql).all(...params);
        return rows.map(row => this.parsePropertyChangeRow(row));
    }
    getAutoMigratableChanges(nodeType, fromVersion, toVersion) {
        const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
        const rows = this.db.prepare(`
      SELECT * FROM version_property_changes
      WHERE node_type = ?
        AND from_version = ?
        AND to_version = ?
        AND auto_migratable = 1
      ORDER BY severity DESC
    `).all(normalizedType, fromVersion, toVersion);
        return rows.map(row => this.parsePropertyChangeRow(row));
    }
    hasVersionUpgradePath(nodeType, fromVersion, toVersion) {
        const versions = this.getNodeVersions(nodeType);
        if (versions.length === 0)
            return false;
        const fromExists = versions.some(v => v.version === fromVersion);
        const toExists = versions.some(v => v.version === toVersion);
        return fromExists && toExists;
    }
    getVersionedNodesCount() {
        const result = this.db.prepare(`
      SELECT COUNT(DISTINCT node_type) as count
      FROM node_versions
    `).get();
        return result.count;
    }
    parseNodeVersionRow(row) {
        return {
            id: row.id,
            nodeType: row.node_type,
            version: row.version,
            packageName: row.package_name,
            displayName: row.display_name,
            description: row.description,
            category: row.category,
            isCurrentMax: Number(row.is_current_max) === 1,
            propertiesSchema: row.properties_schema ? this.safeJsonParse(row.properties_schema, []) : null,
            operations: row.operations ? this.safeJsonParse(row.operations, []) : null,
            credentialsRequired: row.credentials_required ? this.safeJsonParse(row.credentials_required, []) : null,
            outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
            minimumN8nVersion: row.minimum_n8n_version,
            breakingChanges: row.breaking_changes ? this.safeJsonParse(row.breaking_changes, []) : [],
            deprecatedProperties: row.deprecated_properties ? this.safeJsonParse(row.deprecated_properties, []) : [],
            addedProperties: row.added_properties ? this.safeJsonParse(row.added_properties, []) : [],
            releasedAt: row.released_at,
            createdAt: row.created_at
        };
    }
    parsePropertyChangeRow(row) {
        return {
            id: row.id,
            nodeType: row.node_type,
            fromVersion: row.from_version,
            toVersion: row.to_version,
            propertyName: row.property_name,
            changeType: row.change_type,
            isBreaking: Number(row.is_breaking) === 1,
            oldValue: row.old_value,
            newValue: row.new_value,
            migrationHint: row.migration_hint,
            autoMigratable: Number(row.auto_migratable) === 1,
            migrationStrategy: row.migration_strategy ? this.safeJsonParse(row.migration_strategy, null) : null,
            severity: row.severity,
            createdAt: row.created_at
        };
    }
    createWorkflowVersion(data) {
        const stmt = this.db.prepare(`
      INSERT INTO workflow_versions (
        workflow_id, version_number, workflow_name, workflow_snapshot,
        trigger, operations, fix_types, metadata
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
    `);
        const result = stmt.run(data.workflowId, data.versionNumber, data.workflowName, JSON.stringify(data.workflowSnapshot), data.trigger, data.operations ? JSON.stringify(data.operations) : null, data.fixTypes ? JSON.stringify(data.fixTypes) : null, data.metadata ? JSON.stringify(data.metadata) : null);
        return result.lastInsertRowid;
    }
    getWorkflowVersions(workflowId, limit) {
        let sql = `
      SELECT * FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
    `;
        if (limit) {
            sql += ` LIMIT ?`;
            const rows = this.db.prepare(sql).all(workflowId, limit);
            return rows.map(row => this.parseWorkflowVersionRow(row));
        }
        const rows = this.db.prepare(sql).all(workflowId);
        return rows.map(row => this.parseWorkflowVersionRow(row));
    }
    getWorkflowVersion(versionId) {
        const row = this.db.prepare(`
      SELECT * FROM workflow_versions WHERE id = ?
    `).get(versionId);
        if (!row)
            return null;
        return this.parseWorkflowVersionRow(row);
    }
    getLatestWorkflowVersion(workflowId) {
        const row = this.db.prepare(`
      SELECT * FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
      LIMIT 1
    `).get(workflowId);
        if (!row)
            return null;
        return this.parseWorkflowVersionRow(row);
    }
    deleteWorkflowVersion(versionId) {
        this.db.prepare(`
      DELETE FROM workflow_versions WHERE id = ?
    `).run(versionId);
    }
    deleteWorkflowVersionsByWorkflowId(workflowId) {
        const result = this.db.prepare(`
      DELETE FROM workflow_versions WHERE workflow_id = ?
    `).run(workflowId);
        return result.changes;
    }
    pruneWorkflowVersions(workflowId, keepCount) {
        const versions = this.db.prepare(`
      SELECT id FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
    `).all(workflowId);
        if (versions.length <= keepCount) {
            return 0;
        }
        const idsToDelete = versions.slice(keepCount).map(v => v.id);
        if (idsToDelete.length === 0) {
            return 0;
        }
        const placeholders = idsToDelete.map(() => '?').join(',');
        const result = this.db.prepare(`
      DELETE FROM workflow_versions WHERE id IN (${placeholders})
    `).run(...idsToDelete);
        return result.changes;
    }
    truncateWorkflowVersions() {
        const result = this.db.prepare(`
      DELETE FROM workflow_versions
    `).run();
        return result.changes;
    }
    getWorkflowVersionCount(workflowId) {
        const result = this.db.prepare(`
      SELECT COUNT(*) as count FROM workflow_versions WHERE workflow_id = ?
    `).get(workflowId);
        return result.count;
    }
    getVersionStorageStats() {
        const totalResult = this.db.prepare(`
      SELECT COUNT(*) as count FROM workflow_versions
    `).get();
        const sizeResult = this.db.prepare(`
      SELECT SUM(LENGTH(workflow_snapshot)) as total_size FROM workflow_versions
    `).get();
        const byWorkflow = this.db.prepare(`
      SELECT
        workflow_id,
        workflow_name,
        COUNT(*) as version_count,
        SUM(LENGTH(workflow_snapshot)) as total_size,
        MAX(created_at) as last_backup
      FROM workflow_versions
      GROUP BY workflow_id
      ORDER BY version_count DESC
    `).all();
        return {
            totalVersions: totalResult.count,
            totalSize: sizeResult.total_size || 0,
            byWorkflow: byWorkflow.map(row => ({
                workflowId: row.workflow_id,
                workflowName: row.workflow_name,
                versionCount: row.version_count,
                totalSize: row.total_size,
                lastBackup: row.last_backup
            }))
        };
    }
    parseWorkflowVersionRow(row) {
        return {
            id: row.id,
            workflowId: row.workflow_id,
            versionNumber: row.version_number,
            workflowName: row.workflow_name,
            workflowSnapshot: this.safeJsonParse(row.workflow_snapshot, null),
            trigger: row.trigger,
            operations: row.operations ? this.safeJsonParse(row.operations, null) : null,
            fixTypes: row.fix_types ? this.safeJsonParse(row.fix_types, null) : null,
            metadata: row.metadata ? this.safeJsonParse(row.metadata, null) : null,
            createdAt: row.created_at
        };
    }
}
exports.NodeRepository = NodeRepository;
//# sourceMappingURL=node-repository.js.map
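Note that searchNodes() above is LIKE-based rather than FTS5-backed: 'FUZZY' performs a single %query% substring match across node_type, display_name and description, while 'OR' (the default) and 'AND' split the query on whitespace and join one such predicate per word. Illustrative calls, assuming a populated repo:

repo.searchNodes('http request', 'AND'); // rows containing both words somewhere
repo.searchNodes('http request', 'OR');  // rows containing either word
repo.searchNodes('ttp reques', 'FUZZY'); // literal substring match '%ttp reques%'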
1
dist/database/node-repository.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
10
dist/errors/validation-service-error.d.ts
vendored
Normal file
@@ -0,0 +1,10 @@
export declare class ValidationServiceError extends Error {
    readonly nodeType?: string | undefined;
    readonly property?: string | undefined;
    readonly cause?: Error | undefined;
    constructor(message: string, nodeType?: string | undefined, property?: string | undefined, cause?: Error | undefined);
    static jsonParseError(nodeType: string, cause: Error): ValidationServiceError;
    static nodeNotFound(nodeType: string): ValidationServiceError;
    static dataExtractionError(nodeType: string, dataType: string, cause?: Error): ValidationServiceError;
}
//# sourceMappingURL=validation-service-error.d.ts.map
1
dist/errors/validation-service-error.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"validation-service-error.d.ts","sourceRoot":"","sources":["../../src/errors/validation-service-error.ts"],"names":[],"mappings":"AAGA,qBAAa,sBAAuB,SAAQ,KAAK;aAG7B,QAAQ,CAAC,EAAE,MAAM;aACjB,QAAQ,CAAC,EAAE,MAAM;aACjB,KAAK,CAAC,EAAE,KAAK;gBAH7B,OAAO,EAAE,MAAM,EACC,QAAQ,CAAC,EAAE,MAAM,YAAA,EACjB,QAAQ,CAAC,EAAE,MAAM,YAAA,EACjB,KAAK,CAAC,EAAE,KAAK,YAAA;IAc/B,MAAM,CAAC,cAAc,CAAC,QAAQ,EAAE,MAAM,EAAE,KAAK,EAAE,KAAK,GAAG,sBAAsB;IAY7E,MAAM,CAAC,YAAY,CAAC,QAAQ,EAAE,MAAM,GAAG,sBAAsB;IAU7D,MAAM,CAAC,mBAAmB,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,KAAK,GAAG,sBAAsB;CAQtG"}
26
dist/errors/validation-service-error.js
vendored
Normal file
@@ -0,0 +1,26 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ValidationServiceError = void 0;
class ValidationServiceError extends Error {
    constructor(message, nodeType, property, cause) {
        super(message);
        this.nodeType = nodeType;
        this.property = property;
        this.cause = cause;
        this.name = 'ValidationServiceError';
        if (Error.captureStackTrace) {
            Error.captureStackTrace(this, ValidationServiceError);
        }
    }
    static jsonParseError(nodeType, cause) {
        return new ValidationServiceError(`Failed to parse JSON data for node ${nodeType}`, nodeType, undefined, cause);
    }
    static nodeNotFound(nodeType) {
        return new ValidationServiceError(`Node type ${nodeType} not found in repository`, nodeType);
    }
    static dataExtractionError(nodeType, dataType, cause) {
        return new ValidationServiceError(`Failed to extract ${dataType} for node ${nodeType}`, nodeType, dataType, cause);
    }
}
exports.ValidationServiceError = ValidationServiceError;
//# sourceMappingURL=validation-service-error.js.map
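A hedged sketch of how a caller might use the static factories above; the node type and raw payload are illustrative, not from the diff:

const raw = '{ not valid json';
try {
    JSON.parse(raw);
}
catch (e) {
    // Wraps the parse failure with the node it belonged to, preserving the cause.
    throw ValidationServiceError.jsonParseError('n8n-nodes-base.slack', e as Error);
}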
1
dist/errors/validation-service-error.js.map
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"validation-service-error.js","sourceRoot":"","sources":["../../src/errors/validation-service-error.ts"],"names":[],"mappings":";;;AAGA,MAAa,sBAAuB,SAAQ,KAAK;IAC/C,YACE,OAAe,EACC,QAAiB,EACjB,QAAiB,EACjB,KAAa;QAE7B,KAAK,CAAC,OAAO,CAAC,CAAC;QAJC,aAAQ,GAAR,QAAQ,CAAS;QACjB,aAAQ,GAAR,QAAQ,CAAS;QACjB,UAAK,GAAL,KAAK,CAAQ;QAG7B,IAAI,CAAC,IAAI,GAAG,wBAAwB,CAAC;QAGrC,IAAI,KAAK,CAAC,iBAAiB,EAAE,CAAC;YAC5B,KAAK,CAAC,iBAAiB,CAAC,IAAI,EAAE,sBAAsB,CAAC,CAAC;QACxD,CAAC;IACH,CAAC;IAKD,MAAM,CAAC,cAAc,CAAC,QAAgB,EAAE,KAAY;QAClD,OAAO,IAAI,sBAAsB,CAC/B,sCAAsC,QAAQ,EAAE,EAChD,QAAQ,EACR,SAAS,EACT,KAAK,CACN,CAAC;IACJ,CAAC;IAKD,MAAM,CAAC,YAAY,CAAC,QAAgB;QAClC,OAAO,IAAI,sBAAsB,CAC/B,aAAa,QAAQ,0BAA0B,EAC/C,QAAQ,CACT,CAAC;IACJ,CAAC;IAKD,MAAM,CAAC,mBAAmB,CAAC,QAAgB,EAAE,QAAgB,EAAE,KAAa;QAC1E,OAAO,IAAI,sBAAsB,CAC/B,qBAAqB,QAAQ,aAAa,QAAQ,EAAE,EACpD,QAAQ,EACR,QAAQ,EACR,KAAK,CACN,CAAC;IACJ,CAAC;CACF;AAjDD,wDAiDC"}
52
dist/http-server-single-session.d.ts
vendored
Normal file
@@ -0,0 +1,52 @@
#!/usr/bin/env node
import express from 'express';
import { InstanceContext } from './types/instance-context';
import { SessionState } from './types/session-state';
export declare class SingleSessionHTTPServer {
    private transports;
    private servers;
    private sessionMetadata;
    private sessionContexts;
    private contextSwitchLocks;
    private session;
    private consoleManager;
    private expressServer;
    private sessionTimeout;
    private authToken;
    private cleanupTimer;
    constructor();
    private startSessionCleanup;
    private cleanupExpiredSessions;
    private removeSession;
    private getActiveSessionCount;
    private canCreateSession;
    private isValidSessionId;
    private sanitizeErrorForClient;
    private updateSessionAccess;
    private switchSessionContext;
    private performContextSwitch;
    private getSessionMetrics;
    private loadAuthToken;
    private validateEnvironment;
    handleRequest(req: express.Request, res: express.Response, instanceContext?: InstanceContext): Promise<void>;
    private resetSessionSSE;
    private isExpired;
    private isSessionExpired;
    start(): Promise<void>;
    shutdown(): Promise<void>;
    getSessionInfo(): {
        active: boolean;
        sessionId?: string;
        age?: number;
        sessions?: {
            total: number;
            active: number;
            expired: number;
            max: number;
            sessionIds: string[];
        };
    };
    exportSessionState(): SessionState[];
    restoreSessionState(sessions: SessionState[]): number;
}
//# sourceMappingURL=http-server-single-session.d.ts.map
1
dist/http-server-single-session.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"http-server-single-session.d.ts","sourceRoot":"","sources":["../src/http-server-single-session.ts"],"names":[],"mappings":";AAMA,OAAO,OAAO,MAAM,SAAS,CAAC;AAoB9B,OAAO,EAAE,eAAe,EAA2B,MAAM,0BAA0B,CAAC;AACpF,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAuErD,qBAAa,uBAAuB;IAElC,OAAO,CAAC,UAAU,CAA8D;IAChF,OAAO,CAAC,OAAO,CAA0D;IACzE,OAAO,CAAC,eAAe,CAAsE;IAC7F,OAAO,CAAC,eAAe,CAA4D;IACnF,OAAO,CAAC,kBAAkB,CAAyC;IACnE,OAAO,CAAC,OAAO,CAAwB;IACvC,OAAO,CAAC,cAAc,CAAwB;IAC9C,OAAO,CAAC,aAAa,CAAM;IAC3B,OAAO,CAAC,cAAc,CAAkB;IACxC,OAAO,CAAC,SAAS,CAAuB;IACxC,OAAO,CAAC,YAAY,CAA+B;;IAcnD,OAAO,CAAC,mBAAmB;IAmB3B,OAAO,CAAC,sBAAsB;YAqChB,aAAa;IAuC3B,OAAO,CAAC,qBAAqB;IAO7B,OAAO,CAAC,gBAAgB;IAkBxB,OAAO,CAAC,gBAAgB;IASxB,OAAO,CAAC,sBAAsB;IAkC9B,OAAO,CAAC,mBAAmB;YASb,oBAAoB;YAwBpB,oBAAoB;IAwBlC,OAAO,CAAC,iBAAiB;IAsBzB,OAAO,CAAC,aAAa;IA2BrB,OAAO,CAAC,mBAAmB;IAoDrB,aAAa,CACjB,GAAG,EAAE,OAAO,CAAC,OAAO,EACpB,GAAG,EAAE,OAAO,CAAC,QAAQ,EACrB,eAAe,CAAC,EAAE,eAAe,GAChC,OAAO,CAAC,IAAI,CAAC;YAmOF,eAAe;IA8C7B,OAAO,CAAC,SAAS;IAYjB,OAAO,CAAC,gBAAgB;IASlB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAgnBtB,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;IAkD/B,cAAc,IAAI;QAChB,MAAM,EAAE,OAAO,CAAC;QAChB,SAAS,CAAC,EAAE,MAAM,CAAC;QACnB,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,QAAQ,CAAC,EAAE;YACT,KAAK,EAAE,MAAM,CAAC;YACd,MAAM,EAAE,MAAM,CAAC;YACf,OAAO,EAAE,MAAM,CAAC;YAChB,GAAG,EAAE,MAAM,CAAC;YACZ,UAAU,EAAE,MAAM,EAAE,CAAC;SACtB,CAAC;KACH;IAmDM,kBAAkB,IAAI,YAAY,EAAE;IAoEpC,mBAAmB,CAAC,QAAQ,EAAE,YAAY,EAAE,GAAG,MAAM;CAsG7D"}
1180
dist/http-server-single-session.js
vendored
Normal file
File diff suppressed because it is too large
1
dist/http-server-single-session.js.map
vendored
Normal file
File diff suppressed because one or more lines are too long
9
dist/http-server.d.ts
vendored
Normal file
@@ -0,0 +1,9 @@
#!/usr/bin/env node
export declare function loadAuthToken(): string | null;
export declare function startFixedHTTPServer(): Promise<void>;
declare module './mcp/server' {
    interface N8NDocumentationMCPServer {
        executeTool(name: string, args: any): Promise<any>;
    }
}
//# sourceMappingURL=http-server.d.ts.map
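loadAuthToken(), declared above and implemented in the http-server.js diff further down, prefers AUTH_TOKEN and only then falls back to reading AUTH_TOKEN_FILE. A minimal sketch; the secrets path is hypothetical:

delete process.env.AUTH_TOKEN;                               // force the file branch
process.env.AUTH_TOKEN_FILE = '/run/secrets/mcp_auth_token'; // hypothetical mount
const token = loadAuthToken(); // trimmed file contents, or null if the file is unreadable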
1
dist/http-server.d.ts.map
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"http-server.d.ts","sourceRoot":"","sources":["../src/http-server.ts"],"names":[],"mappings":";AA0CA,wBAAgB,aAAa,IAAI,MAAM,GAAG,IAAI,CAsB7C;AA+DD,wBAAsB,oBAAoB,kBA+dzC;AAGD,OAAO,QAAQ,cAAc,CAAC;IAC5B,UAAU,yBAAyB;QACjC,WAAW,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,GAAG,GAAG,OAAO,CAAC,GAAG,CAAC,CAAC;KACpD;CACF"}
478
dist/http-server.js
vendored
Normal file
@@ -0,0 +1,478 @@
#!/usr/bin/env node
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.loadAuthToken = loadAuthToken;
exports.startFixedHTTPServer = startFixedHTTPServer;
const express_1 = __importDefault(require("express"));
const tools_1 = require("./mcp/tools");
const tools_n8n_manager_1 = require("./mcp/tools-n8n-manager");
const server_1 = require("./mcp/server");
const logger_1 = require("./utils/logger");
const auth_1 = require("./utils/auth");
const version_1 = require("./utils/version");
const n8n_api_1 = require("./config/n8n-api");
const dotenv_1 = __importDefault(require("dotenv"));
const fs_1 = require("fs");
const url_detector_1 = require("./utils/url-detector");
const protocol_version_1 = require("./utils/protocol-version");
dotenv_1.default.config();
let expressServer;
let authToken = null;
function loadAuthToken() {
    if (process.env.AUTH_TOKEN) {
        logger_1.logger.info('Using AUTH_TOKEN from environment variable');
        return process.env.AUTH_TOKEN;
    }
    if (process.env.AUTH_TOKEN_FILE) {
        try {
            const token = (0, fs_1.readFileSync)(process.env.AUTH_TOKEN_FILE, 'utf-8').trim();
            logger_1.logger.info(`Loaded AUTH_TOKEN from file: ${process.env.AUTH_TOKEN_FILE}`);
            return token;
        }
        catch (error) {
            logger_1.logger.error(`Failed to read AUTH_TOKEN_FILE: ${process.env.AUTH_TOKEN_FILE}`, error);
            console.error(`ERROR: Failed to read AUTH_TOKEN_FILE: ${process.env.AUTH_TOKEN_FILE}`);
            console.error(error instanceof Error ? error.message : 'Unknown error');
            return null;
        }
    }
    return null;
}
function validateEnvironment() {
    authToken = loadAuthToken();
    if (!authToken || authToken.trim() === '') {
        logger_1.logger.error('No authentication token found or token is empty');
        console.error('ERROR: AUTH_TOKEN is required for HTTP mode and cannot be empty');
        console.error('Set AUTH_TOKEN environment variable or AUTH_TOKEN_FILE pointing to a file containing the token');
        console.error('Generate AUTH_TOKEN with: openssl rand -base64 32');
        process.exit(1);
    }
    authToken = authToken.trim();
    if (authToken.length < 32) {
        logger_1.logger.warn('AUTH_TOKEN should be at least 32 characters for security');
        console.warn('WARNING: AUTH_TOKEN should be at least 32 characters for security');
    }
    if (authToken === 'REPLACE_THIS_AUTH_TOKEN_32_CHARS_MIN_abcdefgh') {
        logger_1.logger.warn('⚠️ SECURITY WARNING: Using default AUTH_TOKEN - CHANGE IMMEDIATELY!');
        logger_1.logger.warn('Generate secure token with: openssl rand -base64 32');
        if (process.env.MCP_MODE === 'http') {
            console.warn('\n⚠️ SECURITY WARNING ⚠️');
            console.warn('Using default AUTH_TOKEN - CHANGE IMMEDIATELY!');
            console.warn('Generate secure token: openssl rand -base64 32');
            console.warn('Update via Railway dashboard environment variables\n');
        }
    }
}
async function shutdown() {
    logger_1.logger.info('Shutting down HTTP server...');
    console.log('Shutting down HTTP server...');
    if (expressServer) {
        expressServer.close(() => {
            logger_1.logger.info('HTTP server closed');
            console.log('HTTP server closed');
            process.exit(0);
        });
        setTimeout(() => {
            logger_1.logger.error('Forced shutdown after timeout');
            process.exit(1);
        }, 10000);
    }
    else {
        process.exit(0);
    }
}
async function startFixedHTTPServer() {
    validateEnvironment();
    const app = (0, express_1.default)();
    const trustProxy = process.env.TRUST_PROXY ? Number(process.env.TRUST_PROXY) : 0;
    if (trustProxy > 0) {
        app.set('trust proxy', trustProxy);
        logger_1.logger.info(`Trust proxy enabled with ${trustProxy} hop(s)`);
    }
    app.use((req, res, next) => {
        res.setHeader('X-Content-Type-Options', 'nosniff');
        res.setHeader('X-Frame-Options', 'DENY');
        res.setHeader('X-XSS-Protection', '1; mode=block');
        res.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');
        next();
    });
    app.use((req, res, next) => {
        const allowedOrigin = process.env.CORS_ORIGIN || '*';
        res.setHeader('Access-Control-Allow-Origin', allowedOrigin);
        res.setHeader('Access-Control-Allow-Methods', 'POST, GET, OPTIONS');
        res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, Accept');
        res.setHeader('Access-Control-Max-Age', '86400');
        if (req.method === 'OPTIONS') {
            res.sendStatus(204);
            return;
        }
        next();
    });
    app.use((req, res, next) => {
        logger_1.logger.info(`${req.method} ${req.path}`, {
            ip: req.ip,
            userAgent: req.get('user-agent'),
            contentLength: req.get('content-length')
        });
        next();
    });
    const mcpServer = new server_1.N8NDocumentationMCPServer();
    logger_1.logger.info('Created persistent MCP server instance');
    app.get('/', (req, res) => {
        const port = parseInt(process.env.PORT || '3000');
        const host = process.env.HOST || '0.0.0.0';
        const baseUrl = (0, url_detector_1.detectBaseUrl)(req, host, port);
        const endpoints = (0, url_detector_1.formatEndpointUrls)(baseUrl);
        res.json({
            name: 'n8n Documentation MCP Server',
            version: version_1.PROJECT_VERSION,
            description: 'Model Context Protocol server providing comprehensive n8n node documentation and workflow management',
            endpoints: {
                health: {
                    url: endpoints.health,
                    method: 'GET',
                    description: 'Health check and status information'
                },
                mcp: {
                    url: endpoints.mcp,
                    method: 'GET/POST',
                    description: 'MCP endpoint - GET for info, POST for JSON-RPC'
                }
            },
            authentication: {
                type: 'Bearer Token',
                header: 'Authorization: Bearer <token>',
                required_for: ['POST /mcp']
            },
            documentation: 'https://github.com/czlonkowski/n8n-mcp'
        });
    });
    app.get('/health', (req, res) => {
        res.json({
            status: 'ok',
            mode: 'http-fixed',
            version: version_1.PROJECT_VERSION,
            uptime: Math.floor(process.uptime()),
            memory: {
                used: Math.round(process.memoryUsage().heapUsed / 1024 / 1024),
                total: Math.round(process.memoryUsage().heapTotal / 1024 / 1024),
                unit: 'MB'
            },
            timestamp: new Date().toISOString()
        });
    });
    app.get('/version', (req, res) => {
        res.json({
            version: version_1.PROJECT_VERSION,
            buildTime: new Date().toISOString(),
            tools: tools_1.n8nDocumentationToolsFinal.map(t => t.name),
            commit: process.env.GIT_COMMIT || 'unknown'
        });
    });
    app.get('/test-tools', async (req, res) => {
        try {
            const result = await mcpServer.executeTool('get_node_essentials', { nodeType: 'nodes-base.httpRequest' });
            res.json({ status: 'ok', hasData: !!result, toolCount: tools_1.n8nDocumentationToolsFinal.length });
        }
        catch (error) {
            res.json({ status: 'error', message: error instanceof Error ? error.message : 'Unknown error' });
        }
    });
    app.get('/mcp', (req, res) => {
        res.json({
            description: 'n8n Documentation MCP Server',
            version: version_1.PROJECT_VERSION,
            endpoints: {
                mcp: {
                    method: 'POST',
                    path: '/mcp',
                    description: 'Main MCP JSON-RPC endpoint',
                    authentication: 'Bearer token required'
                },
                health: {
                    method: 'GET',
                    path: '/health',
                    description: 'Health check endpoint',
                    authentication: 'None'
                },
                root: {
                    method: 'GET',
                    path: '/',
                    description: 'API information',
                    authentication: 'None'
                }
            },
            documentation: 'https://github.com/czlonkowski/n8n-mcp'
        });
    });
    app.post('/mcp', async (req, res) => {
        const startTime = Date.now();
        const authHeader = req.headers.authorization;
        if (!authHeader) {
            logger_1.logger.warn('Authentication failed: Missing Authorization header', {
                ip: req.ip,
                userAgent: req.get('user-agent'),
                reason: 'no_auth_header'
            });
            res.status(401).json({
                jsonrpc: '2.0',
                error: {
                    code: -32001,
                    message: 'Unauthorized'
                },
                id: null
            });
            return;
        }
        if (!authHeader.startsWith('Bearer ')) {
            logger_1.logger.warn('Authentication failed: Invalid Authorization header format (expected Bearer token)', {
                ip: req.ip,
                userAgent: req.get('user-agent'),
                reason: 'invalid_auth_format',
                headerPrefix: authHeader.substring(0, Math.min(authHeader.length, 10)) + '...'
            });
            res.status(401).json({
                jsonrpc: '2.0',
                error: {
                    code: -32001,
                    message: 'Unauthorized'
                },
                id: null
            });
            return;
        }
        const token = authHeader.slice(7).trim();
        const isValidToken = authToken &&
            auth_1.AuthManager.timingSafeCompare(token, authToken);
        if (!isValidToken) {
            logger_1.logger.warn('Authentication failed: Invalid token', {
                ip: req.ip,
                userAgent: req.get('user-agent'),
                reason: 'invalid_token'
            });
            res.status(401).json({
                jsonrpc: '2.0',
                error: {
                    code: -32001,
                    message: 'Unauthorized'
                },
                id: null
            });
            return;
        }
        try {
            let body = '';
            req.on('data', chunk => {
                body += chunk.toString();
            });
            req.on('end', async () => {
                try {
                    const jsonRpcRequest = JSON.parse(body);
                    logger_1.logger.debug('Received JSON-RPC request:', { method: jsonRpcRequest.method });
                    let response;
                    switch (jsonRpcRequest.method) {
                        case 'initialize':
                            const negotiationResult = (0, protocol_version_1.negotiateProtocolVersion)(jsonRpcRequest.params?.protocolVersion, jsonRpcRequest.params?.clientInfo, req.get('user-agent'), req.headers);
                            (0, protocol_version_1.logProtocolNegotiation)(negotiationResult, logger_1.logger, 'HTTP_SERVER_INITIALIZE');
                            response = {
                                jsonrpc: '2.0',
                                result: {
                                    protocolVersion: negotiationResult.version,
                                    capabilities: {
                                        tools: {},
                                        resources: {}
                                    },
                                    serverInfo: {
                                        name: 'n8n-documentation-mcp',
                                        version: version_1.PROJECT_VERSION
                                    }
                                },
                                id: jsonRpcRequest.id
                            };
                            break;
                        case 'tools/list':
                            const tools = [...tools_1.n8nDocumentationToolsFinal];
                            if ((0, n8n_api_1.isN8nApiConfigured)()) {
                                tools.push(...tools_n8n_manager_1.n8nManagementTools);
                            }
                            response = {
                                jsonrpc: '2.0',
                                result: {
                                    tools
                                },
                                id: jsonRpcRequest.id
                            };
                            break;
                        case 'tools/call':
                            const toolName = jsonRpcRequest.params?.name;
                            const toolArgs = jsonRpcRequest.params?.arguments || {};
                            try {
                                const result = await mcpServer.executeTool(toolName, toolArgs);
                                let responseText = JSON.stringify(result, null, 2);
                                const mcpResult = {
                                    content: [
                                        {
                                            type: 'text',
                                            text: responseText
                                        }
                                    ]
                                };
                                if (toolName.startsWith('validate_')) {
                                    const resultSize = responseText.length;
                                    if (resultSize > 1000000) {
                                        logger_1.logger.warn(`Validation tool ${toolName} response is very large (${resultSize} chars). ` +
                                            `Truncating for HTTP transport safety.`);
                                        mcpResult.content[0].text = responseText.substring(0, 999000) +
                                            '\n\n[Response truncated due to size limits]';
                                    }
                                    else {
                                        mcpResult.structuredContent = result;
                                    }
                                }
                                response = {
                                    jsonrpc: '2.0',
                                    result: mcpResult,
                                    id: jsonRpcRequest.id
                                };
                            }
                            catch (error) {
                                response = {
                                    jsonrpc: '2.0',
                                    error: {
                                        code: -32603,
                                        message: `Error executing tool ${toolName}: ${error instanceof Error ? error.message : 'Unknown error'}`
                                    },
                                    id: jsonRpcRequest.id
                                };
                            }
                            break;
                        default:
                            response = {
                                jsonrpc: '2.0',
                                error: {
                                    code: -32601,
                                    message: `Method not found: ${jsonRpcRequest.method}`
                                },
                                id: jsonRpcRequest.id
                            };
                    }
                    res.setHeader('Content-Type', 'application/json');
                    res.json(response);
                    const duration = Date.now() - startTime;
                    logger_1.logger.info('MCP request completed', {
                        duration,
                        method: jsonRpcRequest.method
                    });
                }
                catch (error) {
                    logger_1.logger.error('Error processing request:', error);
                    res.status(400).json({
                        jsonrpc: '2.0',
                        error: {
                            code: -32700,
                            message: 'Parse error',
                            data: error instanceof Error ? error.message : 'Unknown error'
                        },
                        id: null
                    });
                }
            });
        }
        catch (error) {
            logger_1.logger.error('MCP request error:', error);
            if (!res.headersSent) {
                res.status(500).json({
                    jsonrpc: '2.0',
                    error: {
                        code: -32603,
                        message: 'Internal server error',
                        data: process.env.NODE_ENV === 'development'
                            ? error.message
                            : undefined
                    },
                    id: null
                });
            }
        }
    });
    app.use((req, res) => {
        res.status(404).json({
            error: 'Not found',
            message: `Cannot ${req.method} ${req.path}`
        });
    });
    app.use((err, req, res, next) => {
        logger_1.logger.error('Express error handler:', err);
        if (!res.headersSent) {
            res.status(500).json({
                jsonrpc: '2.0',
                error: {
                    code: -32603,
                    message: 'Internal server error',
                    data: process.env.NODE_ENV === 'development' ? err.message : undefined
                },
                id: null
            });
        }
    });
    const port = parseInt(process.env.PORT || '3000');
    const host = process.env.HOST || '0.0.0.0';
    expressServer = app.listen(port, host, () => {
        logger_1.logger.info(`n8n MCP Fixed HTTP Server started`, { port, host });
        const baseUrl = (0, url_detector_1.getStartupBaseUrl)(host, port);
        const endpoints = (0, url_detector_1.formatEndpointUrls)(baseUrl);
        console.log(`n8n MCP Fixed HTTP Server running on ${host}:${port}`);
        console.log(`Health check: ${endpoints.health}`);
        console.log(`MCP endpoint: ${endpoints.mcp}`);
        console.log('\nPress Ctrl+C to stop the server');
        if (authToken === 'REPLACE_THIS_AUTH_TOKEN_32_CHARS_MIN_abcdefgh') {
            setInterval(() => {
                logger_1.logger.warn('⚠️ Still using default AUTH_TOKEN - security risk!');
                if (process.env.MCP_MODE === 'http') {
                    console.warn('⚠️ REMINDER: Still using default AUTH_TOKEN - please change it!');
                }
            }, 300000);
        }
        if (process.env.BASE_URL || process.env.PUBLIC_URL) {
            console.log(`\nPublic URL configured: ${baseUrl}`);
        }
        else if (process.env.TRUST_PROXY && Number(process.env.TRUST_PROXY) > 0) {
            console.log(`\nNote: TRUST_PROXY is enabled. URLs will be auto-detected from proxy headers.`);
        }
    });
    expressServer.on('error', (error) => {
        if (error.code === 'EADDRINUSE') {
            logger_1.logger.error(`Port ${port} is already in use`);
            console.error(`ERROR: Port ${port} is already in use`);
            process.exit(1);
        }
        else {
            logger_1.logger.error('Server error:', error);
            console.error('Server error:', error);
            process.exit(1);
        }
    });
    process.on('SIGTERM', shutdown);
    process.on('SIGINT', shutdown);
    process.on('uncaughtException', (error) => {
        logger_1.logger.error('Uncaught exception:', error);
        console.error('Uncaught exception:', error);
        shutdown();
    });
    process.on('unhandledRejection', (reason, promise) => {
        logger_1.logger.error('Unhandled rejection:', reason);
        console.error('Unhandled rejection at:', promise, 'reason:', reason);
        shutdown();
    });
}
if (typeof require !== 'undefined' && require.main === module) {
    startFixedHTTPServer().catch(error => {
        logger_1.logger.error('Failed to start Fixed HTTP server:', error);
        console.error('Failed to start Fixed HTTP server:', error);
        process.exit(1);
    });
}
//# sourceMappingURL=http-server.js.map
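The handler above speaks plain JSON-RPC 2.0 over a single POST endpoint, so a client needs only a Bearer token and a well-formed request body. A minimal client-side sketch, not part of this commit; the URL and token are placeholders, while the method name, params shape, and response wrapping mirror the tools/call branch above:

// Hypothetical client for the POST /mcp endpoint implemented above.
// 'http://localhost:3000/mcp' and '<token>' are placeholders, not values from this commit.
async function callTool(name: string, args: Record<string, unknown>): Promise<string> {
  const response = await fetch('http://localhost:3000/mcp', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      // Must match the server's AUTH_TOKEN (compared with AuthManager.timingSafeCompare above)
      'Authorization': 'Bearer <token>',
    },
    body: JSON.stringify({
      jsonrpc: '2.0',
      method: 'tools/call',
      params: { name, arguments: args },
      id: 1,
    }),
  });
  const rpc = await response.json();
  if (rpc.error) throw new Error(rpc.error.message);
  // The server wraps tool output as { content: [{ type: 'text', text }] }
  return rpc.result.content[0].text;
}

For example, callTool('get_node_essentials', { nodeType: 'nodes-base.httpRequest' }) exercises the same path as the /test-tools route above.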
dist/http-server.js.map (1 line, vendored, normal file)
File diff suppressed because one or more lines are too long
dist/index.d.ts (11 lines, vendored, normal file)
@@ -0,0 +1,11 @@
export { N8NMCPEngine, EngineHealth, EngineOptions } from './mcp-engine';
export { SingleSessionHTTPServer } from './http-server-single-session';
export { ConsoleManager } from './utils/console-manager';
export { N8NDocumentationMCPServer } from './mcp/server';
export type { InstanceContext } from './types/instance-context';
export { validateInstanceContext, isInstanceContext } from './types/instance-context';
export type { SessionState } from './types/session-state';
export type { Tool, CallToolResult, ListToolsResult } from '@modelcontextprotocol/sdk/types.js';
import N8NMCPEngine from './mcp-engine';
export default N8NMCPEngine;
//# sourceMappingURL=index.d.ts.map
dist/index.d.ts.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAOA,OAAO,EAAE,YAAY,EAAE,YAAY,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AACzE,OAAO,EAAE,uBAAuB,EAAE,MAAM,8BAA8B,CAAC;AACvE,OAAO,EAAE,cAAc,EAAE,MAAM,yBAAyB,CAAC;AACzD,OAAO,EAAE,yBAAyB,EAAE,MAAM,cAAc,CAAC;AAGzD,YAAY,EACV,eAAe,EAChB,MAAM,0BAA0B,CAAC;AAClC,OAAO,EACL,uBAAuB,EACvB,iBAAiB,EAClB,MAAM,0BAA0B,CAAC;AAClC,YAAY,EACV,YAAY,EACb,MAAM,uBAAuB,CAAC;AAG/B,YAAY,EACV,IAAI,EACJ,cAAc,EACd,eAAe,EAChB,MAAM,oCAAoC,CAAC;AAG5C,OAAO,YAAY,MAAM,cAAc,CAAC;AACxC,eAAe,YAAY,CAAC"}
dist/index.js (20 lines, vendored, normal file)
@@ -0,0 +1,20 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.isInstanceContext = exports.validateInstanceContext = exports.N8NDocumentationMCPServer = exports.ConsoleManager = exports.SingleSessionHTTPServer = exports.N8NMCPEngine = void 0;
var mcp_engine_1 = require("./mcp-engine");
Object.defineProperty(exports, "N8NMCPEngine", { enumerable: true, get: function () { return mcp_engine_1.N8NMCPEngine; } });
var http_server_single_session_1 = require("./http-server-single-session");
Object.defineProperty(exports, "SingleSessionHTTPServer", { enumerable: true, get: function () { return http_server_single_session_1.SingleSessionHTTPServer; } });
var console_manager_1 = require("./utils/console-manager");
Object.defineProperty(exports, "ConsoleManager", { enumerable: true, get: function () { return console_manager_1.ConsoleManager; } });
var server_1 = require("./mcp/server");
Object.defineProperty(exports, "N8NDocumentationMCPServer", { enumerable: true, get: function () { return server_1.N8NDocumentationMCPServer; } });
var instance_context_1 = require("./types/instance-context");
Object.defineProperty(exports, "validateInstanceContext", { enumerable: true, get: function () { return instance_context_1.validateInstanceContext; } });
Object.defineProperty(exports, "isInstanceContext", { enumerable: true, get: function () { return instance_context_1.isInstanceContext; } });
const mcp_engine_2 = __importDefault(require("./mcp-engine"));
exports.default = mcp_engine_2.default;
//# sourceMappingURL=index.js.map
dist/index.js.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;;;;AAOA,2CAAyE;AAAhE,0GAAA,YAAY,OAAA;AACrB,2EAAuE;AAA9D,qIAAA,uBAAuB,OAAA;AAChC,2DAAyD;AAAhD,iHAAA,cAAc,OAAA;AACvB,uCAAyD;AAAhD,mHAAA,yBAAyB,OAAA;AAMlC,6DAGkC;AAFhC,2HAAA,uBAAuB,OAAA;AACvB,qHAAA,iBAAiB,OAAA;AAcnB,8DAAwC;AACxC,kBAAe,oBAAY,CAAC"}
dist/loaders/node-loader.d.ts (11 lines, vendored, normal file)
@@ -0,0 +1,11 @@
export interface LoadedNode {
    packageName: string;
    nodeName: string;
    NodeClass: any;
}
export declare class N8nNodeLoader {
    private readonly CORE_PACKAGES;
    loadAllNodes(): Promise<LoadedNode[]>;
    private loadPackageNodes;
}
//# sourceMappingURL=node-loader.d.ts.map
dist/loaders/node-loader.d.ts.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"node-loader.d.ts","sourceRoot":"","sources":["../../src/loaders/node-loader.ts"],"names":[],"mappings":"AAEA,MAAM,WAAW,UAAU;IACzB,WAAW,EAAE,MAAM,CAAC;IACpB,QAAQ,EAAE,MAAM,CAAC;IACjB,SAAS,EAAE,GAAG,CAAC;CAChB;AAED,qBAAa,aAAa;IACxB,OAAO,CAAC,QAAQ,CAAC,aAAa,CAG5B;IAEI,YAAY,IAAI,OAAO,CAAC,UAAU,EAAE,CAAC;YAmB7B,gBAAgB;CAqD/B"}
dist/loaders/node-loader.js (79 lines, vendored, normal file)
@@ -0,0 +1,79 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.N8nNodeLoader = void 0;
const path_1 = __importDefault(require("path"));
class N8nNodeLoader {
    constructor() {
        this.CORE_PACKAGES = [
            { name: 'n8n-nodes-base', path: 'n8n-nodes-base' },
            { name: '@n8n/n8n-nodes-langchain', path: '@n8n/n8n-nodes-langchain' }
        ];
    }
    async loadAllNodes() {
        const results = [];
        for (const pkg of this.CORE_PACKAGES) {
            try {
                console.log(`\n📦 Loading package: ${pkg.name} from ${pkg.path}`);
                const packageJson = require(`${pkg.path}/package.json`);
                console.log(` Found ${Object.keys(packageJson.n8n?.nodes || {}).length} nodes in package.json`);
                const nodes = await this.loadPackageNodes(pkg.name, pkg.path, packageJson);
                results.push(...nodes);
            }
            catch (error) {
                console.error(`Failed to load ${pkg.name}:`, error);
            }
        }
        return results;
    }
    async loadPackageNodes(packageName, packagePath, packageJson) {
        const n8nConfig = packageJson.n8n || {};
        const nodes = [];
        const nodesList = n8nConfig.nodes || [];
        if (Array.isArray(nodesList)) {
            for (const nodePath of nodesList) {
                try {
                    const fullPath = require.resolve(`${packagePath}/${nodePath}`);
                    const nodeModule = require(fullPath);
                    const nodeNameMatch = nodePath.match(/\/([^\/]+)\.node\.(js|ts)$/);
                    const nodeName = nodeNameMatch ? nodeNameMatch[1] : path_1.default.basename(nodePath, '.node.js');
                    const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
                    if (NodeClass) {
                        nodes.push({ packageName, nodeName, NodeClass });
                        console.log(` ✓ Loaded ${nodeName} from ${packageName}`);
                    }
                    else {
                        console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`);
                    }
                }
                catch (error) {
                    console.error(` ✗ Failed to load node from ${packageName}/${nodePath}:`, error.message);
                }
            }
        }
        else {
            for (const [nodeName, nodePath] of Object.entries(nodesList)) {
                try {
                    const fullPath = require.resolve(`${packagePath}/${nodePath}`);
                    const nodeModule = require(fullPath);
                    const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
                    if (NodeClass) {
                        nodes.push({ packageName, nodeName, NodeClass });
                        console.log(` ✓ Loaded ${nodeName} from ${packageName}`);
                    }
                    else {
                        console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`);
                    }
                }
                catch (error) {
                    console.error(` ✗ Failed to load node ${nodeName} from ${packageName}:`, error.message);
                }
            }
        }
        return nodes;
    }
}
exports.N8nNodeLoader = N8nNodeLoader;
//# sourceMappingURL=node-loader.js.map
dist/loaders/node-loader.js.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"node-loader.js","sourceRoot":"","sources":["../../src/loaders/node-loader.ts"],"names":[],"mappings":";;;;;;AAAA,gDAAwB;AAQxB,MAAa,aAAa;IAA1B;QACmB,kBAAa,GAAG;YAC/B,EAAE,IAAI,EAAE,gBAAgB,EAAE,IAAI,EAAE,gBAAgB,EAAE;YAClD,EAAE,IAAI,EAAE,0BAA0B,EAAE,IAAI,EAAE,0BAA0B,EAAE;SACvE,CAAC;IA0EJ,CAAC;IAxEC,KAAK,CAAC,YAAY;QAChB,MAAM,OAAO,GAAiB,EAAE,CAAC;QAEjC,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,aAAa,EAAE,CAAC;YACrC,IAAI,CAAC;gBACH,OAAO,CAAC,GAAG,CAAC,yBAAyB,GAAG,CAAC,IAAI,SAAS,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;gBAElE,MAAM,WAAW,GAAG,OAAO,CAAC,GAAG,GAAG,CAAC,IAAI,eAAe,CAAC,CAAC;gBACxD,OAAO,CAAC,GAAG,CAAC,WAAW,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,GAAG,EAAE,KAAK,IAAI,EAAE,CAAC,CAAC,MAAM,wBAAwB,CAAC,CAAC;gBACjG,MAAM,KAAK,GAAG,MAAM,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,IAAI,EAAE,GAAG,CAAC,IAAI,EAAE,WAAW,CAAC,CAAC;gBAC3E,OAAO,CAAC,IAAI,CAAC,GAAG,KAAK,CAAC,CAAC;YACzB,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBACf,OAAO,CAAC,KAAK,CAAC,kBAAkB,GAAG,CAAC,IAAI,GAAG,EAAE,KAAK,CAAC,CAAC;YACtD,CAAC;QACH,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;IAEO,KAAK,CAAC,gBAAgB,CAAC,WAAmB,EAAE,WAAmB,EAAE,WAAgB;QACvF,MAAM,SAAS,GAAG,WAAW,CAAC,GAAG,IAAI,EAAE,CAAC;QACxC,MAAM,KAAK,GAAiB,EAAE,CAAC;QAG/B,MAAM,SAAS,GAAG,SAAS,CAAC,KAAK,IAAI,EAAE,CAAC;QAExC,IAAI,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC,EAAE,CAAC;YAE7B,KAAK,MAAM,QAAQ,IAAI,SAAS,EAAE,CAAC;gBACjC,IAAI,CAAC;oBACH,MAAM,QAAQ,GAAG,OAAO,CAAC,OAAO,CAAC,GAAG,WAAW,IAAI,QAAQ,EAAE,CAAC,CAAC;oBAC/D,MAAM,UAAU,GAAG,OAAO,CAAC,QAAQ,CAAC,CAAC;oBAGrC,MAAM,aAAa,GAAG,QAAQ,CAAC,KAAK,CAAC,4BAA4B,CAAC,CAAC;oBACnE,MAAM,QAAQ,GAAG,aAAa,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAI,CAAC,QAAQ,CAAC,QAAQ,EAAE,UAAU,CAAC,CAAC;oBAGxF,MAAM,SAAS,GAAG,UAAU,CAAC,OAAO,IAAI,UAAU,CAAC,QAAQ,CAAC,IAAI,MAAM,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;oBAC7F,IAAI,SAAS,EAAE,CAAC;wBACd,KAAK,CAAC,IAAI,CAAC,EAAE,WAAW,EAAE,QAAQ,EAAE,SAAS,EAAE,CAAC,CAAC;wBACjD,OAAO,CAAC,GAAG,CAAC,cAAc,QAAQ,SAAS,WAAW,EAAE,CAAC,CAAC;oBAC5D,CAAC;yBAAM,CAAC;wBACN,OAAO,CAAC,IAAI,CAAC,iCAAiC,QAAQ,OAAO,WAAW,EAAE,CAAC,CAAC;oBAC9E,CAAC;gBACH,CAAC;gBAAC,OAAO,KAAK,EAAE,CAAC;oBACf,OAAO,CAAC,KAAK,CAAC,gCAAgC,WAAW,IAAI,QAAQ,GAAG,EAAG,KAAe,CAAC,OAAO,CAAC,CAAC;gBACtG,CAAC;YACH,CAAC;QACH,CAAC;aAAM,CAAC;YAEN,KAAK,MAAM,CAAC,QAAQ,EAAE,QAAQ,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,SAAS,CAAC,EAAE,CAAC;gBAC7D,IAAI,CAAC;oBACH,MAAM,QAAQ,GAAG,OAAO,CAAC,OAAO,CAAC,GAAG,WAAW,IAAI,QAAkB,EAAE,CAAC,CAAC;oBACzE,MAAM,UAAU,GAAG,OAAO,CAAC,QAAQ,CAAC,CAAC;oBAGrC,MAAM,SAAS,GAAG,UAAU,CAAC,OAAO,IAAI,UAAU,CAAC,QAAQ,CAAC,IAAI,MAAM,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;oBAC7F,IAAI,SAAS,EAAE,CAAC;wBACd,KAAK,CAAC,IAAI,CAAC,EAAE,WAAW,EAAE,QAAQ,EAAE,SAAS,EAAE,CAAC,CAAC;wBACjD,OAAO,CAAC,GAAG,CAAC,cAAc,QAAQ,SAAS,WAAW,EAAE,CAAC,CAAC;oBAC5D,CAAC;yBAAM,CAAC;wBACN,OAAO,CAAC,IAAI,CAAC,iCAAiC,QAAQ,OAAO,WAAW,EAAE,CAAC,CAAC;oBAC9E,CAAC;gBACH,CAAC;gBAAC,OAAO,KAAK,EAAE,CAAC;oBACf,OAAO,CAAC,KAAK,CAAC,2BAA2B,QAAQ,SAAS,WAAW,GAAG,EAAG,KAAe,CAAC,OAAO,CAAC,CAAC;gBACtG,CAAC;YACH,CAAC;QACH,CAAC;QAED,OAAO,KAAK,CAAC;IACf,CAAC;CACF;AA9ED,sCA8EC"}
dist/mappers/docs-mapper.d.ts (7 lines, vendored, normal file)
@@ -0,0 +1,7 @@
export declare class DocsMapper {
    private docsPath;
    private readonly KNOWN_FIXES;
    fetchDocumentation(nodeType: string): Promise<string | null>;
    private enhanceLoopNodeDocumentation;
}
//# sourceMappingURL=docs-mapper.d.ts.map
dist/mappers/docs-mapper.d.ts.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"docs-mapper.d.ts","sourceRoot":"","sources":["../../src/mappers/docs-mapper.ts"],"names":[],"mappings":"AAGA,qBAAa,UAAU;IACrB,OAAO,CAAC,QAAQ,CAAwC;IAGxD,OAAO,CAAC,QAAQ,CAAC,WAAW,CAU1B;IAEI,kBAAkB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAkDlE,OAAO,CAAC,4BAA4B;CAmDrC"}
dist/mappers/docs-mapper.js (106 lines, vendored, normal file)
@@ -0,0 +1,106 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.DocsMapper = void 0;
const fs_1 = require("fs");
const path_1 = __importDefault(require("path"));
class DocsMapper {
    constructor() {
        this.docsPath = path_1.default.join(process.cwd(), 'n8n-docs');
        this.KNOWN_FIXES = {
            'httpRequest': 'httprequest',
            'code': 'code',
            'webhook': 'webhook',
            'respondToWebhook': 'respondtowebhook',
            'n8n-nodes-base.httpRequest': 'httprequest',
            'n8n-nodes-base.code': 'code',
            'n8n-nodes-base.webhook': 'webhook',
            'n8n-nodes-base.respondToWebhook': 'respondtowebhook'
        };
    }
    async fetchDocumentation(nodeType) {
        const fixedType = this.KNOWN_FIXES[nodeType] || nodeType;
        const nodeName = fixedType.split('.').pop()?.toLowerCase();
        if (!nodeName) {
            console.log(`⚠️ Could not extract node name from: ${nodeType}`);
            return null;
        }
        console.log(`📄 Looking for docs for: ${nodeType} -> ${nodeName}`);
        const possiblePaths = [
            `docs/integrations/builtin/core-nodes/n8n-nodes-base.${nodeName}.md`,
            `docs/integrations/builtin/app-nodes/n8n-nodes-base.${nodeName}.md`,
            `docs/integrations/builtin/trigger-nodes/n8n-nodes-base.${nodeName}.md`,
            `docs/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.${nodeName}.md`,
            `docs/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.${nodeName}.md`,
            `docs/integrations/builtin/core-nodes/n8n-nodes-base.${nodeName}/index.md`,
            `docs/integrations/builtin/app-nodes/n8n-nodes-base.${nodeName}/index.md`,
            `docs/integrations/builtin/trigger-nodes/n8n-nodes-base.${nodeName}/index.md`,
            `docs/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.${nodeName}/index.md`,
            `docs/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.${nodeName}/index.md`
        ];
        for (const relativePath of possiblePaths) {
            try {
                const fullPath = path_1.default.join(this.docsPath, relativePath);
                let content = await fs_1.promises.readFile(fullPath, 'utf-8');
                console.log(`  ✓ Found docs at: ${relativePath}`);
                content = this.enhanceLoopNodeDocumentation(nodeType, content);
                return content;
            }
            catch (error) {
                continue;
            }
        }
        console.log(`  ✗ No docs found for ${nodeName}`);
        return null;
    }
    enhanceLoopNodeDocumentation(nodeType, content) {
        if (nodeType.includes('splitInBatches')) {
            const outputGuidance = `

## CRITICAL OUTPUT CONNECTION INFORMATION

**⚠️ OUTPUT INDICES ARE COUNTERINTUITIVE ⚠️**

The SplitInBatches node has TWO outputs with specific indices:
- **Output 0 (index 0) = "done"**: Receives final processed data when loop completes
- **Output 1 (index 1) = "loop"**: Receives current batch data during iteration

### Correct Connection Pattern:
1. Connect nodes that PROCESS items inside the loop to **Output 1 ("loop")**
2. Connect nodes that run AFTER the loop completes to **Output 0 ("done")**
3. The last processing node in the loop must connect back to the SplitInBatches node

### Common Mistake:
AI assistants often connect these backwards because the logical flow (loop first, then done) doesn't match the technical indices (done=0, loop=1).

`;
            const insertPoint = content.indexOf('## When to use');
            if (insertPoint > -1) {
                content = content.slice(0, insertPoint) + outputGuidance + content.slice(insertPoint);
            }
            else {
                content = outputGuidance + '\n' + content;
            }
        }
        if (nodeType.includes('.if')) {
            const outputGuidance = `

## Output Connection Information

The IF node has TWO outputs:
- **Output 0 (index 0) = "true"**: Items that match the condition
- **Output 1 (index 1) = "false"**: Items that do not match the condition

`;
            const insertPoint = content.indexOf('## Node parameters');
            if (insertPoint > -1) {
                content = content.slice(0, insertPoint) + outputGuidance + content.slice(insertPoint);
            }
        }
        return content;
    }
}
exports.DocsMapper = DocsMapper;
//# sourceMappingURL=docs-mapper.js.map
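The SplitInBatches guidance embedded above is easiest to see in a workflow's connections object. A hedged sketch, assuming n8n's usual connections layout where the outer array position is the output index; the node names are invented for illustration, and only the index semantics come from the text above:

// Illustrative only: outer array position = output index (0 = "done", 1 = "loop").
const connections = {
  'Split In Batches': {
    main: [
      // Output 0 ("done"): runs after the loop completes
      [{ node: 'Summarize Results', type: 'main', index: 0 }],
      // Output 1 ("loop"): processes the current batch
      [{ node: 'Process Item', type: 'main', index: 0 }],
    ],
  },
  // The last processing node connects back to Split In Batches to continue the loop
  'Process Item': {
    main: [[{ node: 'Split In Batches', type: 'main', index: 0 }]],
  },
};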
dist/mappers/docs-mapper.js.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"docs-mapper.js","sourceRoot":"","sources":["../../src/mappers/docs-mapper.ts"],"names":[],"mappings":";;;;;;AAAA,2BAAoC;AACpC,gDAAwB;AAExB,MAAa,UAAU;IAAvB;QACU,aAAQ,GAAG,cAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,UAAU,CAAC,CAAC;QAGvC,gBAAW,GAA2B;YACrD,aAAa,EAAE,aAAa;YAC5B,MAAM,EAAE,MAAM;YACd,SAAS,EAAE,SAAS;YACpB,kBAAkB,EAAE,kBAAkB;YAEtC,4BAA4B,EAAE,aAAa;YAC3C,qBAAqB,EAAE,MAAM;YAC7B,wBAAwB,EAAE,SAAS;YACnC,iCAAiC,EAAE,kBAAkB;SACtD,CAAC;IAuGJ,CAAC;IArGC,KAAK,CAAC,kBAAkB,CAAC,QAAgB;QAEvC,MAAM,SAAS,GAAG,IAAI,CAAC,WAAW,CAAC,QAAQ,CAAC,IAAI,QAAQ,CAAC;QAGzD,MAAM,QAAQ,GAAG,SAAS,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,EAAE,WAAW,EAAE,CAAC;QAC3D,IAAI,CAAC,QAAQ,EAAE,CAAC;YACd,OAAO,CAAC,GAAG,CAAC,yCAAyC,QAAQ,EAAE,CAAC,CAAC;YACjE,OAAO,IAAI,CAAC;QACd,CAAC;QAED,OAAO,CAAC,GAAG,CAAC,4BAA4B,QAAQ,OAAO,QAAQ,EAAE,CAAC,CAAC;QAGnE,MAAM,aAAa,GAAG;YAEpB,uDAAuD,QAAQ,KAAK;YACpE,sDAAsD,QAAQ,KAAK;YACnE,0DAA0D,QAAQ,KAAK;YACvE,0EAA0E,QAAQ,KAAK;YACvF,yEAAyE,QAAQ,KAAK;YAEtF,uDAAuD,QAAQ,WAAW;YAC1E,sDAAsD,QAAQ,WAAW;YACzE,0DAA0D,QAAQ,WAAW;YAC7E,0EAA0E,QAAQ,WAAW;YAC7F,yEAAyE,QAAQ,WAAW;SAC7F,CAAC;QAGF,KAAK,MAAM,YAAY,IAAI,aAAa,EAAE,CAAC;YACzC,IAAI,CAAC;gBACH,MAAM,QAAQ,GAAG,cAAI,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,YAAY,CAAC,CAAC;gBACxD,IAAI,OAAO,GAAG,MAAM,aAAE,CAAC,QAAQ,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;gBACnD,OAAO,CAAC,GAAG,CAAC,sBAAsB,YAAY,EAAE,CAAC,CAAC;gBAGlD,OAAO,GAAG,IAAI,CAAC,4BAA4B,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;gBAE/D,OAAO,OAAO,CAAC;YACjB,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBAEf,SAAS;YACX,CAAC;QACH,CAAC;QAED,OAAO,CAAC,GAAG,CAAC,yBAAyB,QAAQ,EAAE,CAAC,CAAC;QACjD,OAAO,IAAI,CAAC;IACd,CAAC;IAEO,4BAA4B,CAAC,QAAgB,EAAE,OAAe;QAEpE,IAAI,QAAQ,CAAC,QAAQ,CAAC,gBAAgB,CAAC,EAAE,CAAC;YACxC,MAAM,cAAc,GAAG;;;;;;;;;;;;;;;;;;CAkB5B,CAAC;YAEI,MAAM,WAAW,GAAG,OAAO,CAAC,OAAO,CAAC,gBAAgB,CAAC,CAAC;YACtD,IAAI,WAAW,GAAG,CAAC,CAAC,EAAE,CAAC;gBACrB,OAAO,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,WAAW,CAAC,GAAG,cAAc,GAAG,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;YACxF,CAAC;iBAAM,CAAC;gBAEN,OAAO,GAAG,cAAc,GAAG,IAAI,GAAG,OAAO,CAAC;YAC5C,CAAC;QACH,CAAC;QAGD,IAAI,QAAQ,CAAC,QAAQ,CAAC,KAAK,CAAC,EAAE,CAAC;YAC7B,MAAM,cAAc,GAAG;;;;;;;;CAQ5B,CAAC;YACI,MAAM,WAAW,GAAG,OAAO,CAAC,OAAO,CAAC,oBAAoB,CAAC,CAAC;YAC1D,IAAI,WAAW,GAAG,CAAC,CAAC,EAAE,CAAC;gBACrB,OAAO,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,WAAW,CAAC,GAAG,cAAc,GAAG,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;YACxF,CAAC;QACH,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;CACF;AArHD,gCAqHC"}
dist/mcp-engine.d.ts (36 lines, vendored, normal file)
@@ -0,0 +1,36 @@
import { Request, Response } from 'express';
import { InstanceContext } from './types/instance-context';
import { SessionState } from './types/session-state';
export interface EngineHealth {
    status: 'healthy' | 'unhealthy';
    uptime: number;
    sessionActive: boolean;
    memoryUsage: {
        used: number;
        total: number;
        unit: string;
    };
    version: string;
}
export interface EngineOptions {
    sessionTimeout?: number;
    logLevel?: 'error' | 'warn' | 'info' | 'debug';
}
export declare class N8NMCPEngine {
    private server;
    private startTime;
    constructor(options?: EngineOptions);
    processRequest(req: Request, res: Response, instanceContext?: InstanceContext): Promise<void>;
    healthCheck(): Promise<EngineHealth>;
    getSessionInfo(): {
        active: boolean;
        sessionId?: string;
        age?: number;
    };
    exportSessionState(): SessionState[];
    restoreSessionState(sessions: SessionState[]): number;
    shutdown(): Promise<void>;
    start(): Promise<void>;
}
export default N8NMCPEngine;
//# sourceMappingURL=mcp-engine.d.ts.map
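The declaration above is the library's embedding surface. A minimal host sketch, assuming the package is importable under its repository name and served with Express (the import specifier, route paths, and options are illustrative, not confirmed by this commit):

// Hypothetical embedding; 'n8n-mcp' as import specifier is an assumption.
import express from 'express';
import N8NMCPEngine from 'n8n-mcp';

const app = express();
const engine = new N8NMCPEngine({ logLevel: 'info' });

// Delegate MCP traffic to the engine (SingleSessionHTTPServer under the hood)
app.post('/mcp', (req, res) => {
  engine.processRequest(req, res).catch(() => res.status(500).end());
});

// Surface EngineHealth for orchestrators
app.get('/healthz', async (_req, res) => {
  res.json(await engine.healthCheck());
});

app.listen(3000);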
dist/mcp-engine.d.ts.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"mcp-engine.d.ts","sourceRoot":"","sources":["../src/mcp-engine.ts"],"names":[],"mappings":"AAOA,OAAO,EAAE,OAAO,EAAE,QAAQ,EAAE,MAAM,SAAS,CAAC;AAG5C,OAAO,EAAE,eAAe,EAAE,MAAM,0BAA0B,CAAC;AAC3D,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAErD,MAAM,WAAW,YAAY;IAC3B,MAAM,EAAE,SAAS,GAAG,WAAW,CAAC;IAChC,MAAM,EAAE,MAAM,CAAC;IACf,aAAa,EAAE,OAAO,CAAC;IACvB,WAAW,EAAE;QACX,IAAI,EAAE,MAAM,CAAC;QACb,KAAK,EAAE,MAAM,CAAC;QACd,IAAI,EAAE,MAAM,CAAC;KACd,CAAC;IACF,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,aAAa;IAC5B,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,QAAQ,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG,MAAM,GAAG,OAAO,CAAC;CAChD;AAED,qBAAa,YAAY;IACvB,OAAO,CAAC,MAAM,CAA0B;IACxC,OAAO,CAAC,SAAS,CAAO;gBAEZ,OAAO,GAAE,aAAkB;IA8BjC,cAAc,CAClB,GAAG,EAAE,OAAO,EACZ,GAAG,EAAE,QAAQ,EACb,eAAe,CAAC,EAAE,eAAe,GAChC,OAAO,CAAC,IAAI,CAAC;IAkBV,WAAW,IAAI,OAAO,CAAC,YAAY,CAAC;IAgC1C,cAAc,IAAI;QAAE,MAAM,EAAE,OAAO,CAAC;QAAC,SAAS,CAAC,EAAE,MAAM,CAAC;QAAC,GAAG,CAAC,EAAE,MAAM,CAAA;KAAE;IAoBvE,kBAAkB,IAAI,YAAY,EAAE;IAwBpC,mBAAmB,CAAC,QAAQ,EAAE,YAAY,EAAE,GAAG,MAAM;IAiB/C,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;IASzB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;CAG7B;AA2CD,eAAe,YAAY,CAAC"}
dist/mcp-engine.js (77 lines, vendored, normal file)
@@ -0,0 +1,77 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.N8NMCPEngine = void 0;
const http_server_single_session_1 = require("./http-server-single-session");
const logger_1 = require("./utils/logger");
class N8NMCPEngine {
    constructor(options = {}) {
        this.server = new http_server_single_session_1.SingleSessionHTTPServer();
        this.startTime = new Date();
        if (options.logLevel) {
            process.env.LOG_LEVEL = options.logLevel;
        }
    }
    async processRequest(req, res, instanceContext) {
        try {
            await this.server.handleRequest(req, res, instanceContext);
        }
        catch (error) {
            logger_1.logger.error('Engine processRequest error:', error);
            throw error;
        }
    }
    async healthCheck() {
        try {
            const sessionInfo = this.server.getSessionInfo();
            const memoryUsage = process.memoryUsage();
            return {
                status: 'healthy',
                uptime: Math.floor((Date.now() - this.startTime.getTime()) / 1000),
                sessionActive: sessionInfo.active,
                memoryUsage: {
                    used: Math.round(memoryUsage.heapUsed / 1024 / 1024),
                    total: Math.round(memoryUsage.heapTotal / 1024 / 1024),
                    unit: 'MB'
                },
                version: '2.24.1'
            };
        }
        catch (error) {
            logger_1.logger.error('Health check failed:', error);
            return {
                status: 'unhealthy',
                uptime: 0,
                sessionActive: false,
                memoryUsage: { used: 0, total: 0, unit: 'MB' },
                version: '2.24.1'
            };
        }
    }
    getSessionInfo() {
        return this.server.getSessionInfo();
    }
    exportSessionState() {
        if (!this.server) {
            logger_1.logger.warn('Cannot export sessions: server not initialized');
            return [];
        }
        return this.server.exportSessionState();
    }
    restoreSessionState(sessions) {
        if (!this.server) {
            logger_1.logger.warn('Cannot restore sessions: server not initialized');
            return 0;
        }
        return this.server.restoreSessionState(sessions);
    }
    async shutdown() {
        logger_1.logger.info('Shutting down N8N MCP Engine...');
        await this.server.shutdown();
    }
    async start() {
        await this.server.start();
    }
}
exports.N8NMCPEngine = N8NMCPEngine;
exports.default = N8NMCPEngine;
//# sourceMappingURL=mcp-engine.js.map
dist/mcp-engine.js.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"mcp-engine.js","sourceRoot":"","sources":["../src/mcp-engine.ts"],"names":[],"mappings":";;;AAQA,6EAAuE;AACvE,2CAAwC;AAqBxC,MAAa,YAAY;IAIvB,YAAY,UAAyB,EAAE;QACrC,IAAI,CAAC,MAAM,GAAG,IAAI,oDAAuB,EAAE,CAAC;QAC5C,IAAI,CAAC,SAAS,GAAG,IAAI,IAAI,EAAE,CAAC;QAE5B,IAAI,OAAO,CAAC,QAAQ,EAAE,CAAC;YACrB,OAAO,CAAC,GAAG,CAAC,SAAS,GAAG,OAAO,CAAC,QAAQ,CAAC;QAC3C,CAAC;IACH,CAAC;IAuBD,KAAK,CAAC,cAAc,CAClB,GAAY,EACZ,GAAa,EACb,eAAiC;QAEjC,IAAI,CAAC;YACH,MAAM,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC,GAAG,EAAE,GAAG,EAAE,eAAe,CAAC,CAAC;QAC7D,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,eAAM,CAAC,KAAK,CAAC,8BAA8B,EAAE,KAAK,CAAC,CAAC;YACpD,MAAM,KAAK,CAAC;QACd,CAAC;IACH,CAAC;IAWD,KAAK,CAAC,WAAW;QACf,IAAI,CAAC;YACH,MAAM,WAAW,GAAG,IAAI,CAAC,MAAM,CAAC,cAAc,EAAE,CAAC;YACjD,MAAM,WAAW,GAAG,OAAO,CAAC,WAAW,EAAE,CAAC;YAE1C,OAAO;gBACL,MAAM,EAAE,SAAS;gBACjB,MAAM,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,GAAG,IAAI,CAAC,SAAS,CAAC,OAAO,EAAE,CAAC,GAAG,IAAI,CAAC;gBAClE,aAAa,EAAE,WAAW,CAAC,MAAM;gBACjC,WAAW,EAAE;oBACX,IAAI,EAAE,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,GAAG,IAAI,GAAG,IAAI,CAAC;oBACpD,KAAK,EAAE,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,SAAS,GAAG,IAAI,GAAG,IAAI,CAAC;oBACtD,IAAI,EAAE,IAAI;iBACX;gBACD,OAAO,EAAE,QAAQ;aAClB,CAAC;QACJ,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,eAAM,CAAC,KAAK,CAAC,sBAAsB,EAAE,KAAK,CAAC,CAAC;YAC5C,OAAO;gBACL,MAAM,EAAE,WAAW;gBACnB,MAAM,EAAE,CAAC;gBACT,aAAa,EAAE,KAAK;gBACpB,WAAW,EAAE,EAAE,IAAI,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE;gBAC9C,OAAO,EAAE,QAAQ;aAClB,CAAC;QACJ,CAAC;IACH,CAAC;IAMD,cAAc;QACZ,OAAO,IAAI,CAAC,MAAM,CAAC,cAAc,EAAE,CAAC;IACtC,CAAC;IAkBD,kBAAkB;QAChB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,eAAM,CAAC,IAAI,CAAC,gDAAgD,CAAC,CAAC;YAC9D,OAAO,EAAE,CAAC;QACZ,CAAC;QACD,OAAO,IAAI,CAAC,MAAM,CAAC,kBAAkB,EAAE,CAAC;IAC1C,CAAC;IAkBD,mBAAmB,CAAC,QAAwB;QAC1C,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,eAAM,CAAC,IAAI,CAAC,iDAAiD,CAAC,CAAC;YAC/D,OAAO,CAAC,CAAC;QACX,CAAC;QACD,OAAO,IAAI,CAAC,MAAM,CAAC,mBAAmB,CAAC,QAAQ,CAAC,CAAC;IACnD,CAAC;IAWD,KAAK,CAAC,QAAQ;QACZ,eAAM,CAAC,IAAI,CAAC,iCAAiC,CAAC,CAAC;QAC/C,MAAM,IAAI,CAAC,MAAM,CAAC,QAAQ,EAAE,CAAC;IAC/B,CAAC;IAMD,KAAK,CAAC,KAAK;QACT,MAAM,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE,CAAC;IAC5B,CAAC;CACF;AAjKD,oCAiKC;AA2CD,kBAAe,YAAY,CAAC"}
dist/mcp-tools-engine.d.ts (47 lines, vendored, normal file)
@@ -0,0 +1,47 @@
import { NodeRepository } from './database/node-repository';
import { WorkflowValidationResult } from './services/workflow-validator';
export declare class MCPEngine {
    private repository;
    private workflowValidator;
    constructor(repository: NodeRepository);
    listNodes(args?: any): Promise<any[]>;
    searchNodes(args: any): Promise<any[]>;
    getNodeInfo(args: any): Promise<any>;
    getNodeEssentials(args: any): Promise<{
        nodeType: any;
        displayName: any;
        description: any;
        category: any;
        required: import("./services/property-filter").SimplifiedProperty[];
        common: import("./services/property-filter").SimplifiedProperty[];
    } | null>;
    getNodeDocumentation(args: any): Promise<any>;
    validateNodeOperation(args: any): Promise<import("./services/config-validator").ValidationResult | {
        valid: boolean;
        errors: {
            type: string;
            property: string;
            message: string;
        }[];
        warnings: never[];
        suggestions: never[];
        visibleProperties: never[];
        hiddenProperties: never[];
    }>;
    validateNodeMinimal(args: any): Promise<{
        missingFields: never[];
        error: string;
    } | {
        missingFields: string[];
        error?: undefined;
    }>;
    searchNodeProperties(args: any): Promise<any[]>;
    listAITools(args: any): Promise<any[]>;
    getDatabaseStatistics(args: any): Promise<{
        totalNodes: number;
        aiToolsCount: number;
        categories: string[];
    }>;
    validateWorkflow(args: any): Promise<WorkflowValidationResult>;
}
//# sourceMappingURL=mcp-tools-engine.d.ts.map
dist/mcp-tools-engine.d.ts.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"mcp-tools-engine.d.ts","sourceRoot":"","sources":["../src/mcp-tools-engine.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,cAAc,EAAE,MAAM,4BAA4B,CAAC;AAK5D,OAAO,EAAqB,wBAAwB,EAAE,MAAM,+BAA+B,CAAC;AAE5F,qBAAa,SAAS;IAGR,OAAO,CAAC,UAAU;IAF9B,OAAO,CAAC,iBAAiB,CAAoB;gBAEzB,UAAU,EAAE,cAAc;IAIxC,SAAS,CAAC,IAAI,GAAE,GAAQ;IAIxB,WAAW,CAAC,IAAI,EAAE,GAAG;IAIrB,WAAW,CAAC,IAAI,EAAE,GAAG;IAIrB,iBAAiB,CAAC,IAAI,EAAE,GAAG;;;;;;;;IAgB3B,oBAAoB,CAAC,IAAI,EAAE,GAAG;IAK9B,qBAAqB,CAAC,IAAI,EAAE,GAAG;;;;;;;;;;;;IAqB/B,mBAAmB,CAAC,IAAI,EAAE,GAAG;;;;;;;IAmB7B,oBAAoB,CAAC,IAAI,EAAE,GAAG;IAI9B,WAAW,CAAC,IAAI,EAAE,GAAG;IAIrB,qBAAqB,CAAC,IAAI,EAAE,GAAG;;;;;IAU/B,gBAAgB,CAAC,IAAI,EAAE,GAAG,GAAG,OAAO,CAAC,wBAAwB,CAAC;CAGrE"}
dist/mcp-tools-engine.js (89 lines, vendored, normal file)
@@ -0,0 +1,89 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MCPEngine = void 0;
const property_filter_1 = require("./services/property-filter");
const config_validator_1 = require("./services/config-validator");
const enhanced_config_validator_1 = require("./services/enhanced-config-validator");
const workflow_validator_1 = require("./services/workflow-validator");
class MCPEngine {
    constructor(repository) {
        this.repository = repository;
        this.workflowValidator = new workflow_validator_1.WorkflowValidator(repository, enhanced_config_validator_1.EnhancedConfigValidator);
    }
    async listNodes(args = {}) {
        return this.repository.getAllNodes(args.limit);
    }
    async searchNodes(args) {
        return this.repository.searchNodes(args.query, args.mode || 'OR', args.limit || 20);
    }
    async getNodeInfo(args) {
        return this.repository.getNodeByType(args.nodeType);
    }
    async getNodeEssentials(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        if (!node)
            return null;
        const essentials = property_filter_1.PropertyFilter.getEssentials(node.properties || [], args.nodeType);
        return {
            nodeType: node.nodeType,
            displayName: node.displayName,
            description: node.description,
            category: node.category,
            required: essentials.required,
            common: essentials.common
        };
    }
    async getNodeDocumentation(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        return node?.documentation || null;
    }
    async validateNodeOperation(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        if (!node) {
            return {
                valid: false,
                errors: [{ type: 'invalid_configuration', property: '', message: 'Node type not found' }],
                warnings: [],
                suggestions: [],
                visibleProperties: [],
                hiddenProperties: []
            };
        }
        const userProvidedKeys = new Set(Object.keys(args.config || {}));
        return config_validator_1.ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys);
    }
    async validateNodeMinimal(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        if (!node) {
            return { missingFields: [], error: 'Node type not found' };
        }
        const missingFields = [];
        const requiredFields = property_filter_1.PropertyFilter.getEssentials(node.properties || [], args.nodeType).required;
        for (const field of requiredFields) {
            if (!args.config[field.name]) {
                missingFields.push(field.name);
            }
        }
        return { missingFields };
    }
    async searchNodeProperties(args) {
        return this.repository.searchNodeProperties(args.nodeType, args.query, args.maxResults || 20);
    }
    async listAITools(args) {
        return this.repository.getAIToolNodes();
    }
    async getDatabaseStatistics(args) {
        const count = await this.repository.getNodeCount();
        const aiTools = await this.repository.getAIToolNodes();
        return {
            totalNodes: count,
            aiToolsCount: aiTools.length,
            categories: ['trigger', 'transform', 'output', 'input']
        };
    }
    async validateWorkflow(args) {
        return this.workflowValidator.validateWorkflow(args.workflow, args.options);
    }
}
exports.MCPEngine = MCPEngine;
//# sourceMappingURL=mcp-tools-engine.js.map
dist/mcp-tools-engine.js.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"mcp-tools-engine.js","sourceRoot":"","sources":["../src/mcp-tools-engine.ts"],"names":[],"mappings":";;;AAKA,gEAA4D;AAE5D,kEAA8D;AAC9D,oFAA+E;AAC/E,sEAA4F;AAE5F,MAAa,SAAS;IAGpB,YAAoB,UAA0B;QAA1B,eAAU,GAAV,UAAU,CAAgB;QAC5C,IAAI,CAAC,iBAAiB,GAAG,IAAI,sCAAiB,CAAC,UAAU,EAAE,mDAAuB,CAAC,CAAC;IACtF,CAAC;IAED,KAAK,CAAC,SAAS,CAAC,OAAY,EAAE;QAC5B,OAAO,IAAI,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;IACjD,CAAC;IAED,KAAK,CAAC,WAAW,CAAC,IAAS;QACzB,OAAO,IAAI,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE,IAAI,CAAC,IAAI,IAAI,IAAI,EAAE,IAAI,CAAC,KAAK,IAAI,EAAE,CAAC,CAAC;IACtF,CAAC;IAED,KAAK,CAAC,WAAW,CAAC,IAAS;QACzB,OAAO,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;IACtD,CAAC;IAED,KAAK,CAAC,iBAAiB,CAAC,IAAS;QAC/B,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,IAAI,CAAC,IAAI;YAAE,OAAO,IAAI,CAAC;QAGvB,MAAM,UAAU,GAAG,gCAAc,CAAC,aAAa,CAAC,IAAI,CAAC,UAAU,IAAI,EAAE,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC;QACtF,OAAO;YACL,QAAQ,EAAE,IAAI,CAAC,QAAQ;YACvB,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,QAAQ,EAAE,IAAI,CAAC,QAAQ;YACvB,QAAQ,EAAE,UAAU,CAAC,QAAQ;YAC7B,MAAM,EAAE,UAAU,CAAC,MAAM;SAC1B,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,oBAAoB,CAAC,IAAS;QAClC,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,OAAO,IAAI,EAAE,aAAa,IAAI,IAAI,CAAC;IACrC,CAAC;IAED,KAAK,CAAC,qBAAqB,CAAC,IAAS;QAEnC,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,IAAI,CAAC,IAAI,EAAE,CAAC;YACV,OAAO;gBACL,KAAK,EAAE,KAAK;gBACZ,MAAM,EAAE,CAAC,EAAE,IAAI,EAAE,uBAAuB,EAAE,QAAQ,EAAE,EAAE,EAAE,OAAO,EAAE,qBAAqB,EAAE,CAAC;gBACzF,QAAQ,EAAE,EAAE;gBACZ,WAAW,EAAE,EAAE;gBACf,iBAAiB,EAAE,EAAE;gBACrB,gBAAgB,EAAE,EAAE;aACrB,CAAC;QACJ,CAAC;QAID,MAAM,gBAAgB,GAAG,IAAI,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,EAAE,CAAC,CAAC,CAAC;QAEjE,OAAO,kCAAe,CAAC,QAAQ,CAAC,IAAI,CAAC,QAAQ,EAAE,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,UAAU,IAAI,EAAE,EAAE,gBAAgB,CAAC,CAAC;IACvG,CAAC;IAED,KAAK,CAAC,mBAAmB,CAAC,IAAS;QAEjC,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,IAAI,CAAC,IAAI,EAAE,CAAC;YACV,OAAO,EAAE,aAAa,EAAE,EAAE,EAAE,KAAK,EAAE,qBAAqB,EAAE,CAAC;QAC7D,CAAC;QAED,MAAM,aAAa,GAAa,EAAE,CAAC;QACnC,MAAM,cAAc,GAAG,gCAAc,CAAC,aAAa,CAAC,IAAI,CAAC,UAAU,IAAI,EAAE,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC;QAEnG,KAAK,MAAM,KAAK,IAAI,cAAc,EAAE,CAAC;YACnC,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC;gBAC7B,aAAa,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;YACjC,CAAC;QACH,CAAC;QAED,OAAO,EAAE,aAAa,EAAE,CAAC;IAC3B,CAAC;IAED,KAAK,CAAC,oBAAoB,CAAC,IAAS;QAClC,OAAO,IAAI,CAAC,UAAU,CAAC,oBAAoB,CAAC,IAAI,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,EAAE,IAAI,CAAC,UAAU,IAAI,EAAE,CAAC,CAAC;IAChG,CAAC;IAED,KAAK,CAAC,WAAW,CAAC,IAAS;QACzB,OAAO,IAAI,CAAC,UAAU,CAAC,cAAc,EAAE,CAAC;IAC1C,CAAC;IAED,KAAK,CAAC,qBAAqB,CAAC,IAAS;QACnC,MAAM,KAAK,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,YAAY,EAAE,CAAC;QACnD,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,cAAc,EAAE,CAAC;QACvD,OAAO;YACL,UAAU,EAAE,KAAK;YACjB,YAAY,EAAE,OAAO,CAAC,MAAM;YAC5B,UAAU,EAAE,CAAC,SAAS,EAAE,WAAW,EAAE,QAAQ,EAAE,OAAO,CAAC;SACxD,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,gBAAgB,CAAC,IAAS;QAC9B,OAAO,IAAI,CAAC,iBAAiB,CAAC,gBAAgB,CAAC,IAAI,CAAC,QAAQ,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC;IAC9E,CAAC;CACF;AArGD,8BAqGC"}
dist/mcp/handlers-n8n-manager.d.ts (29 lines, vendored, normal file)
@@ -0,0 +1,29 @@
import { N8nApiClient } from '../services/n8n-api-client';
import { McpToolResponse } from '../types/n8n-api';
import { NodeRepository } from '../database/node-repository';
import { InstanceContext } from '../types/instance-context';
import { TemplateService } from '../templates/template-service';
export declare function getInstanceCacheStatistics(): string;
export declare function getInstanceCacheMetrics(): import("../utils/cache-utils").CacheMetrics;
export declare function clearInstanceCache(): void;
export declare function getN8nApiClient(context?: InstanceContext): N8nApiClient | null;
export declare function handleCreateWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflowDetails(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflowStructure(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflowMinimal(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleUpdateWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDeleteWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleListWorkflows(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleValidateWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleAutofixWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleTestWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetExecution(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleListExecutions(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDeleteExecution(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleHealthCheck(context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDiagnostic(request: any, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleWorkflowVersions(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDeployTemplate(args: unknown, templateService: TemplateService, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleTriggerWebhookWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
//# sourceMappingURL=handlers-n8n-manager.d.ts.map
dist/mcp/handlers-n8n-manager.d.ts.map (1 line, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"handlers-n8n-manager.d.ts","sourceRoot":"","sources":["../../src/mcp/handlers-n8n-manager.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAC;AAE1D,OAAO,EAML,eAAe,EAGhB,MAAM,kBAAkB,CAAC;AAkB1B,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AAC7D,OAAO,EAAE,eAAe,EAA2B,MAAM,2BAA2B,CAAC;AAOrF,OAAO,EAAE,eAAe,EAAE,MAAM,+BAA+B,CAAC;AAqNhE,wBAAgB,0BAA0B,IAAI,MAAM,CAEnD;AAMD,wBAAgB,uBAAuB,gDAEtC;AAKD,wBAAgB,kBAAkB,IAAI,IAAI,CAIzC;AAED,wBAAgB,eAAe,CAAC,OAAO,CAAC,EAAE,eAAe,GAAG,YAAY,GAAG,IAAI,CAgF9E;AAqHD,wBAAsB,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAmF7G;AAED,wBAAsB,iBAAiB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAiC1G;AAED,wBAAsB,wBAAwB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAoDjH;AAED,wBAAsB,0BAA0B,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAmDnH;AAED,wBAAsB,wBAAwB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAyCjH;AAED,wBAAsB,oBAAoB,CACxC,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CA8H1B;AAeD,wBAAsB,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAsC7G;AAED,wBAAsB,mBAAmB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAiE5G;AAED,wBAAsB,sBAAsB,CAC1C,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CA0F1B;AAED,wBAAsB,qBAAqB,CACzC,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CAoK1B;AAQD,wBAAsB,kBAAkB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAwJ3G;AAED,wBAAsB,kBAAkB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CA8H3G;AAED,wBAAsB,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAgD7G;AAED,wBAAsB,qBAAqB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAiC9G;AAID,wBAAsB,iBAAiB,CAAC,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAwG3F;AAkLD,wBAAsB,gBAAgB,CAAC,OAAO,EAAE,GAAG,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAkQxG;AAED,wBAAsB,sBAAsB,CAC1C,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CAsL1B;AA+BD,wBAAsB,oBAAoB,CACxC,IAAI,EAAE,OAAO,EACb,eAAe,EAAE,eAAe,EAChC,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CAoM1B;AAQD,wBAAsB,4BAA4B,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAyErH"}
2026 dist/mcp/handlers-n8n-manager.js vendored Normal file
File diff suppressed because it is too large
1 dist/mcp/handlers-n8n-manager.js.map vendored Normal file
File diff suppressed because one or more lines are too long
5 dist/mcp/handlers-workflow-diff.d.ts vendored Normal file
@@ -0,0 +1,5 @@
import { McpToolResponse } from '../types/n8n-api';
import { InstanceContext } from '../types/instance-context';
import { NodeRepository } from '../database/node-repository';
export declare function handleUpdatePartialWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
//# sourceMappingURL=handlers-workflow-diff.d.ts.map
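// Call sketch (illustrative; `repo` stands for a NodeRepository instance,
// whose construction is not shown in this diff):
//   const res = await handleUpdatePartialWorkflow(
//     { id: 'wf_123', operations: [{ type: 'updateName', name: 'New name' }] },
//     repo
//   );
//   // res.success is true on a clean update; res.details carries warnings.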
1 dist/mcp/handlers-workflow-diff.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"handlers-workflow-diff.d.ts","sourceRoot":"","sources":["../../src/mcp/handlers-workflow-diff.ts"],"names":[],"mappings":"AAMA,OAAO,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAMnD,OAAO,EAAE,eAAe,EAAE,MAAM,2BAA2B,CAAC;AAE5D,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AA0D7D,wBAAsB,2BAA2B,CAC/C,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CA6V1B"}
461 dist/mcp/handlers-workflow-diff.js vendored Normal file
@@ -0,0 +1,461 @@
"use strict";
|
||||
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
var desc = Object.getOwnPropertyDescriptor(m, k);
|
||||
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
||||
desc = { enumerable: true, get: function() { return m[k]; } };
|
||||
}
|
||||
Object.defineProperty(o, k2, desc);
|
||||
}) : (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
o[k2] = m[k];
|
||||
}));
|
||||
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
||||
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
||||
}) : function(o, v) {
|
||||
o["default"] = v;
|
||||
});
|
||||
var __importStar = (this && this.__importStar) || (function () {
|
||||
var ownKeys = function(o) {
|
||||
ownKeys = Object.getOwnPropertyNames || function (o) {
|
||||
var ar = [];
|
||||
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
||||
return ar;
|
||||
};
|
||||
return ownKeys(o);
|
||||
};
|
||||
return function (mod) {
|
||||
if (mod && mod.__esModule) return mod;
|
||||
var result = {};
|
||||
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
||||
__setModuleDefault(result, mod);
|
||||
return result;
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.handleUpdatePartialWorkflow = handleUpdatePartialWorkflow;
|
||||
const zod_1 = require("zod");
|
||||
const workflow_diff_engine_1 = require("../services/workflow-diff-engine");
|
||||
const handlers_n8n_manager_1 = require("./handlers-n8n-manager");
|
||||
const n8n_errors_1 = require("../utils/n8n-errors");
|
||||
const logger_1 = require("../utils/logger");
|
||||
const n8n_validation_1 = require("../services/n8n-validation");
|
||||
const workflow_versioning_service_1 = require("../services/workflow-versioning-service");
|
||||
const workflow_validator_1 = require("../services/workflow-validator");
|
||||
const enhanced_config_validator_1 = require("../services/enhanced-config-validator");
|
||||
let cachedValidator = null;
|
||||
function getValidator(repository) {
|
||||
if (!cachedValidator) {
|
||||
cachedValidator = new workflow_validator_1.WorkflowValidator(repository, enhanced_config_validator_1.EnhancedConfigValidator);
|
||||
}
|
||||
return cachedValidator;
|
||||
}
|
||||
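// getValidator caches a single WorkflowValidator for the process; the
// repository passed on the first call is the one that sticks, which assumes
// every caller hands in the same NodeRepository instance.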
const workflowDiffSchema = zod_1.z.object({
    id: zod_1.z.string(),
    operations: zod_1.z.array(zod_1.z.object({
        type: zod_1.z.string(),
        description: zod_1.z.string().optional(),
        node: zod_1.z.any().optional(),
        nodeId: zod_1.z.string().optional(),
        nodeName: zod_1.z.string().optional(),
        updates: zod_1.z.any().optional(),
        position: zod_1.z.tuple([zod_1.z.number(), zod_1.z.number()]).optional(),
        source: zod_1.z.string().optional(),
        target: zod_1.z.string().optional(),
        from: zod_1.z.string().optional(),
        to: zod_1.z.string().optional(),
        sourceOutput: zod_1.z.string().optional(),
        targetInput: zod_1.z.string().optional(),
        sourceIndex: zod_1.z.number().optional(),
        targetIndex: zod_1.z.number().optional(),
        branch: zod_1.z.enum(['true', 'false']).optional(),
        case: zod_1.z.number().optional(),
        ignoreErrors: zod_1.z.boolean().optional(),
        dryRun: zod_1.z.boolean().optional(),
        connections: zod_1.z.any().optional(),
        settings: zod_1.z.any().optional(),
        name: zod_1.z.string().optional(),
        tag: zod_1.z.string().optional(),
    })),
    validateOnly: zod_1.z.boolean().optional(),
    continueOnError: zod_1.z.boolean().optional(),
    createBackup: zod_1.z.boolean().optional(),
    intent: zod_1.z.string().optional(),
});
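// Illustrative request shape accepted by workflowDiffSchema (the values are
// made-up examples, not taken from this repository):
//   {
//     id: "wf_123",
//     operations: [
//       { type: "addNode", node: { name: "Slack", type: "n8n-nodes-base.slack", position: [600, 300] } },
//       { type: "addConnection", source: "Webhook", target: "Slack" }
//     ],
//     validateOnly: false,
//     createBackup: true,
//     intent: "Notify Slack on webhook"
//   }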
async function handleUpdatePartialWorkflow(args, repository, context) {
    const startTime = Date.now();
    const sessionId = `mutation_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
    let workflowBefore = null;
    let validationBefore = null;
    let validationAfter = null;
    try {
        if (process.env.DEBUG_MCP === 'true') {
            logger_1.logger.debug('Workflow diff request received', {
                argsType: typeof args,
                hasWorkflowId: args && typeof args === 'object' && 'workflowId' in args,
                operationCount: args && typeof args === 'object' && 'operations' in args ?
                    args.operations?.length : 0
            });
        }
        const input = workflowDiffSchema.parse(args);
        const client = (0, handlers_n8n_manager_1.getN8nApiClient)(context);
        if (!client) {
            return {
                success: false,
                error: 'n8n API not configured. Please set N8N_API_URL and N8N_API_KEY environment variables.'
            };
        }
        let workflow;
        try {
            workflow = await client.getWorkflow(input.id);
            workflowBefore = JSON.parse(JSON.stringify(workflow));
            try {
                const validator = getValidator(repository);
                validationBefore = await validator.validateWorkflow(workflowBefore, {
                    validateNodes: true,
                    validateConnections: true,
                    validateExpressions: true,
                    profile: 'runtime'
                });
            }
            catch (validationError) {
                logger_1.logger.debug('Pre-mutation validation failed (non-blocking):', validationError);
                validationBefore = {
                    valid: false,
                    errors: [{ type: 'validation_error', message: 'Validation failed' }]
                };
            }
        }
        catch (error) {
            if (error instanceof n8n_errors_1.N8nApiError) {
                return {
                    success: false,
                    error: (0, n8n_errors_1.getUserFriendlyErrorMessage)(error),
                    code: error.code
                };
            }
            throw error;
        }
        if (input.createBackup !== false && !input.validateOnly) {
            try {
                const versioningService = new workflow_versioning_service_1.WorkflowVersioningService(repository, client);
                const backupResult = await versioningService.createBackup(input.id, workflow, {
                    trigger: 'partial_update',
                    operations: input.operations
                });
                logger_1.logger.info('Workflow backup created', {
                    workflowId: input.id,
                    versionId: backupResult.versionId,
                    versionNumber: backupResult.versionNumber,
                    pruned: backupResult.pruned
                });
            }
            catch (error) {
                logger_1.logger.warn('Failed to create workflow backup', {
                    workflowId: input.id,
                    error: error.message
                });
            }
        }
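        // Backup-before-mutation: unless the caller sets createBackup: false (or
        // is only validating), a version snapshot is stored before the diff is
        // applied. A backup failure is logged but does not block the update.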
        const diffEngine = new workflow_diff_engine_1.WorkflowDiffEngine();
        const diffRequest = input;
        const diffResult = await diffEngine.applyDiff(workflow, diffRequest);
        if (!diffResult.success) {
            if (diffRequest.continueOnError && diffResult.workflow && diffResult.operationsApplied && diffResult.operationsApplied > 0) {
                logger_1.logger.info(`continueOnError mode: Applying ${diffResult.operationsApplied} successful operations despite ${diffResult.failed?.length || 0} failures`);
            }
            else {
                return {
                    success: false,
                    error: 'Failed to apply diff operations',
                    details: {
                        errors: diffResult.errors,
                        warnings: diffResult.warnings,
                        operationsApplied: diffResult.operationsApplied,
                        applied: diffResult.applied,
                        failed: diffResult.failed
                    }
                };
            }
        }
        if (input.validateOnly) {
            return {
                success: true,
                message: diffResult.message,
                data: {
                    valid: true,
                    operationsToApply: input.operations.length
                },
                details: {
                    warnings: diffResult.warnings
                }
            };
        }
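        // validateOnly is a dry run: the diff above was applied to an in-memory
        // copy only, and nothing has been written back to n8n at this point.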
        if (diffResult.workflow) {
            const structureErrors = (0, n8n_validation_1.validateWorkflowStructure)(diffResult.workflow);
            if (structureErrors.length > 0) {
                const skipValidation = process.env.SKIP_WORKFLOW_VALIDATION === 'true';
                logger_1.logger.warn('Workflow structure validation failed after applying diff operations', {
                    workflowId: input.id,
                    errors: structureErrors,
                    blocking: !skipValidation
                });
                const errorTypes = new Set();
                structureErrors.forEach(err => {
                    if (err.includes('operator') || err.includes('singleValue'))
                        errorTypes.add('operator_issues');
                    if (err.includes('connection') || err.includes('referenced'))
                        errorTypes.add('connection_issues');
                    if (err.includes('Missing') || err.includes('missing'))
                        errorTypes.add('missing_metadata');
                    if (err.includes('branch') || err.includes('output'))
                        errorTypes.add('branch_mismatch');
                });
                const recoverySteps = [];
                if (errorTypes.has('operator_issues')) {
                    recoverySteps.push('Operator structure issue detected. Use validate_node_operation to check specific nodes.');
                    recoverySteps.push('Binary operators (equals, contains, greaterThan, etc.) must NOT have singleValue:true');
                    recoverySteps.push('Unary operators (isEmpty, isNotEmpty, true, false) REQUIRE singleValue:true');
                }
                if (errorTypes.has('connection_issues')) {
                    recoverySteps.push('Connection validation failed. Check all node connections reference existing nodes.');
                    recoverySteps.push('Use cleanStaleConnections operation to remove connections to non-existent nodes.');
                }
                if (errorTypes.has('missing_metadata')) {
                    recoverySteps.push('Missing metadata detected. Ensure filter-based nodes (IF v2.2+, Switch v3.2+) have complete conditions.options.');
                    recoverySteps.push('Required options: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}');
                }
                if (errorTypes.has('branch_mismatch')) {
                    recoverySteps.push('Branch count mismatch. Ensure Switch nodes have outputs for all rules (e.g., 3 rules = 3 output branches).');
                }
                if (recoverySteps.length === 0) {
                    recoverySteps.push('Review the validation errors listed above');
                    recoverySteps.push('Fix issues using updateNode or cleanStaleConnections operations');
                    recoverySteps.push('Run validate_workflow again to verify fixes');
                }
                const errorMessage = structureErrors.length === 1
                    ? `Workflow validation failed: ${structureErrors[0]}`
                    : `Workflow validation failed with ${structureErrors.length} structural issues`;
                if (!skipValidation) {
                    return {
                        success: false,
                        error: errorMessage,
                        details: {
                            errors: structureErrors,
                            errorCount: structureErrors.length,
                            operationsApplied: diffResult.operationsApplied,
                            applied: diffResult.applied,
                            recoveryGuidance: recoverySteps,
                            note: 'Operations were applied but created an invalid workflow structure. The workflow was NOT saved to n8n to prevent UI rendering errors.',
                            autoSanitizationNote: 'Auto-sanitization runs on all nodes during updates to fix operator structures and add missing metadata. However, it cannot fix all issues (e.g., broken connections, branch mismatches). Use the recovery guidance above to resolve remaining issues.'
                        }
                    };
                }
                logger_1.logger.info('Workflow validation skipped (SKIP_WORKFLOW_VALIDATION=true): Allowing workflow with validation warnings to proceed', {
                    workflowId: input.id,
                    warningCount: structureErrors.length
                });
            }
        }
        try {
            const updatedWorkflow = await client.updateWorkflow(input.id, diffResult.workflow);
            let finalWorkflow = updatedWorkflow;
            let activationMessage = '';
            try {
                const validator = getValidator(repository);
                validationAfter = await validator.validateWorkflow(finalWorkflow, {
                    validateNodes: true,
                    validateConnections: true,
                    validateExpressions: true,
                    profile: 'runtime'
                });
            }
            catch (validationError) {
                logger_1.logger.debug('Post-mutation validation failed (non-blocking):', validationError);
                validationAfter = {
                    valid: false,
                    errors: [{ type: 'validation_error', message: 'Validation failed' }]
                };
            }
            if (diffResult.shouldActivate) {
                try {
                    finalWorkflow = await client.activateWorkflow(input.id);
                    activationMessage = ' Workflow activated.';
                }
                catch (activationError) {
                    logger_1.logger.error('Failed to activate workflow after update', activationError);
                    return {
                        success: false,
                        error: 'Workflow updated successfully but activation failed',
                        details: {
                            workflowUpdated: true,
                            activationError: activationError instanceof Error ? activationError.message : 'Unknown error'
                        }
                    };
                }
            }
            else if (diffResult.shouldDeactivate) {
                try {
                    finalWorkflow = await client.deactivateWorkflow(input.id);
                    activationMessage = ' Workflow deactivated.';
                }
                catch (deactivationError) {
                    logger_1.logger.error('Failed to deactivate workflow after update', deactivationError);
                    return {
                        success: false,
                        error: 'Workflow updated successfully but deactivation failed',
                        details: {
                            workflowUpdated: true,
                            deactivationError: deactivationError instanceof Error ? deactivationError.message : 'Unknown error'
                        }
                    };
                }
            }
            if (workflowBefore && !input.validateOnly) {
                trackWorkflowMutation({
                    sessionId,
                    toolName: 'n8n_update_partial_workflow',
                    userIntent: input.intent || 'Partial workflow update',
                    operations: input.operations,
                    workflowBefore,
                    workflowAfter: finalWorkflow,
                    validationBefore,
                    validationAfter,
                    mutationSuccess: true,
                    durationMs: Date.now() - startTime,
                }).catch(err => {
                    logger_1.logger.debug('Failed to track mutation telemetry:', err);
                });
            }
            return {
                success: true,
                data: {
                    id: finalWorkflow.id,
                    name: finalWorkflow.name,
                    active: finalWorkflow.active,
                    nodeCount: finalWorkflow.nodes?.length || 0,
                    operationsApplied: diffResult.operationsApplied
                },
                message: `Workflow "${finalWorkflow.name}" updated successfully. Applied ${diffResult.operationsApplied} operations.${activationMessage} Use n8n_get_workflow with mode 'structure' to verify current state.`,
                details: {
                    applied: diffResult.applied,
                    failed: diffResult.failed,
                    errors: diffResult.errors,
                    warnings: diffResult.warnings
                }
            };
        }
        catch (error) {
            if (workflowBefore && !input.validateOnly) {
                trackWorkflowMutation({
                    sessionId,
                    toolName: 'n8n_update_partial_workflow',
                    userIntent: input.intent || 'Partial workflow update',
                    operations: input.operations,
                    workflowBefore,
                    workflowAfter: workflowBefore,
                    validationBefore,
                    validationAfter: validationBefore,
                    mutationSuccess: false,
                    mutationError: error instanceof Error ? error.message : 'Unknown error',
                    durationMs: Date.now() - startTime,
                }).catch(err => {
                    logger_1.logger.warn('Failed to track mutation telemetry for failed operation:', err);
                });
            }
            if (error instanceof n8n_errors_1.N8nApiError) {
                return {
                    success: false,
                    error: (0, n8n_errors_1.getUserFriendlyErrorMessage)(error),
                    code: error.code,
                    details: error.details
                };
            }
            throw error;
        }
    }
    catch (error) {
        if (error instanceof zod_1.z.ZodError) {
            return {
                success: false,
                error: 'Invalid input',
                details: { errors: error.errors }
            };
        }
        logger_1.logger.error('Failed to update partial workflow', error);
        return {
            success: false,
            error: error instanceof Error ? error.message : 'Unknown error occurred'
        };
    }
}
function inferIntentFromOperations(operations) {
    if (!operations || operations.length === 0) {
        return 'Partial workflow update';
    }
    const opTypes = operations.map((op) => op.type);
    const opCount = operations.length;
    if (opCount === 1) {
        const op = operations[0];
        switch (op.type) {
            case 'addNode':
                return `Add ${op.node?.type || 'node'}`;
            case 'removeNode':
                return `Remove node ${op.nodeName || op.nodeId || ''}`.trim();
            case 'updateNode':
                return `Update node ${op.nodeName || op.nodeId || ''}`.trim();
            case 'addConnection':
                return `Connect ${op.source || 'node'} to ${op.target || 'node'}`;
            case 'removeConnection':
                return `Disconnect ${op.source || 'node'} from ${op.target || 'node'}`;
            case 'rewireConnection':
                return `Rewire ${op.source || 'node'} from ${op.from || ''} to ${op.to || ''}`.trim();
            case 'updateName':
                return `Rename workflow to "${op.name || ''}"`;
            case 'activateWorkflow':
                return 'Activate workflow';
            case 'deactivateWorkflow':
                return 'Deactivate workflow';
            default:
                return `Workflow ${op.type}`;
        }
    }
    const typeSet = new Set(opTypes);
    const summary = [];
    if (typeSet.has('addNode')) {
        const count = opTypes.filter((t) => t === 'addNode').length;
        summary.push(`add ${count} node${count > 1 ? 's' : ''}`);
    }
    if (typeSet.has('removeNode')) {
        const count = opTypes.filter((t) => t === 'removeNode').length;
        summary.push(`remove ${count} node${count > 1 ? 's' : ''}`);
    }
    if (typeSet.has('updateNode')) {
        const count = opTypes.filter((t) => t === 'updateNode').length;
        summary.push(`update ${count} node${count > 1 ? 's' : ''}`);
    }
    if (typeSet.has('addConnection') || typeSet.has('rewireConnection')) {
        summary.push('modify connections');
    }
    if (typeSet.has('updateName') || typeSet.has('updateSettings')) {
        summary.push('update metadata');
    }
    return summary.length > 0
        ? `Workflow update: ${summary.join(', ')}`
        : `Workflow update: ${opCount} operations`;
}
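// Examples of inferred intents (illustrative inputs; elided fields marked ...):
//   [{ type: 'addNode', node: { type: 'n8n-nodes-base.slack' } }]   -> "Add n8n-nodes-base.slack"
//   [{ type: 'addNode', ... }, { type: 'addConnection', ... }]      -> "Workflow update: add 1 node, modify connections"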
async function trackWorkflowMutation(data) {
    try {
        if (!data.userIntent ||
            data.userIntent === 'Partial workflow update' ||
            data.userIntent.length < 10) {
            data.userIntent = inferIntentFromOperations(data.operations);
        }
        const { telemetry } = await Promise.resolve().then(() => __importStar(require('../telemetry/telemetry-manager.js')));
        await telemetry.trackWorkflowMutation(data);
    }
    catch (error) {
        logger_1.logger.debug('Telemetry tracking failed:', error);
    }
}
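// The telemetry manager is loaded via `Promise.resolve().then(() => __importStar(require(...)))`,
// TypeScript's downleveled form of a dynamic `await import(...)`: the telemetry
// module is only loaded when a mutation is actually tracked, and any failure
// stays non-fatal thanks to the surrounding try/catch.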
//# sourceMappingURL=handlers-workflow-diff.js.map
1 dist/mcp/handlers-workflow-diff.js.map vendored Normal file
File diff suppressed because one or more lines are too long
3 dist/mcp/index.d.ts vendored Normal file
@@ -0,0 +1,3 @@
#!/usr/bin/env node
export {};
//# sourceMappingURL=index.d.ts.map
1 dist/mcp/index.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/mcp/index.ts"],"names":[],"mappings":""}
Some files were not shown because too many files have changed in this diff.