Mirror of https://github.com/czlonkowski/n8n-mcp.git, synced 2026-01-30 22:42:04 +00:00
Compare commits
407 Commits
@@ -1,6 +1,7 @@
 ---
 name: code-reviewer
 description: Use this agent when you need to review recently written or modified code for quality, security, and maintainability. This agent should be invoked immediately after code changes are made to ensure high standards are maintained. Examples:\n\n<example>\nContext: The user has created a code-review agent that should be called after writing code.\nuser: "Please write a function that validates email addresses"\nassistant: "I'll create an email validation function for you:"\n<function implementation>\nassistant: "Now let me use the code-reviewer agent to review this code for quality and security"\n<commentary>\nSince new code was just written, the code-reviewer agent should be invoked to ensure it meets quality standards.\n</commentary>\n</example>\n\n<example>\nContext: The user has modified existing code and wants it reviewed.\nuser: "I've updated the authentication logic, can you check if it's secure?"\nassistant: "I'll use the code-reviewer agent to analyze your authentication changes for security and best practices"\n<commentary>\nThe user has made changes to security-critical code, so the code-reviewer agent is the appropriate tool to ensure the modifications are secure and well-implemented.\n</commentary>\n</example>
+model: inherit
 ---

 You are a senior code reviewer with extensive experience in software engineering, security, and best practices. Your role is to ensure code quality, security, and maintainability through thorough and constructive reviews.
@@ -1,7 +1,7 @@
 ---
 name: n8n-mcp-tester
 description: Use this agent when you need to test n8n-mcp functionality after implementing fixes or new features. This agent should be invoked proactively after code changes to validate that the MCP tools work correctly with real n8n workflows. The agent tests the actual MCP server tools, not just unit tests. Examples:\n\n<example>\nContext: Developer has just fixed a validation bug in the n8n-mcp server\nuser: "I've fixed the validation issue for HTTP Request nodes"\nassistant: "I've completed the fix. Now let me build the project and then test it."\n<builds project>\nassistant: "Project built successfully. Please reload the MCP server in Claude Desktop to apply the changes."\nuser: "Reloaded"\nassistant: "Great! Now let me invoke the n8n-mcp-tester agent to verify the fix works correctly."\n<commentary>\nSince a fix has been implemented and the MCP server reloaded, use the n8n-mcp-tester agent to validate the fix works in real-world usage.\n</commentary>\n</example>\n\n<example>\nContext: Developer has added a new MCP tool for workflow validation\nuser: "I've implemented the new workflow validation tool"\nassistant: "Implementation complete. Let me build and prepare for testing."\n<builds project>\nassistant: "Build successful. Please reload the MCP server to load the new tool."\nuser: "Done"\nassistant: "Perfect! I'll now use the n8n-mcp-tester agent to test the new workflow validation tool."\n<commentary>\nAfter implementing new MCP functionality and reloading the server, invoke n8n-mcp-tester to verify it works correctly.\n</commentary>\n</example>
-tools: Glob, Grep, LS, Read, WebFetch, TodoWrite, WebSearch, mcp__puppeteer__puppeteer_navigate, mcp__puppeteer__puppeteer_screenshot, mcp__puppeteer__puppeteer_click, mcp__puppeteer__puppeteer_fill, mcp__puppeteer__puppeteer_select, mcp__puppeteer__puppeteer_hover, mcp__puppeteer__puppeteer_evaluate, ListMcpResourcesTool, ReadMcpResourceTool, mcp__supabase__list_organizations, mcp__supabase__get_organization, mcp__supabase__list_projects, mcp__supabase__get_project, mcp__supabase__get_cost, mcp__supabase__confirm_cost, mcp__supabase__create_project, mcp__supabase__pause_project, mcp__supabase__restore_project, mcp__supabase__create_branch, mcp__supabase__list_branches, mcp__supabase__delete_branch, mcp__supabase__merge_branch, mcp__supabase__reset_branch, mcp__supabase__rebase_branch, mcp__supabase__list_tables, mcp__supabase__list_extensions, mcp__supabase__list_migrations, mcp__supabase__apply_migration, mcp__supabase__execute_sql, mcp__supabase__get_logs, mcp__supabase__get_advisors, mcp__supabase__get_project_url, mcp__supabase__get_anon_key, mcp__supabase__generate_typescript_types, mcp__supabase__search_docs, mcp__supabase__list_edge_functions, mcp__supabase__deploy_edge_function, mcp__n8n-mcp__tools_documentation, mcp__n8n-mcp__list_nodes, mcp__n8n-mcp__get_node_info, mcp__n8n-mcp__search_nodes, mcp__n8n-mcp__list_ai_tools, mcp__n8n-mcp__get_node_documentation, mcp__n8n-mcp__get_database_statistics, mcp__n8n-mcp__get_node_essentials, mcp__n8n-mcp__search_node_properties, mcp__n8n-mcp__get_node_for_task, mcp__n8n-mcp__list_tasks, mcp__n8n-mcp__validate_node_operation, mcp__n8n-mcp__validate_node_minimal, mcp__n8n-mcp__get_property_dependencies, mcp__n8n-mcp__get_node_as_tool_info, mcp__n8n-mcp__list_node_templates, mcp__n8n-mcp__get_template, mcp__n8n-mcp__search_templates, mcp__n8n-mcp__get_templates_for_task, mcp__n8n-mcp__validate_workflow, mcp__n8n-mcp__validate_workflow_connections, mcp__n8n-mcp__validate_workflow_expressions, mcp__n8n-mcp__n8n_create_workflow, mcp__n8n-mcp__n8n_get_workflow, mcp__n8n-mcp__n8n_get_workflow_details, mcp__n8n-mcp__n8n_get_workflow_structure, mcp__n8n-mcp__n8n_get_workflow_minimal, mcp__n8n-mcp__n8n_update_full_workflow, mcp__n8n-mcp__n8n_update_partial_workflow, mcp__n8n-mcp__n8n_delete_workflow, mcp__n8n-mcp__n8n_list_workflows, mcp__n8n-mcp__n8n_validate_workflow, mcp__n8n-mcp__n8n_trigger_webhook_workflow, mcp__n8n-mcp__n8n_get_execution, mcp__n8n-mcp__n8n_list_executions, mcp__n8n-mcp__n8n_delete_execution, mcp__n8n-mcp__n8n_health_check, mcp__n8n-mcp__n8n_list_available_tools, mcp__n8n-mcp__n8n_diagnostic
+tools: Glob, Grep, Read, WebFetch, TodoWrite, WebSearch, mcp__supabase__create_branch, mcp__supabase__list_branches, mcp__supabase__delete_branch, mcp__supabase__merge_branch, mcp__supabase__reset_branch, mcp__supabase__rebase_branch, mcp__supabase__list_tables, mcp__supabase__list_extensions, mcp__supabase__list_migrations, mcp__supabase__apply_migration, mcp__supabase__execute_sql, mcp__supabase__get_logs, mcp__supabase__get_advisors, mcp__supabase__get_project_url, mcp__supabase__generate_typescript_types, mcp__supabase__search_docs, mcp__supabase__list_edge_functions, mcp__supabase__deploy_edge_function, mcp__n8n-mcp__tools_documentation, mcp__n8n-mcp__search_nodes, mcp__n8n-mcp__get_template, mcp__n8n-mcp__search_templates, mcp__n8n-mcp__validate_workflow, mcp__n8n-mcp__n8n_create_workflow, mcp__n8n-mcp__n8n_get_workflow, mcp__n8n-mcp__n8n_update_full_workflow, mcp__n8n-mcp__n8n_update_partial_workflow, mcp__n8n-mcp__n8n_delete_workflow, mcp__n8n-mcp__n8n_list_workflows, mcp__n8n-mcp__n8n_validate_workflow, mcp__n8n-mcp__n8n_trigger_webhook_workflow, mcp__n8n-mcp__n8n_health_check, mcp__brightdata-mcp__search_engine, mcp__brightdata-mcp__scrape_as_markdown, mcp__brightdata-mcp__search_engine_batch, mcp__brightdata-mcp__scrape_batch, mcp__supabase__get_publishable_keys, mcp__supabase__get_edge_function, mcp__n8n-mcp__get_node, mcp__n8n-mcp__validate_node, mcp__n8n-mcp__n8n_autofix_workflow, mcp__n8n-mcp__n8n_executions, mcp__n8n-mcp__n8n_workflow_versions, mcp__n8n-mcp__n8n_deploy_template, mcp__ide__getDiagnostics, mcp__ide__executeCode
 model: sonnet
 ---
93 .env.example

@@ -26,4 +26,8 @@ USE_NGINX=false
 # N8N_API_URL=https://your-n8n-instance.com
 # N8N_API_KEY=your-api-key-here
 # N8N_API_TIMEOUT=30000
 # N8N_API_MAX_RETRIES=3
+
+# Optional: Disable specific tools (comma-separated list)
+# Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check
+# DISABLED_TOOLS=
@@ -37,9 +37,11 @@ MCP_SERVER_HOST=localhost
 # Server mode: stdio (local) or http (remote)
 MCP_MODE=stdio

-# Use fixed HTTP implementation (recommended for stability)
-# Set to true to bypass StreamableHTTPServerTransport issues
-USE_FIXED_HTTP=true
+# DEPRECATED: USE_FIXED_HTTP is deprecated as of v2.31.8
+# The fixed HTTP implementation does not support SSE streaming required by
+# clients like OpenAI Codex. Use the default SingleSessionHTTPServer instead.
+# See: https://github.com/czlonkowski/n8n-mcp/issues/524
+# USE_FIXED_HTTP=true # DO NOT USE - deprecated

 # HTTP Server Configuration (only used when MCP_MODE=http)
 PORT=3000

@@ -69,6 +71,57 @@ AUTH_TOKEN=your-secure-token-here
 # Default: 0 (disabled)
 # TRUST_PROXY=0

+# =========================
+# SECURITY CONFIGURATION
+# =========================
+
+# Rate Limiting Configuration
+# Protects authentication endpoint from brute force attacks
+# Window: Time period in milliseconds (default: 900000 = 15 minutes)
+# Max: Maximum authentication attempts per IP within window (default: 20)
+# AUTH_RATE_LIMIT_WINDOW=900000
+# AUTH_RATE_LIMIT_MAX=20
+
+# SSRF Protection Mode
+# Prevents webhooks from accessing internal networks and cloud metadata
+#
+# Modes:
+# - strict (default): Block localhost + private IPs + cloud metadata
+#   Use for: Production deployments, cloud environments
+#   Security: Maximum
+#
+# - moderate: Allow localhost, block private IPs + cloud metadata
+#   Use for: Local development with local n8n instance
+#   Security: Good balance
+#   Example: n8n running on http://localhost:5678 or http://host.docker.internal:5678
+#
+# - permissive: Allow localhost + private IPs, block cloud metadata
+#   Use for: Internal network testing, private cloud (NOT for production)
+#   Security: Minimal - use with caution
+#
+# Default: strict
+# WEBHOOK_SECURITY_MODE=strict
+#
+# For local development with local n8n:
+# WEBHOOK_SECURITY_MODE=moderate
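To make the three modes concrete, here is a minimal TypeScript sketch of how a mode could map onto a URL check. The helper names (`isPrivateAddress`, `METADATA_HOSTS`) and the exact address lists are illustrative assumptions, not the project's actual SSRF implementation.

```typescript
import { isIP } from "node:net";

type WebhookSecurityMode = "strict" | "moderate" | "permissive";

// Cloud metadata endpoints are blocked in every mode (assumed list).
const METADATA_HOSTS = new Set(["169.254.169.254", "metadata.google.internal"]);

// Hosts treated as "localhost" for the moderate mode (assumed list).
const LOCALHOST_HOSTS = new Set(["localhost", "127.0.0.1", "::1", "host.docker.internal"]);

// Rough RFC 1918 / link-local check for IPv4 literals (illustrative only).
function isPrivateAddress(host: string): boolean {
  if (isIP(host) !== 4) return false;
  const [a, b] = host.split(".").map(Number);
  return (
    a === 10 ||
    (a === 172 && b >= 16 && b <= 31) ||
    (a === 192 && b === 168) ||
    (a === 169 && b === 254)
  );
}

function isWebhookUrlAllowed(rawUrl: string, mode: WebhookSecurityMode): boolean {
  const host = new URL(rawUrl).hostname;
  if (METADATA_HOSTS.has(host)) return false;              // blocked in all modes
  if (LOCALHOST_HOSTS.has(host)) return mode !== "strict";  // moderate/permissive allow localhost
  if (isPrivateAddress(host)) return mode === "permissive"; // only permissive allows private IPs
  return true;                                              // public hosts allowed everywhere
}
```

Under this reading, `isWebhookUrlAllowed("http://localhost:5678/webhook", "moderate")` returns true while the same URL under `strict` returns false, which matches the comments in the config above.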
+
+# Disabled Tools Configuration
+# Filter specific tools from registration at startup
+# Useful for multi-tenant deployments, security hardening, or feature flags
+#
+# Format: Comma-separated list of tool names
+# Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check,custom_tool
+#
+# Common use cases:
+# - Multi-tenant: Hide tools that check env vars instead of instance context
+#   Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check
+# - Security: Disable management tools in production for certain users
+# - Feature flags: Gradually roll out new tools
+# - Deployment-specific: Different tool sets for cloud vs self-hosted
+#
+# Default: (empty - all tools enabled)
+# DISABLED_TOOLS=

 # =========================
 # MULTI-TENANT CONFIGURATION
 # =========================

@@ -132,4 +185,36 @@ ENABLE_MULTI_TENANT=false

 # Enable metadata generation during template fetch (default: false)
 # Set to true to automatically generate metadata when running fetch:templates
 # METADATA_GENERATION_ENABLED=false
+
+# ========================================
+# INTEGRATION TESTING CONFIGURATION
+# ========================================
+# Configuration for integration tests that call real n8n instance API
+
+# n8n API Configuration for Integration Tests
+# For local development: Use your local n8n instance
+# For CI: These will be provided by GitHub secrets
+# N8N_API_URL=http://localhost:5678
+# N8N_API_KEY=
+
+# Pre-activated Webhook Workflows for Testing
+# These workflows must be created manually in n8n and activated
+# because n8n API doesn't support workflow activation.
+#
+# Setup Instructions:
+# 1. Create 4 workflows in n8n UI (one for each HTTP method)
+# 2. Each workflow should have a single Webhook node
+# 3. Configure webhook paths: mcp-test-get, mcp-test-post, mcp-test-put, mcp-test-delete
+# 4. ACTIVATE each workflow in n8n UI
+# 5. Copy the workflow IDs here
+#
+# N8N_TEST_WEBHOOK_GET_ID=    # Workflow ID for GET method webhook
+# N8N_TEST_WEBHOOK_POST_ID=   # Workflow ID for POST method webhook
+# N8N_TEST_WEBHOOK_PUT_ID=    # Workflow ID for PUT method webhook
+# N8N_TEST_WEBHOOK_DELETE_ID= # Workflow ID for DELETE method webhook
+
+# Test Configuration
+N8N_TEST_CLEANUP_ENABLED=true     # Enable automatic cleanup of test workflows
+N8N_TEST_TAG=mcp-integration-test # Tag applied to all test workflows
+N8N_TEST_NAME_PREFIX=[MCP-TEST]   # Name prefix for test workflows
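The DISABLED_TOOLS contract above is simple enough to sketch. The following TypeScript is a minimal illustration of honoring it at startup; the `ToolDefinition` shape and `allTools` array are assumptions for the example, not n8n-mcp's actual internals.

```typescript
interface ToolDefinition {
  name: string;
  description: string;
}

// Parse the comma-separated DISABLED_TOOLS list, tolerating spaces and empty entries.
function getDisabledTools(env: NodeJS.ProcessEnv): Set<string> {
  return new Set(
    (env.DISABLED_TOOLS ?? "")
      .split(",")
      .map((name) => name.trim())
      .filter((name) => name.length > 0)
  );
}

// Drop disabled tools before registration so clients never see them.
function filterTools(allTools: ToolDefinition[], env: NodeJS.ProcessEnv): ToolDefinition[] {
  const disabled = getDisabledTools(env);
  return allTools.filter((tool) => !disabled.has(tool.name));
}

// Example: with DISABLED_TOOLS="n8n_diagnostic, n8n_health_check",
// filterTools(tools, process.env) registers everything except those two.
```

Filtering at registration time (rather than rejecting calls later) matches the stated goal: disabled tools simply never appear in the tool list.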
222 .github/workflows/dependency-check.yml vendored Normal file

@@ -0,0 +1,222 @@
name: Dependency Compatibility Check

# This workflow verifies that when users install n8n-mcp via npm (without lockfile),
# they get compatible dependency versions. This catches issues like #440, #444, #446, #447, #450
# where npm resolution gave users incompatible SDK/Zod versions.

on:
  push:
    branches: [main]
    paths:
      - 'package.json'
      - 'package-lock.json'
      - '.github/workflows/dependency-check.yml'
  pull_request:
    branches: [main]
    paths:
      - 'package.json'
      - 'package-lock.json'
      - '.github/workflows/dependency-check.yml'
  # Allow manual trigger for debugging
  workflow_dispatch:
  # Run weekly to catch upstream dependency changes
  schedule:
    - cron: '0 6 * * 1' # Every Monday at 6 AM UTC

permissions:
  contents: read

jobs:
  fresh-install-check:
    name: Fresh Install Dependency Check
    runs-on: ubuntu-latest
    timeout-minutes: 10

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Build package
        run: |
          npm ci
          npm run build

      - name: Pack package for testing
        run: npm pack

      - name: Create fresh install test directory
        run: |
          mkdir -p /tmp/fresh-install-test
          cp n8n-mcp-*.tgz /tmp/fresh-install-test/

      - name: Install package fresh (simulating user install)
        working-directory: /tmp/fresh-install-test
        run: |
          npm init -y
          # Install from tarball WITHOUT lockfile (simulates npm install n8n-mcp)
          npm install ./n8n-mcp-*.tgz

      - name: Verify critical dependency versions
        working-directory: /tmp/fresh-install-test
        run: |
          echo "=== Dependency Version Check ==="
          echo ""

          # Get actual resolved versions
          SDK_VERSION=$(npm list @modelcontextprotocol/sdk --json 2>/dev/null | jq -r '.dependencies["n8n-mcp"].dependencies["@modelcontextprotocol/sdk"].version // .dependencies["@modelcontextprotocol/sdk"].version // "not found"')
          ZOD_VERSION=$(npm list zod --json 2>/dev/null | jq -r '.dependencies["n8n-mcp"].dependencies.zod.version // .dependencies.zod.version // "not found"')

          echo "MCP SDK version: $SDK_VERSION"
          echo "Zod version: $ZOD_VERSION"
          echo ""

          # Check MCP SDK version - must be exactly 1.20.1
          if [[ "$SDK_VERSION" == "not found" ]]; then
            echo "❌ FAILED: Could not determine MCP SDK version!"
            echo "   The dependency may not have been installed correctly."
            exit 1
          fi
          if [[ "$SDK_VERSION" != "1.20.1" ]]; then
            echo "❌ FAILED: MCP SDK version mismatch!"
            echo "   Expected: 1.20.1"
            echo "   Got: $SDK_VERSION"
            echo ""
            echo "This can cause runtime errors. See issues #440, #444, #446, #447, #450"
            exit 1
          fi
          echo "✅ MCP SDK version is correct: $SDK_VERSION"

          # Check Zod version - must be 3.x (not 4.x, including pre-releases)
          if [[ "$ZOD_VERSION" == "not found" ]]; then
            echo "❌ FAILED: Could not determine Zod version!"
            echo "   The dependency may not have been installed correctly."
            exit 1
          fi
          if [[ "$ZOD_VERSION" =~ ^4\. ]]; then
            echo "❌ FAILED: Zod v4 detected - incompatible with MCP SDK 1.20.1!"
            echo "   Expected: 3.x"
            echo "   Got: $ZOD_VERSION"
            echo ""
            echo "Zod v4 causes '_zod' property errors. See issues #440, #444, #446, #447, #450"
            exit 1
          fi
          echo "✅ Zod version is compatible: $ZOD_VERSION"

          echo ""
          echo "=== All dependency checks passed ==="

      - name: Test basic functionality
        working-directory: /tmp/fresh-install-test
        run: |
          echo "=== Basic Functionality Test ==="

          # Create a simple test script
          cat > test-import.mjs << 'EOF'
          import { spawn } from 'child_process';
          import path from 'path';
          import { fileURLToPath } from 'url';

          const __dirname = path.dirname(fileURLToPath(import.meta.url));

          // Test that the package can be required and basic tools work
          async function test() {
            console.log('Testing n8n-mcp package import...');

            // Start the MCP server briefly to verify it initializes
            const serverPath = path.join(__dirname, 'node_modules/n8n-mcp/dist/mcp/index.js');

            const proc = spawn('node', [serverPath], {
              env: { ...process.env, MCP_MODE: 'stdio' },
              stdio: ['pipe', 'pipe', 'pipe']
            });

            // Send initialize request
            const initRequest = JSON.stringify({
              jsonrpc: '2.0',
              id: 1,
              method: 'initialize',
              params: {
                protocolVersion: '2024-11-05',
                capabilities: {},
                clientInfo: { name: 'test', version: '1.0.0' }
              }
            });

            proc.stdin.write(initRequest + '\n');

            // Wait for response or timeout
            let output = '';
            let stderrOutput = '';
            proc.stdout.on('data', (data) => {
              output += data.toString();
            });

            proc.stderr.on('data', (data) => {
              stderrOutput += data.toString();
              console.error('stderr:', data.toString());
            });

            // Give it 5 seconds to respond
            await new Promise((resolve) => setTimeout(resolve, 5000));

            proc.kill();

            // Check for Zod v4 compatibility errors (the bug we're testing for)
            const allOutput = output + stderrOutput;
            if (allOutput.includes('_zod') || allOutput.includes('Cannot read properties of undefined')) {
              console.error('❌ FAILED: Zod compatibility error detected!');
              console.error('This indicates the SDK/Zod version fix is not working.');
              console.error('See issues #440, #444, #446, #447, #450');
              process.exit(1);
            }

            if (output.includes('"result"')) {
              console.log('✅ MCP server initialized successfully');
              return true;
            } else {
              console.log('Output received:', output.substring(0, 500));
              // Server might not respond in stdio mode without proper framing
              // But if we got here without crashing, that's still good
              console.log('✅ MCP server started without errors');
              return true;
            }
          }

          test()
            .then(() => {
              console.log('=== Basic functionality test passed ===');
              process.exit(0);
            })
            .catch((err) => {
              console.error('❌ Test failed:', err.message);
              process.exit(1);
            });
          EOF

          node test-import.mjs

      - name: Generate dependency report
        if: always()
        working-directory: /tmp/fresh-install-test
        run: |
          echo "=== Full Dependency Tree ===" > dependency-report.txt
          npm list --all >> dependency-report.txt 2>&1 || true

          echo "" >> dependency-report.txt
          echo "=== Critical Dependencies ===" >> dependency-report.txt
          npm list @modelcontextprotocol/sdk zod zod-to-json-schema >> dependency-report.txt 2>&1 || true

          cat dependency-report.txt

      - name: Upload dependency report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: dependency-report-${{ github.run_number }}
          path: /tmp/fresh-install-test/dependency-report.txt
          retention-days: 30
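Aside from a CI check like the workflow above, a consumer can defend against this class of resolution drift by pinning the troublesome transitive versions with npm's `overrides` field. Whether n8n-mcp itself uses overrides is not shown in this diff, so treat the snippet as an illustration of the mechanism, with an assumed consumer package name:

```json
{
  "name": "example-consumer",
  "dependencies": {
    "n8n-mcp": "^2.33.0"
  },
  "overrides": {
    "@modelcontextprotocol/sdk": "1.20.1",
    "zod": "^3.23.0"
  }
}
```

With this in place, `npm install` resolves the SDK to exactly 1.20.1 and keeps Zod on the 3.x line even without a lockfile, which is precisely the invariant the workflow asserts.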
3 .github/workflows/docker-build-n8n.yml vendored

@@ -52,6 +52,9 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v4

+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
82 .github/workflows/docker-build.yml vendored

@@ -5,8 +5,6 @@ on:
   push:
     branches:
      - main
-    tags:
-      - 'v*'
    paths-ignore:
      - '**.md'
      - '**.txt'

@@ -38,6 +36,12 @@ on:
      - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

+# Prevent concurrent Docker pushes across all workflows (shared with release.yml)
+# This ensures docker-build.yml and release.yml never push to 'latest' simultaneously
+concurrency:
+  group: docker-push-${{ github.ref }}
+  cancel-in-progress: false
+
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

@@ -49,13 +53,24 @@ jobs:
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          lfs: true

+      - name: Sync runtime version
+        run: |
+          VERSION=$(node -p "require('./package.json').version")
+          node -e "
+          const fs = require('fs');
+          const pkg = JSON.parse(fs.readFileSync('package.runtime.json'));
+          pkg.version = '$VERSION';
+          fs.writeFileSync('package.runtime.json', JSON.stringify(pkg, null, 2) + '\n');
+          "
+          echo "✅ Synced package.runtime.json to version $VERSION"
+
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

@@ -89,26 +104,75 @@ jobs:
        uses: docker/build-push-action@v5
        with:
          context: .
-          no-cache: true
+          no-cache: false
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false

+      - name: Verify multi-arch manifest for latest tag
+        if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
+        run: |
+          echo "Verifying multi-arch manifest for latest tag..."
+
+          # Retry with exponential backoff (registry propagation can take time)
+          MAX_ATTEMPTS=5
+          ATTEMPT=1
+          WAIT_TIME=2
+
+          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
+            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
+
+            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)
+
+            # Check for both platforms
+            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
+              echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
+              echo "$MANIFEST"
+              exit 0
+            fi
+
+            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
+              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
+              sleep $WAIT_TIME
+              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
+            fi
+
+            ATTEMPT=$((ATTEMPT + 1))
+          done
+
+          echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
+          echo "$MANIFEST"
+          exit 1
+
  build-railway:
    name: Build Railway Docker Image
    runs-on: ubuntu-latest
    needs: build
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          lfs: true

+      - name: Sync runtime version
+        run: |
+          VERSION=$(node -p "require('./package.json').version")
+          node -e "
+          const fs = require('fs');
+          const pkg = JSON.parse(fs.readFileSync('package.runtime.json'));
+          pkg.version = '$VERSION';
+          fs.writeFileSync('package.runtime.json', JSON.stringify(pkg, null, 2) + '\n');
+          "
+          echo "✅ Synced package.runtime.json to version $VERSION"
+
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

@@ -143,11 +207,13 @@ jobs:
        with:
          context: .
          file: ./Dockerfile.railway
-          no-cache: true
+          no-cache: false
          platforms: linux/amd64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta-railway.outputs.tags }}
          labels: ${{ steps.meta-railway.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false

  # Nginx build commented out until Phase 2
262 .github/workflows/release.yml vendored

@@ -13,9 +13,10 @@ permissions:
  issues: write
  pull-requests: write

-# Prevent concurrent releases
+# Prevent concurrent Docker pushes across all workflows (shared with docker-build.yml)
+# This ensures release.yml and docker-build.yml never push to 'latest' simultaneously
concurrency:
-  group: release
+  group: docker-push-${{ github.ref }}
  cancel-in-progress: false

env:

@@ -79,53 +80,111 @@ jobs:
          echo "ℹ️ No version change detected"
        fi

-  extract-changelog:
-    name: Extract Changelog
+      - name: Validate version against npm registry
+        if: steps.check.outputs.changed == 'true'
+        run: |
+          CURRENT_VERSION="${{ steps.check.outputs.version }}"
+
+          # Get latest version from npm (handle package not found)
+          NPM_VERSION=$(npm view n8n-mcp version 2>/dev/null || echo "0.0.0")
+
+          echo "Current version: $CURRENT_VERSION"
+          echo "NPM registry version: $NPM_VERSION"
+
+          # Check if version already exists in npm
+          if [ "$CURRENT_VERSION" = "$NPM_VERSION" ]; then
+            echo "❌ Error: Version $CURRENT_VERSION already published to npm"
+            echo "Please bump the version in package.json before releasing"
+            exit 1
+          fi
+
+          # Simple semver comparison (assumes format: major.minor.patch)
+          # Compare if current version is greater than npm version
+          if [ "$NPM_VERSION" != "0.0.0" ]; then
+            # Sort versions and check if current is not the highest
+            HIGHEST=$(printf '%s\n%s' "$NPM_VERSION" "$CURRENT_VERSION" | sort -V | tail -n1)
+            if [ "$HIGHEST" != "$CURRENT_VERSION" ]; then
+              echo "❌ Error: Version $CURRENT_VERSION is not greater than npm version $NPM_VERSION"
+              echo "Please use a higher version number"
+              exit 1
+            fi
+          fi
+
+          echo "✅ Version $CURRENT_VERSION is valid (higher than npm version $NPM_VERSION)"
+
+  generate-release-notes:
+    name: Generate Release Notes
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'
    outputs:
-      release-notes: ${{ steps.extract.outputs.notes }}
-      has-notes: ${{ steps.extract.outputs.has-notes }}
+      release-notes: ${{ steps.generate.outputs.notes }}
+      has-notes: ${{ steps.generate.outputs.has-notes }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0 # Need full history for git log

-      - name: Extract changelog for version
-        id: extract
+      - name: Generate release notes from commits
+        id: generate
        run: |
-          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
-          CHANGELOG_FILE="docs/CHANGELOG.md"
-
-          if [ ! -f "$CHANGELOG_FILE" ]; then
-            echo "Changelog file not found at $CHANGELOG_FILE"
-            echo "has-notes=false" >> $GITHUB_OUTPUT
-            echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
-            exit 0
-          fi
-
-          # Use the extracted changelog script
-          if NOTES=$(node scripts/extract-changelog.js "$VERSION" "$CHANGELOG_FILE" 2>/dev/null); then
-            echo "has-notes=true" >> $GITHUB_OUTPUT
-
-            # Use heredoc to properly handle multiline content
-            {
-              echo "notes<<EOF"
-              echo "$NOTES"
-              echo "EOF"
-            } >> $GITHUB_OUTPUT
-
-            echo "✅ Successfully extracted changelog for version $VERSION"
-          else
-            echo "has-notes=false" >> $GITHUB_OUTPUT
-            echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
-            echo "⚠️ Could not extract changelog for version $VERSION"
-          fi
+          CURRENT_VERSION="${{ needs.detect-version-change.outputs.new-version }}"
+          CURRENT_TAG="v$CURRENT_VERSION"
+
+          # Get the previous tag (excluding the current tag which doesn't exist yet)
+          PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v "^$CURRENT_TAG$" | head -1)
+
+          echo "Current version: $CURRENT_VERSION"
+          echo "Current tag: $CURRENT_TAG"
+          echo "Previous tag: $PREVIOUS_TAG"
+
+          if [ -z "$PREVIOUS_TAG" ]; then
+            echo "ℹ️ No previous tag found, this might be the first release"
+
+            # Generate initial release notes using script
+            if NOTES=$(node scripts/generate-initial-release-notes.js "$CURRENT_VERSION" 2>/dev/null); then
+              echo "✅ Successfully generated initial release notes for version $CURRENT_VERSION"
+            else
+              echo "⚠️ Could not generate initial release notes for version $CURRENT_VERSION"
+              NOTES="Initial release v$CURRENT_VERSION"
+            fi
+
+            echo "has-notes=true" >> $GITHUB_OUTPUT
+
+            # Use heredoc to properly handle multiline content
+            {
+              echo "notes<<EOF"
+              echo "$NOTES"
+              echo "EOF"
+            } >> $GITHUB_OUTPUT
+          else
+            echo "✅ Previous tag found: $PREVIOUS_TAG"
+
+            # Generate release notes between tags
+            if NOTES=$(node scripts/generate-release-notes.js "$PREVIOUS_TAG" "HEAD" 2>/dev/null); then
+              echo "has-notes=true" >> $GITHUB_OUTPUT
+
+              # Use heredoc to properly handle multiline content
+              {
+                echo "notes<<EOF"
+                echo "$NOTES"
+                echo "EOF"
+              } >> $GITHUB_OUTPUT
+
+              echo "✅ Successfully generated release notes from $PREVIOUS_TAG to $CURRENT_TAG"
+            else
+              echo "has-notes=false" >> $GITHUB_OUTPUT
+              echo "notes=Failed to generate release notes for version $CURRENT_VERSION" >> $GITHUB_OUTPUT
+              echo "⚠️ Could not generate release notes for version $CURRENT_VERSION"
+            fi
+          fi

@@ -156,7 +215,7 @@ jobs:
          cat > release_body.md << 'EOF'
          # Release v${{ needs.detect-version-change.outputs.new-version }}

-          ${{ needs.extract-changelog.outputs.release-notes }}
+          ${{ needs.generate-release-notes.outputs.release-notes }}

          ---

@@ -206,8 +265,8 @@ jobs:
          echo "id=$RELEASE_ID" >> $GITHUB_OUTPUT
          echo "upload_url=https://uploads.github.com/repos/${{ github.repository }}/releases/$RELEASE_ID/assets{?name,label}" >> $GITHUB_OUTPUT

-  build-and-test:
-    name: Build and Test
+  build-and-verify:
+    name: Build and Verify
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'

@@ -226,43 +285,55 @@ jobs:
      - name: Build project
        run: npm run build

-      - name: Rebuild database
-        run: npm run rebuild
-
-      - name: Run tests
-        run: npm test
-        env:
-          CI: true
+      # Database is already built and committed during development
+      # Rebuilding here causes segfault due to memory pressure (exit code 139)
+      - name: Verify database exists
+        run: |
+          if [ ! -f "data/nodes.db" ]; then
+            echo "❌ Error: data/nodes.db not found"
+            echo "Please run 'npm run rebuild' locally and commit the database"
+            exit 1
+          fi
+          echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"
+
+      # Skip tests - they already passed in PR before merge
+      # Running them again on the same commit adds no safety, only time (~6-7 min)

      - name: Run type checking
        run: npm run typecheck

  publish-npm:
    name: Publish to NPM
    runs-on: ubuntu-latest
-    needs: [detect-version-change, build-and-test, create-release]
+    needs: [detect-version-change, build-and-verify, create-release]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'
          registry-url: 'https://registry.npmjs.org'

      - name: Install dependencies
        run: npm ci

      - name: Build project
        run: npm run build

-      - name: Rebuild database
-        run: npm run rebuild
+      # Database is already built and committed during development
+      - name: Verify database exists
+        run: |
+          if [ ! -f "data/nodes.db" ]; then
+            echo "❌ Error: data/nodes.db not found"
+            exit 1
+          fi
+          echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"

      - name: Sync runtime version
        run: npm run sync:runtime-version

@@ -290,6 +361,15 @@ jobs:
          const pkg = require('./package.json');
          pkg.name = 'n8n-mcp';
          pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
+          pkg.main = 'dist/index.js';
+          pkg.types = 'dist/index.d.ts';
+          pkg.exports = {
+            '.': {
+              types: './dist/index.d.ts',
+              require: './dist/index.js',
+              import: './dist/index.js'
+            }
+          };
          pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
          pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
          pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];

@@ -316,7 +396,7 @@ jobs:
          npm publish --access public
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

      - name: Clean up
        if: always()
        run: rm -rf npm-publish-temp

@@ -324,7 +404,7 @@ jobs:
  build-docker:
    name: Build and Push Docker Images
    runs-on: ubuntu-latest
-    needs: [detect-version-change, build-and-test]
+    needs: [detect-version-change, build-and-verify]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    permissions:
      contents: read

@@ -347,7 +427,18 @@ jobs:
            exit 1
          fi
          echo "✅ Sufficient disk space: ${AVAILABLE_GB}GB available"

+      - name: Sync runtime version for Docker
+        run: |
+          VERSION=$(node -p "require('./package.json').version")
+          node -e "
+          const fs = require('fs');
+          const pkg = JSON.parse(fs.readFileSync('package.runtime.json'));
+          pkg.version = '$VERSION';
+          fs.writeFileSync('package.runtime.json', JSON.stringify(pkg, null, 2) + '\n');
+          "
+          echo "✅ Synced package.runtime.json to version $VERSION"
+
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

@@ -382,7 +473,76 @@ jobs:
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

+      - name: Verify multi-arch manifest for latest tag
+        run: |
+          echo "Verifying multi-arch manifest for latest tag..."
+
+          # Retry with exponential backoff (registry propagation can take time)
+          MAX_ATTEMPTS=5
+          ATTEMPT=1
+          WAIT_TIME=2
+
+          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
+            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
+
+            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)
+
+            # Check for both platforms
+            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
+              echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
+              echo "$MANIFEST"
+              exit 0
+            fi
+
+            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
+              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
+              sleep $WAIT_TIME
+              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
+            fi
+
+            ATTEMPT=$((ATTEMPT + 1))
+          done
+
+          echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
+          echo "$MANIFEST"
+          exit 1
+
+      - name: Verify multi-arch manifest for version tag
+        run: |
+          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
+          echo "Verifying multi-arch manifest for version tag :$VERSION (without 'v' prefix)..."
+
+          # Retry with exponential backoff (registry propagation can take time)
+          MAX_ATTEMPTS=5
+          ATTEMPT=1
+          WAIT_TIME=2
+
+          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
+            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
+
+            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:$VERSION 2>&1 || true)
+
+            # Check for both platforms
+            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
+              echo "✅ Multi-arch manifest verified for $VERSION: both amd64 and arm64 present"
+              echo "$MANIFEST"
+              exit 0
+            fi
+
+            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
+              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
+              sleep $WAIT_TIME
+              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
+            fi
+
+            ATTEMPT=$((ATTEMPT + 1))
+          done
+
+          echo "❌ ERROR: Multi-arch manifest incomplete for version $VERSION after $MAX_ATTEMPTS attempts!"
+          echo "$MANIFEST"
+          exit 1
+
      - name: Extract metadata for Railway image
        id: meta-railway
        uses: docker/metadata-action@v5
8 .github/workflows/test.yml vendored

@@ -40,7 +40,7 @@ permissions:
jobs:
  test:
    runs-on: ubuntu-latest
-    timeout-minutes: 10 # Add a 10-minute timeout to prevent hanging
+    timeout-minutes: 15 # Increased from 10 to accommodate larger database with community nodes
    steps:
      - uses: actions/checkout@v4

@@ -72,6 +72,12 @@ jobs:
        run: npm run test:integration -- --reporter=default --reporter=junit
        env:
          CI: true
+          N8N_API_URL: ${{ secrets.N8N_API_URL }}
+          N8N_API_KEY: ${{ secrets.N8N_API_KEY }}
+          N8N_TEST_WEBHOOK_GET_URL: ${{ secrets.N8N_TEST_WEBHOOK_GET_URL }}
+          N8N_TEST_WEBHOOK_POST_URL: ${{ secrets.N8N_TEST_WEBHOOK_POST_URL }}
+          N8N_TEST_WEBHOOK_PUT_URL: ${{ secrets.N8N_TEST_WEBHOOK_PUT_URL }}
+          N8N_TEST_WEBHOOK_DELETE_URL: ${{ secrets.N8N_TEST_WEBHOOK_DELETE_URL }}

      # Generate test summary
      - name: Generate test summary
10 .gitignore vendored

@@ -89,6 +89,13 @@ docker-compose.override.yml
temp/
tmp/

+# Batch processing error files (may contain API tokens from templates)
+docs/batch_*.jsonl
+**/batch_*_error.jsonl
+
+# Local documentation and analysis files
+docs/local/
+
-# Database files
+# Database files - nodes.db is now tracked directly
# data/*.db

@@ -130,3 +137,6 @@ n8n-mcp-wrapper.sh
# MCP configuration files
.mcp.json
+
+# Telemetry configuration (user-specific)
+~/.n8n-mcp/
209 ANALYSIS_QUICK_REFERENCE.md Normal file

@@ -0,0 +1,209 @@
# N8N-MCP Validation Analysis: Quick Reference

**Analysis Date**: November 8, 2025 | **Data Period**: 90 days | **Sample Size**: 29,218 events

---

## The Core Finding

**Validation is working perfectly. Guidance is the problem.**

- 29,218 validation events successfully prevented bad deployments
- 100% of agents fix errors same-day (proving feedback works)
- 12.6% error rate for advanced users (who attempt complex workflows)
- High error volume = high usage, not a broken system

---

## Top 3 Problem Areas (75% of errors)

| Area | Errors | Root Cause | Quick Fix |
|------|--------|------------|-----------|
| **Workflow Structure** | 1,268 (26%) | JSON malformation | Better error messages with examples |
| **Connections** | 676 (14%) | Syntax unintuitive | Create connections guide with diagrams (see the sketch below this table) |
| **Required Fields** | 378 (8%) | Not marked upfront | Add "⚠️ REQUIRED" to tool responses |
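Since structure and connection errors dominate, a minimal hand-written example of the workflow shape these validators check may help. In n8n's format, every node needs a unique `id` and `name`, and `connections` is keyed by the *source node's name*, not its id; errors like "Duplicate node ID: undefined" typically suggest missing `id` fields. The node ids and parameters below are illustrative, not taken from the analyzed dataset.

```json
{
  "name": "Minimal webhook workflow",
  "nodes": [
    {
      "id": "uuid-1",
      "name": "Webhook",
      "type": "n8n-nodes-base.webhook",
      "typeVersion": 1,
      "position": [0, 0],
      "parameters": { "path": "mcp-test-get", "httpMethod": "GET" }
    },
    {
      "id": "uuid-2",
      "name": "Slack",
      "type": "n8n-nodes-base.slack",
      "typeVersion": 1,
      "position": [200, 0],
      "parameters": {}
    }
  ],
  "connections": {
    "Webhook": {
      "main": [[{ "node": "Slack", "type": "main", "index": 0 }]]
    }
  }
}
```

The doubly nested array under `"main"` (outputs, then the branches of each output) is the part users most often find unintuitive, which matches the 676 connection errors above.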
---

## Problem Nodes (By Frequency)

```
Webhook/Trigger ......... 127 failures (40 users)
Slack ................... 73 failures (2 users)
AI Agent ................ 36 failures (20 users)
HTTP Request ............ 31 failures (13 users)
OpenAI .................. 35 failures (8 users)
```

---

## Top 5 Validation Errors

1. **"Duplicate node ID: undefined"** (179)
   - Fix: Point to exact location + show example format

2. **"Single-node workflows only valid for webhooks"** (58)
   - Fix: Create webhook guide explaining rule

3. **"responseNode requires onError: continueRegularOutput"** (57)
   - Fix: Same guide + inline error context

4. **"Required property X cannot be empty"** (25)
   - Fix: Mark required fields before validation

5. **"Duplicate node name: undefined"** (61)
   - Fix: Related to structural issues, same solution as #1

---

## Success Indicators

✓ **Agents learn from errors**: 100% same-day correction rate
✓ **Validation catches issues**: Prevents bad deployments
✓ **Feedback is clear**: Quick fixes show error messages work
✓ **No systemic failures**: No "unfixable" errors

---

## What Works Well

- Error messages lead to immediate corrections
- Agents retry and succeed same-day
- Validation prevents broken workflows
- 9,021 users actively using system

---

## What Needs Improvement

1. Required fields not marked in tool responses
2. Error messages don't show valid options for enums
3. Workflow structure documentation lacks examples
4. Connection syntax unintuitive/undocumented
5. Some error messages too generic

---

## Implementation Plan

### Phase 1 (2 weeks): Quick Wins
- Enhanced error messages (location + example)
- Required field markers in tools
- Webhook configuration guide
- **Expected Impact**: 25-30% failure reduction

### Phase 2 (2 weeks): Documentation
- Enum value suggestions in validation
- Workflow connections guide
- Error handler configuration guide
- AI Agent validation improvements
- **Expected Impact**: Additional 15-20% reduction

### Phase 3 (2 weeks): Advanced Features
- Improved search with config hints
- Node type fuzzy matching
- KPI tracking setup
- Test coverage
- **Expected Impact**: Additional 10-15% reduction

**Total Impact**: 50-65% failure reduction (target: 6-7% error rate)

---

## Key Metrics

| Metric | Current | Target | Timeline |
|--------|---------|--------|----------|
| Validation failure rate | 12.6% | 6-7% | 6 weeks |
| First-attempt success | ~77% | 85%+ | 6 weeks |
| Retry success | 100% | 100% | N/A |
| Webhook failures | 127 | <30 | Week 2 |
| Connection errors | 676 | <270 | Week 4 |

---

## Files Delivered

1. **VALIDATION_ANALYSIS_REPORT.md** (27KB)
   - Complete analysis with 16 SQL queries
   - Detailed findings by category
   - 8 actionable recommendations

2. **VALIDATION_ANALYSIS_SUMMARY.md** (13KB)
   - Executive summary (one-page)
   - Key metrics scorecard
   - Top recommendations with ROI

3. **IMPLEMENTATION_ROADMAP.md** (4.3KB)
   - 6-week implementation plan
   - Phase-by-phase breakdown
   - Code locations and effort estimates

4. **ANALYSIS_QUICK_REFERENCE.md** (this file)
   - Quick lookup reference
   - Top problems at a glance
   - Decision-making summary

---

## Next Steps

1. **Week 1**: Review analysis + get team approval
2. **Week 2**: Start Phase 1 (error messages + markers)
3. **Week 4**: Deploy Phase 1 + start Phase 2
4. **Week 6**: Deploy Phase 2 + start Phase 3
5. **Week 8**: Deploy Phase 3 + measure impact
6. **Week 9+**: Monitor KPIs + iterate

---

## Key Recommendations Priority

### HIGH (Do First - Week 1-2)
1. Enhance structure error messages
2. Add required field markers to tools
3. Create webhook configuration guide

### MEDIUM (Do Next - Week 3-4)
4. Add enum suggestions to validation responses
5. Create workflow connections guide
6. Add AI Agent node validation

### LOW (Do Later - Week 5-6)
7. Enhance search with config hints
8. Build fuzzy node matcher
9. Setup KPI tracking

---

## Discussion Points

**Q: Why don't we just weaken validation?**
A: Validation prevents 29,218 bad deployments. That's its job. We improve guidance instead.

**Q: Are agents really learning from errors?**
A: Yes, 100% same-day recovery across 661 user-date pairs with errors.

**Q: Why do documentation readers have higher error rates?**
A: They attempt more complex workflows (6.8x more attempts). Success rate is still 87.4%.

**Q: Which node needs the most help?**
A: Webhook/Trigger configuration (127 failures). Most urgent fix.

**Q: Can we hit 50% reduction in 6 weeks?**
A: Yes, analysis shows 50-65% reduction is achievable with these changes.

---

## Contact & Questions

For detailed information:
- Full analysis: `VALIDATION_ANALYSIS_REPORT.md`
- Executive summary: `VALIDATION_ANALYSIS_SUMMARY.md`
- Implementation plan: `IMPLEMENTATION_ROADMAP.md`

---

**Report Status**: Complete and Ready for Action
**Confidence Level**: High (9,021 users, 29,218 events, comprehensive analysis)
**Generated**: November 8, 2025
229
CHANGELOG.md
Normal file
229
CHANGELOG.md
Normal file
@@ -0,0 +1,229 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [2.33.0] - 2026-01-08

### Added

**AI-Powered Documentation for Community Nodes**

Added AI-generated documentation summaries for 537 community nodes, making them accessible through the MCP `get_node` tool.

**Features:**
- **README Fetching**: Automatically fetches README content from the npm registry for all community nodes (sketch below)
- **AI Summary Generation**: Uses a local LLM (Qwen or compatible) to generate structured documentation summaries
- **MCP Integration**: AI summaries exposed in `get_node` with `mode='docs'`
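
For context, a minimal sketch of the README fetching step, assuming the npm registry packument exposes a top-level `readme` field (the function name and error handling are illustrative, not the project's exact `fetchPackageWithReadme()` implementation):

```typescript
import axios from 'axios';

// Fetch a community node package's README text from the npm registry.
async function fetchNpmReadme(packageName: string): Promise<string | null> {
  const url = `https://registry.npmjs.org/${encodeURIComponent(packageName)}`;
  const { data } = await axios.get(url, { timeout: 10_000 });
  // Some packuments omit `readme`; callers treat null as "no docs available".
  return typeof data.readme === 'string' && data.readme.length > 0 ? data.readme : null;
}
```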

**AI Documentation Structure:**
```json
{
  "aiDocumentationSummary": {
    "purpose": "What this node does",
    "capabilities": ["key features"],
    "authentication": "API key, OAuth, etc.",
    "commonUseCases": ["practical examples"],
    "limitations": ["known caveats"],
    "relatedNodes": ["related n8n nodes"]
  },
  "aiSummaryGeneratedAt": "2026-01-08T10:45:31.000Z"
}
```

**New CLI Commands:**
```bash
npm run generate:docs                 # Full generation (README + AI summary)
npm run generate:docs:readme-only     # Only fetch READMEs from npm
npm run generate:docs:summary-only    # Only generate AI summaries
npm run generate:docs:incremental     # Skip nodes with existing data
npm run generate:docs:stats           # Show documentation statistics
npm run migrate:readme-columns        # Migrate database schema
```

**Environment Variables:**
```bash
N8N_MCP_LLM_BASE_URL=http://localhost:1234/v1   # LLM server URL
N8N_MCP_LLM_MODEL=qwen3-4b-thinking-2507        # Model name
N8N_MCP_LLM_TIMEOUT=60000                       # Request timeout (ms)
```

**Files Added:**
- `src/community/documentation-generator.ts` - LLM integration with Zod validation
- `src/community/documentation-batch-processor.ts` - Batch processing with progress tracking
- `src/scripts/generate-community-docs.ts` - CLI entry point
- `src/scripts/migrate-readme-columns.ts` - Database migration script

**Files Modified:**
- `src/database/schema.sql` - Added `npm_readme`, `ai_documentation_summary`, `ai_summary_generated_at` columns
- `src/database/node-repository.ts` - Added AI documentation methods and fields
- `src/community/community-node-fetcher.ts` - Added `fetchPackageWithReadme()` and batch fetching
- `src/community/index.ts` - Exported new classes
- `src/mcp/server.ts` - Added AI documentation to `get_node` docs mode response

**Statistics:**
- 538/547 community nodes have README content
- 537/547 community nodes have AI-generated summaries

## [2.32.1] - 2026-01-08

### Fixed

- **Community node case sensitivity bug**: Fixed `extractNodeNameFromPackage` to use lowercase node names, matching n8n's community node convention (e.g., `chatwoot` instead of `Chatwoot`). This resolves validation failures for community nodes with incorrect casing.
- **Case-insensitive node lookup**: Added a fallback in `getNode` to handle case differences between stored and requested node types for better robustness (see the sketch below).
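
A minimal sketch of that fallback, assuming an in-memory map of stored node types (the names are illustrative, not the repository's exact API):

```typescript
interface NodeRecord { nodeType: string; displayName: string }

function getNodeCaseInsensitive(
  nodes: Map<string, NodeRecord>,
  requested: string
): NodeRecord | undefined {
  // Fast path: exact match.
  const exact = nodes.get(requested);
  if (exact) return exact;
  // Fallback: compare lowercased keys, so a request for
  // "n8n-nodes-chatwoot.Chatwoot" still resolves to the stored ".chatwoot".
  const lower = requested.toLowerCase();
  for (const [key, record] of nodes) {
    if (key.toLowerCase() === lower) return record;
  }
  return undefined;
}
```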

## [2.32.0] - 2026-01-07

### Added

**Community Nodes Support (Issues #23, #490)**

Added comprehensive support for n8n community nodes, expanding the node database from 537 core nodes to 1,084 total nodes (537 core + 547 community).

**New Features:**
- **547 community nodes** indexed (301 verified + 246 popular npm packages)
- **`source` filter** for `search_nodes`: Filter by `all`, `core`, `community`, or `verified`
- **Community metadata** in search results: `isCommunity`, `isVerified`, `authorName`, `npmDownloads`
- **Full schema support** for verified community nodes (no additional parsing needed)

**Data Sources:**
- Verified nodes fetched from the n8n Strapi API (`api.n8n.io/api/community-nodes`)
- Popular npm packages from the npm registry (keyword: `n8n-community-node-package`)

**New CLI Commands:**
```bash
npm run fetch:community            # Full rebuild (verified + top 100 npm)
npm run fetch:community:verified   # Verified nodes only (fast)
npm run fetch:community:update     # Incremental update (skip existing)
```

**Example Usage:**
```javascript
// Search only community nodes
search_nodes({query: "scraping", source: "community"})

// Search verified community nodes
search_nodes({query: "pdf", source: "verified"})

// Results include community metadata
{
  nodeType: "n8n-nodes-brightdata.brightData",
  displayName: "BrightData",
  isCommunity: true,
  isVerified: true,
  authorName: "brightdata.com",
  npmDownloads: 1234
}
```

**Files Added:**
- `src/community/community-node-service.ts` - Business logic for syncing community nodes
- `src/community/community-node-fetcher.ts` - API integration for Strapi and npm
- `src/scripts/fetch-community-nodes.ts` - CLI script for fetching community nodes

**Files Modified:**
- `src/database/schema.sql` - Added community columns and indexes
- `src/database/node-repository.ts` - Extended for community node fields
- `src/mcp/tools.ts` - Added `source` parameter to `search_nodes`
- `src/mcp/server.ts` - Added source filtering and community metadata to results
- `src/mcp/tool-docs/discovery/search-nodes.ts` - Updated documentation

### Fixed

**Dynamic AI Tool Nodes Not Recognized by Validator (Issue #522)**

Fixed a validator false positive where dynamically generated AI Tool nodes like `googleDriveTool` and `googleSheetsTool` were incorrectly reported as "unknown node type".

**Root Cause:** n8n creates Tool variants at runtime when ANY node is connected to an AI Agent's tool slot (e.g., `googleDrive` → `googleDriveTool`). These dynamic nodes don't exist in npm packages, so the MCP database couldn't discover them during rebuild.

**Solution:** Added validation-time inference that checks whether the base node exists when a `*Tool` node type is not found. If the base node exists, the Tool variant is treated as valid with an informative warning.
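
A minimal sketch of that inference, assuming a lookup predicate for known node types (the names are illustrative):

```typescript
function classifyNodeType(
  nodeType: string,
  isKnown: (t: string) => boolean
): 'known' | 'inferred-tool-variant' | 'unknown' {
  if (isKnown(nodeType)) return 'known';
  if (nodeType.endsWith('Tool')) {
    // googleDriveTool -> googleDrive
    const base = nodeType.slice(0, -'Tool'.length);
    if (isKnown(base)) return 'inferred-tool-variant'; // warn: INFERRED_TOOL_VARIANT
  }
  return 'unknown'; // error: "Unknown node type"
}
```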

**Changes:**
- `workflow-validator.ts`: Added inference logic for dynamic Tool variants
- `node-similarity-service.ts`: Added a high-confidence (98%) suggestion for valid Tool variants
- Added 7 new unit tests for inferred tool variant functionality

**Behavior:**
- `googleDriveTool` with existing `googleDrive` → Warning: `INFERRED_TOOL_VARIANT`
- `googleSheetsTool` with existing `googleSheets` → Warning: `INFERRED_TOOL_VARIANT`
- `unknownNodeTool` without base node → Error: "Unknown node type"
- `supabaseTool` (in database) → Uses database record (no inference)

## [2.31.8] - 2026-01-07

### Deprecated

**USE_FIXED_HTTP Environment Variable (Issue #524)**

The `USE_FIXED_HTTP=true` environment variable is now deprecated. The fixed HTTP implementation does not support the SSE (Server-Sent Events) streaming required by clients like OpenAI Codex.

**What changed:**
- `SingleSessionHTTPServer` is now the default HTTP implementation
- Removed `USE_FIXED_HTTP` from Docker, Railway, and documentation examples
- Added deprecation warnings when `USE_FIXED_HTTP=true` is detected
- Renamed the npm script to `start:http:fixed:deprecated`

**Migration:** Simply unset `USE_FIXED_HTTP` or remove it from your environment. The `SingleSessionHTTPServer` supports both JSON-RPC and SSE streaming automatically.

**Why this matters:**
- OpenAI Codex and other SSE clients now work correctly
- The server properly handles `Accept: text/event-stream` headers
- Returns the correct `Content-Type: text/event-stream` for SSE requests

The deprecated implementation will be removed in a future major version.

## [2.31.7] - 2026-01-06

### Changed

- Updated n8n from 2.1.5 to 2.2.3
- Updated n8n-core from 2.1.4 to 2.2.2
- Updated n8n-workflow from 2.1.1 to 2.2.2
- Updated @n8n/n8n-nodes-langchain from 2.1.4 to 2.2.2
- Rebuilt node database with 540 nodes (434 from n8n-nodes-base, 106 from @n8n/n8n-nodes-langchain)

## [2.31.6] - 2026-01-03

### Changed

**Dependencies Update**

- Updated n8n from 2.1.4 to 2.1.5
- Updated n8n-core from 2.1.3 to 2.1.4
- Updated @n8n/n8n-nodes-langchain from 2.1.3 to 2.1.4
- Rebuilt node database with 540 nodes (434 from n8n-nodes-base, 106 from @n8n/n8n-nodes-langchain)

## [2.31.5] - 2026-01-02

### Added

**MCP Tool Annotations (PR #512)**

Added MCP tool annotations to all 20 tools following the [MCP specification](https://spec.modelcontextprotocol.io/specification/2025-03-26/server/tools/#annotations). These annotations help AI assistants understand tool behavior and capabilities (a sketch follows the list below).

**Annotations added:**
- `title`: Human-readable name for each tool
- `readOnlyHint`: True for tools that don't modify state (11 tools)
- `destructiveHint`: True for delete operations (3 tools)
- `idempotentHint`: True for operations that produce the same result when called repeatedly (14 tools)
- `openWorldHint`: True for tools accessing the external n8n API (13 tools)
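
As a sketch, this is what an annotated tool definition can look like (values chosen for illustration; the repository's actual definitions may differ):

```typescript
const deleteWorkflowTool = {
  name: 'n8n_delete_workflow',
  description: 'Delete a workflow by ID',
  inputSchema: {
    type: 'object',
    properties: { id: { type: 'string' } },
    required: ['id'],
  },
  annotations: {
    title: 'Delete Workflow',
    readOnlyHint: false,
    destructiveHint: true, // removes data irreversibly
    idempotentHint: true,  // deleting twice leaves the same end state
    openWorldHint: true,   // talks to an external n8n API
  },
};
```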

**Documentation tools** (7): All marked `readOnlyHint=true`, `idempotentHint=true`
- `tools_documentation`, `search_nodes`, `get_node`, `validate_node`, `get_template`, `search_templates`, `validate_workflow`

**Management tools** (13): All marked `openWorldHint=true`
- Read-only: `n8n_get_workflow`, `n8n_list_workflows`, `n8n_validate_workflow`, `n8n_health_check`
- Idempotent updates: `n8n_update_full_workflow`, `n8n_update_partial_workflow`, `n8n_autofix_workflow`
- Destructive: `n8n_delete_workflow`, `n8n_executions` (delete action), `n8n_workflow_versions` (delete/truncate)

## [2.31.4] - 2026-01-02

### Fixed

**Workflow Data Mangled During Serialization: snake_case Conversion (Issue #517)**

Fixed a critical bug where workflow mutation data was corrupted during serialization to Supabase, making 98.9% of collected workflow data invalid for n8n API operations.
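
To illustrate the failure mode (the exact transform involved may differ from this sketch): a blanket deep snake_case conversion applied before persistence mangles n8n's camelCase workflow fields, while a plain JSON round-trip preserves them.

```typescript
const node = { typeVersion: 2, webhookId: 'abc-123', parameters: { httpMethod: 'POST' } };

// What a snake_case transform effectively produces:
// { type_version: 2, webhook_id: 'abc-123', parameters: { http_method: 'POST' } }
// The n8n API expects camelCase, so such records are unusable.

// Safe serialization: a JSON round-trip keeps key casing intact.
const safe = JSON.parse(JSON.stringify(node)); // keys unchanged
```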

CLAUDE.md (44 lines changed)
@@ -28,8 +28,15 @@ src/
│   ├── enhanced-config-validator.ts # Operation-aware validation (NEW in v2.4.2)
│   ├── node-specific-validators.ts # Node-specific validation logic (NEW in v2.4.2)
│   ├── property-dependencies.ts # Dependency analysis (NEW in v2.4)
│   ├── type-structure-service.ts # Type structure validation (NEW in v2.22.21)
│   ├── expression-validator.ts # n8n expression syntax validation (NEW in v2.5.0)
│   └── workflow-validator.ts # Complete workflow validation (NEW in v2.5.0)
├── types/
│   ├── type-structures.ts # Type structure definitions (NEW in v2.22.21)
│   ├── instance-context.ts # Multi-tenant instance configuration
│   └── session-state.ts # Session persistence types (NEW in v2.24.1)
├── constants/
│   └── type-structures.ts # 22 complete type structures (NEW in v2.22.21)
├── templates/
│   ├── template-fetcher.ts # Fetches templates from n8n.io API (NEW in v2.4.1)
│   ├── template-repository.ts # Template database operations (NEW in v2.4.1)
@@ -40,6 +47,7 @@ src/
│   ├── test-nodes.ts # Critical node tests
│   ├── test-essentials.ts # Test new essentials tools (NEW in v2.4)
│   ├── test-enhanced-validation.ts # Test enhanced validation (NEW in v2.4.2)
│   ├── test-structure-validation.ts # Test type structure validation (NEW in v2.22.21)
│   ├── test-workflow-validation.ts # Test workflow validation (NEW in v2.5.0)
│   ├── test-ai-workflow-validation.ts # Test AI workflow validation (NEW in v2.5.1)
│   ├── test-mcp-tools.ts # Test MCP tool enhancements (NEW in v2.5.1)
@@ -58,7 +66,9 @@ src/
│   ├── console-manager.ts # Console output isolation (NEW in v2.3.1)
│   └── logger.ts # Logging utility with HTTP awareness
├── http-server-single-session.ts # Single-session HTTP server (NEW in v2.3.1)
│                                 # Session persistence API (NEW in v2.24.1)
├── mcp-engine.ts # Clean API for service integration (NEW in v2.3.1)
│               # Session persistence wrappers (NEW in v2.24.1)
└── index.ts # Library exports
```

@@ -76,6 +86,7 @@ npm run test:unit # Run unit tests only
npm run test:integration # Run integration tests
npm run test:coverage # Run tests with coverage report
npm run test:watch # Run tests in watch mode
npm run test:structure-validation # Test type structure validation (Phase 3)

# Run a single test file
npm test -- tests/unit/services/property-filter.test.ts

@@ -126,6 +137,7 @@ npm run test:templates # Test template functionality
4. **Service Layer** (`services/`)
   - **Property Filter**: Reduces node properties to AI-friendly essentials
   - **Config Validator**: Multi-profile validation system
   - **Type Structure Service**: Validates complex type structures (filter, resourceMapper, etc.)
   - **Expression Validator**: Validates n8n expression syntax
   - **Workflow Validator**: Complete workflow structure validation

@@ -183,6 +195,35 @@ The MCP server exposes tools in several categories:
### Development Best Practices
- Run typecheck and lint after every code change

### Session Persistence Feature (v2.24.1)

**Location:**
- Types: `src/types/session-state.ts`
- Implementation: `src/http-server-single-session.ts` (lines 698-702, 1444-1584)
- Wrapper: `src/mcp-engine.ts` (lines 123-169)
- Tests: `tests/unit/http-server/session-persistence.test.ts`, `tests/unit/mcp-engine/session-persistence.test.ts`

**Key Features:**
- **Export/Restore API**: `exportSessionState()` and `restoreSessionState()` methods (usage sketch below)
- **Multi-tenant support**: Enables zero-downtime deployments for SaaS platforms
- **Security-first**: API keys are exported as plaintext - downstream MUST encrypt
- **Dormant sessions**: Restored sessions recreate transports on first request
- **Automatic expiration**: Respects the `sessionTimeout` setting (default 30 min)
- **MAX_SESSIONS limit**: Caps at 100 concurrent sessions (configurable via the N8N_MCP_MAX_SESSIONS env var)
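
A hedged usage sketch of the export/restore cycle during a zero-downtime deploy; `exportSessionState()`/`restoreSessionState()` come from the docs above, while `encrypt`/`decrypt` stand in for whatever the host platform provides:

```typescript
declare function encrypt(data: unknown): string;
declare function decrypt(blob: string): unknown;

interface SessionEngine {
  exportSessionState(): Promise<unknown>;
  restoreSessionState(state: unknown): Promise<void>;
}

async function handOverSessions(oldEngine: SessionEngine, next: SessionEngine) {
  const state = await oldEngine.exportSessionState(); // API keys are plaintext here
  const blob = encrypt(state);                        // downstream MUST encrypt
  await next.restoreSessionState(decrypt(blob));
  // Restored sessions stay dormant until their first request recreates transports.
}
```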

**Important Implementation Notes:**
- Only exports sessions with a valid n8nApiUrl and n8nApiKey in context
- Skips expired sessions during both export and restore
- Uses `validateInstanceContext()` for data integrity checks
- Handles null/invalid sessions gracefully with warnings
- Session metadata (timestamps) and context (credentials) are persisted
- Transport and server objects are NOT persisted (recreated on demand)

**Testing:**
- 22 unit tests covering export, restore, edge cases, and round-trip cycles
- Tests use current timestamps to avoid expiration issues
- Integration with multi-tenant backends is documented in README.md

# important-instruction-reminders
Do what has been asked; nothing more, nothing less.
NEVER create files unless they're absolutely necessary for achieving your goal.
@@ -192,4 +233,5 @@ NEVER proactively create documentation files (*.md) or README files. Only create
- When the user asks to review issues, you should use the GH CLI to get the issue and all the comments
- When a task can be divided into separate subtasks, you should spawn separate sub-agents to handle them in parallel
- Use the best sub-agent for the task as per their descriptions
- Do not use hyperbolic or dramatic language in comments and documentation
- Add to every commit and PR: Conceived by Romuald Członkowski - and then link to www.aiadvisors.pl/en. Don't add it in conversations
Dockerfile (20 lines changed)
@@ -13,9 +13,9 @@ COPY tsconfig*.json ./
RUN --mount=type=cache,target=/root/.npm \
    echo '{}' > package.json && \
    npm install --no-save typescript@^5.8.3 @types/node@^22.15.30 @types/express@^5.0.3 \
-     @modelcontextprotocol/sdk@^1.12.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
+     @modelcontextprotocol/sdk@1.20.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
    n8n-workflow@^1.96.0 uuid@^11.0.5 @types/uuid@^10.0.0 \
-     openai@^4.77.0 zod@^3.24.1 lru-cache@^11.2.1
+     openai@^4.77.0 zod@3.24.1 lru-cache@^11.2.1 @supabase/supabase-js@^2.57.4

# Copy source and build
COPY src ./src

@@ -34,9 +34,13 @@ RUN apk add --no-cache curl su-exec && \
# Copy runtime-only package.json
COPY package.runtime.json package.json

- # Install runtime dependencies with cache mount
+ # Install runtime dependencies with better-sqlite3 compilation
+ # Build tools (python3, make, g++) are installed, used for compilation, then removed
+ # This enables native SQLite (better-sqlite3) instead of sql.js, preventing memory leaks
RUN --mount=type=cache,target=/root/.npm \
-     npm install --production --no-audit --no-fund
+     apk add --no-cache python3 make g++ && \
+     npm install --production --no-audit --no-fund && \
+     apk del python3 make g++

# Copy built application
COPY --from=builder /app/dist ./dist

@@ -74,7 +78,11 @@ USER nodejs
# Set Docker environment flag
ENV IS_DOCKER=true

- # Expose HTTP port
+ # Telemetry: Anonymous usage statistics are ENABLED by default
+ # To opt-out, uncomment the following line:
+ # ENV N8N_MCP_TELEMETRY_DISABLED=true
+
+ # Expose HTTP port (default 3000, configurable via PORT environment variable at runtime)
EXPOSE 3000

# Set stop signal to SIGTERM (default, but explicit is better)

@@ -82,7 +90,7 @@ STOPSIGNAL SIGTERM

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-     CMD curl -f http://127.0.0.1:3000/health || exit 1
+     CMD sh -c 'curl -f http://127.0.0.1:${PORT:-3000}/health || exit 1'

# Optimized entrypoint
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
@@ -25,16 +25,20 @@ RUN npm run build
FROM node:22-alpine AS runtime
WORKDIR /app

- # Install system dependencies
- RUN apk add --no-cache curl python3 make g++ && \
+ # Install runtime dependencies
+ RUN apk add --no-cache curl && \
    rm -rf /var/cache/apk/*

# Copy runtime-only package.json
COPY package.runtime.json package.json

- # Install only production dependencies
- RUN npm install --production --no-audit --no-fund && \
-     npm cache clean --force
+ # Install production dependencies with temporary build tools
+ # Build tools (python3, make, g++) enable better-sqlite3 compilation (native SQLite)
+ # They are removed after installation to reduce image size and attack surface
+ RUN apk add --no-cache python3 make g++ && \
+     npm install --production --no-audit --no-fund && \
+     npm cache clean --force && \
+     apk del python3 make g++

# Copy built application from builder stage
COPY --from=builder /app/dist ./dist

@@ -70,7 +74,8 @@ ENV AUTH_TOKEN="REPLACE_THIS_AUTH_TOKEN_32_CHARS_MIN_abcdefgh"
ENV NODE_ENV=production
ENV IS_DOCKER=true
ENV MCP_MODE=http
- ENV USE_FIXED_HTTP=true
+ # NOTE: USE_FIXED_HTTP is deprecated. SingleSessionHTTPServer is now the default.
+ # See: https://github.com/czlonkowski/n8n-mcp/issues/524
ENV LOG_LEVEL=info
ENV TRUST_PROXY=1
ENV HOST=0.0.0.0
@@ -1,5 +1,87 @@
# n8n Update Process - Quick Reference

## ⚡ Recommended Fast Workflow (2025-11-04)

**CRITICAL FIRST STEP**: Check existing releases to avoid version conflicts!

```bash
# 1. CHECK EXISTING RELEASES FIRST (prevents version conflicts!)
gh release list | head -5
# Look at the latest version - your new version must be higher!

# 2. Switch to main and pull
git checkout main && git pull

# 3. Check for updates (dry run)
npm run update:n8n:check

# 4. Run update and skip tests (we'll test in CI)
yes y | npm run update:n8n

# 5. Create feature branch
git checkout -b update/n8n-X.X.X

# 6. Update version in package.json (must be HIGHER than latest release!)
# Edit: "version": "2.XX.X" (not the version from the release list!)

# 7. Update CHANGELOG.md
# - Change version number to match package.json
# - Update date to today
# - Update dependency versions

# 8. Update README badge
# Edit line 8: Change n8n version badge to new n8n version

# 9. Commit and push
git add -A
git commit -m "chore: update n8n to X.X.X and bump version to 2.XX.X

- Updated n8n from X.X.X to X.X.X
- Updated n8n-core from X.X.X to X.X.X
- Updated n8n-workflow from X.X.X to X.X.X
- Updated @n8n/n8n-nodes-langchain from X.X.X to X.X.X
- Rebuilt node database with XXX nodes (XXX from n8n-nodes-base, XXX from @n8n/n8n-nodes-langchain)
- Updated README badge with new n8n version
- Updated CHANGELOG with dependency changes

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>"

git push -u origin update/n8n-X.X.X

# 10. Create PR
gh pr create --title "chore: update n8n to X.X.X" --body "Updates n8n and all related dependencies to the latest versions..."

# 11. After PR is merged, verify release triggered
gh release list | head -1
# If the new version appears, you're done!
# If not, the version might have already been released - bump version again and create new PR
```

### Why This Workflow?

✅ **Fast**: Skip local tests (2-3 min saved) - CI runs them anyway
✅ **Safe**: Unit tests in CI verify compatibility
✅ **Clean**: All changes in one PR with proper tracking
✅ **Automatic**: Release workflow triggers on merge if the version is new

### Common Issues

**Problem**: Release workflow doesn't trigger after merge
**Cause**: Version number was already released (check `gh release list`)
**Solution**: Create a new PR bumping the version by one patch number

**Problem**: Integration tests fail in CI with "unauthorized"
**Cause**: n8n test instance credentials expired (infrastructure issue)
**Solution**: Ignore if unit tests pass - this is not a code problem

**Problem**: CI takes 8+ minutes
**Reason**: Integration tests need a live n8n instance (slow)
**Normal**: Unit tests (~2 min) + integration tests (~6 min) = ~8 min total

## Quick One-Command Update

For a complete update with tests and publish preparation:

@@ -99,12 +181,14 @@ This command:

## Important Notes

- 1. **Always run on main branch** - Make sure you're on main and it's clean
- 2. **The update script is smart** - It automatically syncs all n8n dependencies to compatible versions
- 3. **Tests are required** - The publish script now runs tests automatically
- 4. **Database rebuild is automatic** - The update script handles this for you
- 5. **Template sanitization is automatic** - Any API tokens in workflow templates are replaced with placeholders
- 6. **Docker image builds automatically** - Pushing to GitHub triggers the workflow
+ 1. **ALWAYS check existing releases first** - Use `gh release list` to see what versions are already released. Your new version must be higher!
+ 2. **Release workflow only triggers on version CHANGE** - If you merge a PR with an already-released version (e.g., 2.22.8), the workflow won't run. You'll need to bump to a new version (e.g., 2.22.9) and create another PR.
+ 3. **Integration test failures in CI are usually infrastructure issues** - If unit tests pass but integration tests fail with "unauthorized", this is typically because the test n8n instance credentials need updating. The code itself is fine.
+ 4. **Skip local tests - let CI handle them** - Running tests locally adds 2-3 minutes with no benefit since CI runs them anyway. The fast workflow skips local tests.
+ 5. **The update script is smart** - It automatically syncs all n8n dependencies to compatible versions
+ 6. **Database rebuild is automatic** - The update script handles this for you
+ 7. **Template sanitization is automatic** - Any API tokens in workflow templates are replaced with placeholders
+ 8. **Docker image builds automatically** - Pushing to GitHub triggers the workflow

## GitHub Push Protection

@@ -115,11 +199,27 @@ As of July 2025, GitHub's push protection may block database pushes if they cont
3. If the push is still blocked, use the GitHub web interface to review and allow the push

## Time Estimate

### Fast Workflow (Recommended)
- Local work: ~2-3 minutes
  - npm install and database rebuild: ~2-3 minutes
  - File edits (CHANGELOG, README, package.json): ~30 seconds
  - Git operations (commit, push, create PR): ~30 seconds
- CI testing after PR creation: ~8-10 minutes (runs automatically)
  - Unit tests: ~2 minutes
  - Integration tests: ~6 minutes (may fail with infrastructure issues - ignore if unit tests pass)
  - Other checks: ~1 minute

**Total hands-on time: ~3 minutes** (then wait for CI)

### Full Workflow with Local Tests
- Total time: ~5-7 minutes
  - Test suite: ~2.5 minutes
  - npm install and database rebuild: ~2-3 minutes
  - The rest: seconds

**Note**: The fast workflow is recommended since CI runs the same tests anyway.

## Troubleshooting

If tests fail:
MEMORY_TEMPLATE_UPDATE.md (new file, 336 lines)
@@ -0,0 +1,336 @@
# Template Update Process - Quick Reference

## Overview

The n8n-mcp project maintains a database of workflow templates from n8n.io. This guide explains how to update the template database incrementally without rebuilding it from scratch.

## Current Database State

As of the last update:
- **2,598 templates** in the database
- Templates from the last 12 months
- Latest template: September 12, 2025

## Quick Commands

### Incremental Update (Recommended)
```bash
# Build if needed
npm run build

# Fetch only NEW templates (5-10 minutes)
npm run fetch:templates:update
```

### Full Rebuild (Rare)
```bash
# Rebuild entire database from scratch (30-40 minutes)
npm run fetch:templates
```

## How It Works

### Incremental Update Mode (`--update`)

The incremental update is **smart and efficient** (see the sketch after this list):

1. **Loads existing template IDs** from the database (~2,598 templates)
2. **Fetches the template list** from the n8n.io API (all templates from the last 12 months)
3. **Filters** to find only NEW templates not in the database
4. **Fetches details** for new templates only (saves time and API calls)
5. **Saves** new templates to the database (existing ones untouched)
6. **Rebuilds the FTS5** search index for new templates
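
A minimal sketch of that flow, assuming illustrative fetcher/repository signatures rather than the project's exact API:

```typescript
interface TemplateStub { id: number }

async function incrementalUpdate(
  existingIds: Set<number>,                       // step 1: loaded from the DB
  listTemplates: () => Promise<TemplateStub[]>,   // step 2: n8n.io API
  fetchDetail: (id: number) => Promise<unknown>,
  save: (detail: unknown) => Promise<void>
) {
  const all = await listTemplates();
  const fresh = all.filter(t => !existingIds.has(t.id));   // step 3
  console.log(`Update mode: ${fresh.length} new templates (skipping ${existingIds.size} existing)`);
  for (const t of fresh) {
    await save(await fetchDetail(t.id));                   // steps 4-5
    await new Promise(r => setTimeout(r, 150));            // built-in API delay
  }
  // Step 6, the FTS5 reindex, runs once after the batch (not shown).
}
```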

### Key Benefits

✅ **Non-destructive**: All existing templates preserved
✅ **Fast**: Only fetches new templates (5-10 min vs 30-40 min)
✅ **API friendly**: Reduces load on the n8n.io API
✅ **Safe**: Preserves AI-generated metadata
✅ **Smart**: Automatically skips duplicates

## Performance Comparison

| Mode | Templates Fetched | Time | Use Case |
|------|-------------------|------|----------|
| **Update** | Only new (~50-200) | 5-10 min | Regular updates |
| **Rebuild** | All (~8,000+) | 30-40 min | Initial setup or corruption |

## Command Options

### Basic Update
```bash
npm run fetch:templates:update
```

### Full Rebuild
```bash
npm run fetch:templates
```

### With Metadata Generation
```bash
# Update templates and generate AI metadata
npm run fetch:templates -- --update --generate-metadata

# Or just generate metadata for existing templates
npm run fetch:templates -- --metadata-only
```

### Help
```bash
npm run fetch:templates -- --help
```

## Update Frequency

Recommended update schedule:
- **Weekly**: Run an incremental update to get the latest templates
- **Monthly**: Review database statistics
- **As needed**: Rebuild only if database corruption is suspected

## Template Filtering

The fetcher automatically filters templates (a predicate sketch follows):
- ✅ **Includes**: Templates from the last 12 months
- ✅ **Includes**: Templates with >10 views
- ❌ **Excludes**: Templates with ≤10 views (too niche)
- ❌ **Excludes**: Templates older than 12 months
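
Expressed as a predicate, the rule is simply (thresholds taken from this document):

```typescript
function keepTemplate(t: { views: number; createdAt: Date }, now = new Date()): boolean {
  const cutoff = new Date(now);
  cutoff.setMonth(now.getMonth() - 12); // last 12 months only
  return t.views > 10 && t.createdAt >= cutoff;
}
```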

## Workflow

### Regular Update Workflow

```bash
# 1. Check current state
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# 2. Build project (if code changed)
npm run build

# 3. Run incremental update
npm run fetch:templates:update

# 4. Verify new templates added
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"
```

### After n8n Dependency Update

When you update n8n dependencies, templates remain compatible:
```bash
# 1. Update n8n (from MEMORY_N8N_UPDATE.md)
npm run update:all

# 2. Fetch new templates incrementally
npm run fetch:templates:update

# 3. Check how many templates were added
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# 4. Generate AI metadata for new templates (optional, requires OPENAI_API_KEY)
npm run fetch:templates -- --metadata-only

# 5. IMPORTANT: Sanitize templates before pushing database
npm run build
npm run sanitize:templates
```

Templates are independent of the n8n version - they're just workflow JSON data.

**CRITICAL**: Always run `npm run sanitize:templates` before pushing the database, to remove API tokens from template workflows.

**Note**: New templates fetched via `--update` mode will NOT have AI-generated metadata by default. Run `--metadata-only` separately to generate metadata for templates that don't have it yet.

## Troubleshooting

### No New Templates Found

This is normal! It means:
- All recent templates are already in your database
- n8n.io hasn't published many new templates recently
- Your database is up to date

```bash
📊 Update mode: 0 new templates to fetch (skipping 2598 existing)
✅ All templates already have metadata
```

### API Rate Limiting

If you hit rate limits:
- The fetcher includes built-in delays (150ms between requests)
- Wait a few minutes and try again
- Use `--update` mode instead of a full rebuild

### Database Corruption

If you suspect corruption:
```bash
# Full rebuild from scratch
npm run fetch:templates

# This will:
# - Drop and recreate the templates table
# - Fetch all templates fresh
# - Rebuild search indexes
```

## Database Schema

Templates are stored with:
- Basic info (id, name, description, author, views, created_at)
- Node types used (JSON array)
- The complete workflow (gzip compressed, base64 encoded; see the sketch below)
- AI-generated metadata (optional, requires an OpenAI API key)
- An FTS5 search index for fast text search
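
A sketch of that storage encoding using Node's zlib (the project's exact helper names may differ):

```typescript
import { gzipSync, gunzipSync } from 'zlib';

// Serialize a workflow to a compact base64 string for the templates table.
function compressWorkflow(workflow: object): string {
  return gzipSync(Buffer.from(JSON.stringify(workflow))).toString('base64');
}

// Reverse the encoding when reading a template back out.
function decompressWorkflow(blob: string): object {
  return JSON.parse(gunzipSync(Buffer.from(blob, 'base64')).toString('utf8'));
}
```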

## Metadata Generation

Generate AI metadata for templates:
```bash
# Requires OPENAI_API_KEY in .env
export OPENAI_API_KEY="sk-..."

# Generate for templates without metadata (recommended after incremental update)
npm run fetch:templates -- --metadata-only

# Generate during template fetch (slower, but automatic)
npm run fetch:templates:update -- --generate-metadata
```

**Important**: Incremental updates (`--update`) do NOT generate metadata by default. After running `npm run fetch:templates:update`, you'll have new templates without metadata. Run `--metadata-only` separately to generate metadata for them.

### Check Metadata Coverage

```bash
# See how many templates have metadata
sqlite3 data/nodes.db "SELECT
  COUNT(*) as total,
  SUM(CASE WHEN metadata_json IS NOT NULL THEN 1 ELSE 0 END) as with_metadata,
  SUM(CASE WHEN metadata_json IS NULL THEN 1 ELSE 0 END) as without_metadata
FROM templates"

# See recent templates without metadata
sqlite3 data/nodes.db "SELECT id, name, created_at
FROM templates
WHERE metadata_json IS NULL
ORDER BY created_at DESC
LIMIT 10"
```

Metadata includes:
- Categories
- Complexity level (simple/medium/complex)
- Use cases
- Estimated setup time
- Required services
- Key features
- Target audience

### Metadata Generation Troubleshooting

If metadata generation fails:

1. **Check the error file**: Errors are saved to `temp/batch/batch_*_error.jsonl`
2. **Common issues**:
   - `"Unsupported value: 'temperature'"` - The model doesn't support a custom temperature
   - `"Invalid request"` - Check that OPENAI_API_KEY is valid
   - Model availability issues
3. **Model**: Uses `gpt-5-mini-2025-08-07` by default
4. **Token limit**: 3,000 tokens per request for detailed metadata

The system will automatically:
- Process error files and assign default metadata to failed templates
- Save error details for debugging
- Continue processing even if some templates fail

**Example error handling**:
```bash
# If you see: "No output file available for batch job"
# Check: temp/batch/batch_*_error.jsonl for error details
# The system now automatically processes errors and generates default metadata
```

## Environment Variables

Optional configuration:
```bash
# OpenAI for metadata generation
OPENAI_API_KEY=sk-...
OPENAI_MODEL=gpt-4o-mini   # Default model
OPENAI_BATCH_SIZE=50       # Batch size for metadata generation

# Metadata generation limits
METADATA_LIMIT=100         # Max templates to process (0 = all)
```

## Statistics

After an update, check stats:
```bash
# Template count
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# Most recent template
sqlite3 data/nodes.db "SELECT MAX(created_at) FROM templates"

# Templates by view count
sqlite3 data/nodes.db "SELECT COUNT(*),
  CASE
    WHEN views < 50 THEN '<50'
    WHEN views < 100 THEN '50-100'
    WHEN views < 500 THEN '100-500'
    ELSE '500+'
  END as view_range
FROM templates GROUP BY view_range"
```

## Integration with n8n-mcp

Templates are available through MCP tools:
- `list_templates`: List all templates
- `get_template`: Get a specific template with its workflow
- `search_templates`: Search by keyword
- `list_node_templates`: Templates using specific nodes
- `get_templates_for_task`: Templates for common tasks
- `search_templates_by_metadata`: Advanced filtering

See `npm run test:templates` for usage examples.

## Time Estimates

Typical incremental update:
- Loading existing IDs: 1-2 seconds
- Fetching the template list: 2-3 minutes
- Filtering new templates: instant
- Fetching details for 100 new templates: ~15 seconds (0.15s each)
- Saving and indexing: 5-10 seconds
- **Total: 3-5 minutes**

Full rebuild:
- Fetching 8,000+ templates: 25-30 minutes
- Saving and indexing: 5-10 minutes
- **Total: 30-40 minutes**

## Best Practices

1. **Use incremental updates** for regular maintenance
2. **Rebuild only when necessary** (corruption, major changes)
3. **Generate metadata incrementally** to avoid OpenAI costs
4. **Monitor the template count** to verify updates are working
5. **Keep the database backed up** before major operations

## Next Steps

After updating templates:
1. Test template search: `npm run test:templates`
2. Verify the MCP tools work: Test in Claude Desktop
3. Check statistics in the database
4. Commit changes if desired (database changes)

## Related Documentation

- `MEMORY_N8N_UPDATE.md` - Updating n8n dependencies
- `CLAUDE.md` - Project overview and architecture
- `README.md` - User documentation
P0-R3-TEST-PLAN.md (new file, 484 lines)
@@ -0,0 +1,484 @@
# P0-R3 Feature Test Coverage Plan

## Executive Summary

This document outlines comprehensive test coverage for the P0-R3 feature (Template-based Configuration Examples). The feature adds real-world configuration examples from popular templates to the node search and essentials tools.

**Feature Overview:**
- New database table: `template_node_configs` (197 pre-extracted configurations)
- Enhanced tools: `search_nodes({includeExamples: true})` and `get_node_essentials({includeExamples: true})`
- Breaking changes: Removed the `get_node_for_task` tool

## Test Files Created

### Unit Tests

#### 1. `/tests/unit/scripts/fetch-templates-extraction.test.ts` ✅
**Purpose:** Test template extraction logic from `fetch-templates.ts`

**Coverage:**
- `extractNodeConfigs()` - 90%+ coverage
  - Valid workflows with multiple nodes
  - Empty workflows
  - Malformed compressed data
  - Invalid JSON
  - Nodes without parameters
  - Sticky note filtering
  - Credential handling
  - Expression detection
  - Special characters
  - Large workflows (100 nodes)

- `detectExpressions()` - 100% coverage (sketch below)
  - `={{...}}` syntax detection
  - `$json` references
  - `$node` references
  - Nested objects
  - Arrays
  - Null/undefined handling
  - Multiple expression types

**Test Count:** 27 tests
**Expected Coverage:** 92%+
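
For reference, a detector covering the cases listed above might look like this (a sketch; the actual `detectExpressions()` in `fetch-templates.ts` may differ):

```typescript
function detectExpressions(value: unknown): boolean {
  if (value == null) return false;                   // null/undefined handling
  if (typeof value === 'string') {
    return value.startsWith('={{')                   // ={{...}} syntax
      || value.includes('$json')                     // $json references
      || value.includes('$node');                    // $node references
  }
  if (Array.isArray(value)) return value.some(detectExpressions);
  if (typeof value === 'object') {
    return Object.values(value as Record<string, unknown>).some(detectExpressions);
  }
  return false;
}
```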

---

#### 2. `/tests/unit/mcp/search-nodes-examples.test.ts` ✅
**Purpose:** Test the `search_nodes` tool with the includeExamples parameter

**Coverage:**
- includeExamples parameter behavior
  - false: no examples returned
  - undefined: no examples returned (default)
  - true: examples returned
- Example data structure validation
- Top-2 limit enforcement
- Backward compatibility
- Performance (<100ms)
- Error handling (malformed JSON, database errors)
- searchNodesLIKE integration
- searchNodesFTS integration

**Test Count:** 12 tests
**Expected Coverage:** 85%+

---

#### 3. `/tests/unit/mcp/get-node-essentials-examples.test.ts` ✅
**Purpose:** Test the `get_node_essentials` tool with the includeExamples parameter

**Coverage:**
- includeExamples parameter behavior
- Full metadata structure
  - configuration object
  - source (template, views, complexity)
  - useCases (limited to 2)
  - metadata (hasCredentials, hasExpressions)
- Cache key differentiation
- Backward compatibility
- Performance (<100ms)
- Error handling
- Top-3 limit enforcement

**Test Count:** 13 tests
**Expected Coverage:** 88%+

---

### Integration Tests

#### 4. `/tests/integration/database/template-node-configs.test.ts` ✅
**Purpose:** Test database schema, migrations, and operations

**Coverage:**
- Schema validation
  - Table creation
  - All columns present
  - Correct types and constraints
  - CHECK constraint on complexity
- Indexes
  - idx_config_node_type_rank
  - idx_config_complexity
  - idx_config_auth
- View: ranked_node_configs
  - Top 5 per node_type
  - Correct ordering
- Foreign key constraints
  - CASCADE delete
  - Referential integrity
- Data operations
  - INSERT with all fields
  - Nullable fields
  - Rank updates
  - Delete rank > 10
- Performance
  - 1,000 records, <10ms queries
- Migration idempotency

**Test Count:** 19 tests
**Expected Coverage:** 95%+

---

#### 5. `/tests/integration/mcp/template-examples-e2e.test.ts` ✅
**Purpose:** End-to-end integration testing

**Coverage:**
- Direct SQL queries
  - Top 2 examples for search_nodes
  - Top 3 examples with metadata for get_node_essentials
- Data structure validation
  - Valid JSON in all fields
  - Credentials when has_credentials=1
- Ranked view functionality
- Performance with 100+ configs
  - Query performance <5ms
  - Complexity filtering
- Edge cases
  - Non-existent node types
  - Long parameters_json (100 params)
  - Special characters (Unicode, emojis, symbols)
- Data integrity
  - Foreign key constraints
  - Cascade deletes

**Test Count:** 14 tests
**Expected Coverage:** 90%+

---

### Test Fixtures

#### 6. `/tests/fixtures/template-configs.ts` ✅
**Purpose:** Reusable test data

**Provides:**
- `sampleConfigs`: 7 realistic node configurations
  - simpleWebhook
  - webhookWithAuth
  - httpRequestBasic
  - httpRequestWithExpressions
  - slackMessage
  - codeNodeTransform
  - codeNodeWithExpressions

- `sampleWorkflows`: 3 complete workflows
  - webhookToSlack
  - apiWorkflow
  - complexWorkflow

- **Helper Functions:**
  - `compressWorkflow()` - Compress to base64
  - `createTemplateMetadata()` - Generate metadata
  - `createConfigBatch()` - Batch-create configs
  - `getConfigByComplexity()` - Filter by complexity
  - `getConfigsWithExpressions()` - Filter with expressions
  - `getConfigsWithCredentials()` - Filter with credentials
  - `createInsertStatement()` - SQL insert helper

---

## Existing Tests Requiring Updates

### High Priority

#### 1. `tests/unit/mcp/parameter-validation.test.ts`
**Line 480:** Remove `get_node_for_task` from the legacyValidationTools array

```typescript
// REMOVE THIS:
{ name: 'get_node_for_task', args: {}, expected: 'Missing required parameters for get_node_for_task: task' },
```

**Status:** ⚠️ BREAKING CHANGE - Tool removed

---

#### 2. `tests/unit/mcp/tools.test.ts`
**Update:** Remove `get_node_for_task` from the templates category

```typescript
// BEFORE:
templates: ['list_tasks', 'get_node_for_task', 'search_templates', ...]

// AFTER:
templates: ['list_tasks', 'search_templates', ...]
```

**Add:** Tests for the new includeExamples parameter in tool definitions

```typescript
it('should have includeExamples parameter in search_nodes', () => {
  const searchNodesTool = tools.find(t => t.name === 'search_nodes');
  expect(searchNodesTool.inputSchema.properties.includeExamples).toBeDefined();
  expect(searchNodesTool.inputSchema.properties.includeExamples.type).toBe('boolean');
  expect(searchNodesTool.inputSchema.properties.includeExamples.default).toBe(false);
});

it('should have includeExamples parameter in get_node_essentials', () => {
  const essentialsTool = tools.find(t => t.name === 'get_node_essentials');
  expect(essentialsTool.inputSchema.properties.includeExamples).toBeDefined();
});
```

**Status:** ⚠️ REQUIRED UPDATE

---

#### 3. `tests/integration/mcp-protocol/session-management.test.ts`
**Remove:** The test case calling `get_node_for_task` with an invalid task

```typescript
// REMOVE THIS TEST:
client.callTool({ name: 'get_node_for_task', arguments: { task: 'invalid_task' } }).catch(e => e)
```

**Status:** ⚠️ BREAKING CHANGE

---

#### 4. `tests/integration/mcp-protocol/tool-invocation.test.ts`
**Remove:** The entire `get_node_for_task` describe block

**Add:** Tests for the new includeExamples functionality

```typescript
describe('search_nodes with includeExamples', () => {
  it('should return examples when includeExamples is true', async () => {
    const response = await client.callTool({
      name: 'search_nodes',
      arguments: { query: 'webhook', includeExamples: true }
    });

    expect(response.results).toBeDefined();
    // Examples may or may not be present depending on the database
  });

  it('should not return examples when includeExamples is false', async () => {
    const response = await client.callTool({
      name: 'search_nodes',
      arguments: { query: 'webhook', includeExamples: false }
    });

    expect(response.results).toBeDefined();
    response.results.forEach(node => {
      expect(node.examples).toBeUndefined();
    });
  });
});

describe('get_node_essentials with includeExamples', () => {
  it('should return examples with metadata when includeExamples is true', async () => {
    const response = await client.callTool({
      name: 'get_node_essentials',
      arguments: { nodeType: 'nodes-base.webhook', includeExamples: true }
    });

    expect(response.nodeType).toBeDefined();
    // Examples may or may not be present depending on the database
  });
});
```

**Status:** ⚠️ REQUIRED UPDATE

---

### Medium Priority

#### 5. `tests/unit/services/task-templates.test.ts`
**Status:** ✅ No changes needed (TaskTemplates is marked as deprecated but not removed)

**Note:** TaskTemplates remains for backward compatibility. Tests should continue to pass.

---

## Test Execution Plan

### Phase 1: Unit Tests
```bash
# Run new unit tests
npm test tests/unit/scripts/fetch-templates-extraction.test.ts
npm test tests/unit/mcp/search-nodes-examples.test.ts
npm test tests/unit/mcp/get-node-essentials-examples.test.ts

# Expected: All pass, 52 tests
```

### Phase 2: Integration Tests
```bash
# Run new integration tests
npm test tests/integration/database/template-node-configs.test.ts
npm test tests/integration/mcp/template-examples-e2e.test.ts

# Expected: All pass, 33 tests
```

### Phase 3: Update Existing Tests
```bash
# Update files as outlined above, then run:
npm test tests/unit/mcp/parameter-validation.test.ts
npm test tests/unit/mcp/tools.test.ts
npm test tests/integration/mcp-protocol/session-management.test.ts
npm test tests/integration/mcp-protocol/tool-invocation.test.ts

# Expected: All pass after updates
```

### Phase 4: Full Test Suite
```bash
# Run all tests
npm test

# Run with coverage
npm run test:coverage

# Expected coverage improvements:
# - src/scripts/fetch-templates.ts: +20% (60% → 80%)
# - src/mcp/server.ts: +5% (75% → 80%)
# - Overall project: +2%
```

---

## Coverage Expectations

### New Code Coverage

| File | Function | Target | Tests |
|------|----------|--------|-------|
| fetch-templates.ts | extractNodeConfigs | 95% | 15 tests |
| fetch-templates.ts | detectExpressions | 100% | 12 tests |
| server.ts | searchNodes (with examples) | 90% | 8 tests |
| server.ts | getNodeEssentials (with examples) | 90% | 10 tests |
| Database migration | template_node_configs | 100% | 19 tests |

### Overall Coverage Goals

- **Unit Tests:** 90%+ coverage for new code
- **Integration Tests:** All happy paths + critical error paths
- **E2E Tests:** Complete feature workflows
- **Performance:** All queries <10ms (database), <100ms (MCP)

---

## Test Infrastructure

### Dependencies Required
All dependencies are already present in `package.json`:
- vitest (test runner)
- better-sqlite3 (database)
- @vitest/coverage-v8 (coverage)

### Test Utilities Used
- TestDatabase helper (from existing test utils)
- createTestDatabaseAdapter (from existing test utils)
- Standard vitest matchers

### No New Dependencies Required ✅

---

## Regression Prevention

### Critical Paths Protected

1. **Backward Compatibility**
   - Tools work without the includeExamples parameter
   - Existing workflows unchanged
   - Cache keys differentiated

2. **Performance**
   - No degradation when includeExamples=false
   - Indexed queries <10ms
   - Example-fetch errors don't break responses

3. **Data Integrity**
   - Foreign key constraints enforced
   - JSON validation in all fields
   - Rank calculations correct

---

## CI/CD Integration

### GitHub Actions Updates
No changes required. Existing test commands will run the new tests:

```yaml
- run: npm test
- run: npm run test:coverage
```

### Coverage Thresholds
Current thresholds are maintained. Expected improvements:
- Lines: +2%
- Functions: +3%
- Branches: +2%

---

## Manual Testing Checklist

### Pre-Deployment Verification

- [ ] Run `npm run rebuild` - Verify the migration applies cleanly
- [ ] Run `npm run fetch:templates --extract-only` - Verify extraction works
- [ ] Check the database: `SELECT COUNT(*) FROM template_node_configs` - Should be ~197
- [ ] Test MCP tool: `search_nodes({query: "webhook", includeExamples: true})`
- [ ] Test MCP tool: `get_node_essentials({nodeType: "nodes-base.webhook", includeExamples: true})`
- [ ] Verify backward compatibility: Tools work without the includeExamples parameter
- [ ] Performance test: Query 100 nodes with examples in <200ms

---

## Rollback Plan

If issues are detected:

1. **Database Rollback:**
   ```sql
   DROP TABLE IF EXISTS template_node_configs;
   DROP VIEW IF EXISTS ranked_node_configs;
   ```

2. **Code Rollback:**
   - Revert server.ts changes
   - Revert tools.ts changes
   - Restore the get_node_for_task tool (if critical)

3. **Test Rollback:**
   - Revert parameter-validation.test.ts
   - Revert tools.test.ts
   - Revert tool-invocation.test.ts

---

## Success Metrics

### Test Metrics
- ✅ 85+ new tests added
- ✅ 0 tests failing after updates
- ✅ Coverage increase of 2%+
- ✅ All performance tests pass

### Feature Metrics
- ✅ 197 template configs extracted
- ✅ Top 2/3 examples returned correctly
- ✅ Query performance <10ms
- ✅ No backward-compatibility breaks

---

## Conclusion

This test plan provides **comprehensive coverage** for the P0-R3 feature with:
- **85+ new tests** across unit, integration, and E2E levels
- **Complete coverage** of extraction, storage, and retrieval
- **Backward compatibility** protection
- **Performance validation** (<10ms queries)
- **A clear migration path** for existing tests

**All test files are ready for execution.** Update the 4 existing test files as outlined, then run the full test suite.

**Estimated Total Implementation Time:** 2-3 hours for updating existing tests + validation
PRIVACY.md (new file, 73 lines)
@@ -0,0 +1,73 @@
# Privacy Policy for n8n-mcp Telemetry

## Overview
n8n-mcp collects anonymous usage statistics to help improve the tool. This data collection is designed to respect user privacy while providing valuable insights into how the tool is used.

## What We Collect
- **Anonymous User ID**: A hashed identifier derived from your machine characteristics (no personal information)
- **Tool Usage**: Which MCP tools are used and their performance metrics
- **Workflow Patterns**: Sanitized workflow structures (all sensitive data removed)
- **Error Types**: Categories of errors encountered (no error messages with user data)
- **System Information**: Platform, architecture, Node.js version, and n8n-mcp version

## What We DON'T Collect
- Personal information or usernames
- API keys, tokens, or credentials
- URLs, endpoints, or hostnames
- Email addresses or contact information
- File paths or directory structures
- Actual workflow data or parameters
- Database connection strings
- Any authentication information

## Data Sanitization
All collected data undergoes automatic sanitization (illustrated below):
- URLs are replaced with `[URL]` or `[REDACTED]`
- Long alphanumeric strings (potential keys) are replaced with `[KEY]`
- Email addresses are replaced with `[EMAIL]`
- Authentication-related fields are completely removed
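
An illustrative sanitizer implementing those substitutions (the patterns are simplified, not the project's exact regexes):

```typescript
function sanitize(text: string): string {
  return text
    .replace(/[\w.+-]+@[\w-]+\.[\w.]+/g, '[EMAIL]')   // email addresses
    .replace(/https?:\/\/\S+/g, '[URL]')              // URLs
    .replace(/\b[A-Za-z0-9_-]{32,}\b/g, '[KEY]');     // long strings that look like keys
}
```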

## Data Storage
- Data is stored securely using Supabase
- Anonymous users have write-only access (they cannot read data back)
- Row Level Security (RLS) policies prevent data access by anonymous users

## Opt-Out
You can disable telemetry at any time:
```bash
npx n8n-mcp telemetry disable
```

To re-enable:
```bash
npx n8n-mcp telemetry enable
```

To check status:
```bash
npx n8n-mcp telemetry status
```

## Data Usage
Collected data is used solely to:
- Understand which features are most used
- Identify common error patterns
- Improve tool performance and reliability
- Guide development priorities
- Train machine learning models for workflow generation

All ML training uses sanitized, anonymized data only.
Users can opt out at any time with `npx n8n-mcp telemetry disable`.

## Data Retention
- Data is retained for analysis purposes
- No personal identification is possible from the collected data

## Changes to This Policy
We may update this privacy policy from time to time. Updates will be reflected in this document.

## Contact
For questions about telemetry or privacy, please open an issue on GitHub:
https://github.com/czlonkowski/n8n-mcp/issues

Last updated: 2025-11-06

318 README_ANALYSIS.md Normal file
@@ -0,0 +1,318 @@
# N8N-MCP Validation Analysis: Complete Report

**Date**: November 8, 2025
**Dataset**: 29,218 validation events | 9,021 unique users | 90 days
**Status**: Complete and ready for action

---

## Analysis Documents

### 1. ANALYSIS_QUICK_REFERENCE.md (5.8KB)
**Best for**: Quick decisions, meetings, slide presentations

START HERE if you want the key points in 5 minutes.

**Contains**:
- One-paragraph core finding
- Top 3 problem areas with root causes
- 5 most common errors
- Implementation plan summary
- Key metrics & targets
- FAQ section

---

### 2. VALIDATION_ANALYSIS_SUMMARY.md (13KB)
**Best for**: Executive stakeholders, team leads, decision makers

Read this for a comprehensive but concise overview.

**Contains**:
- One-page executive summary
- Health scorecard with key metrics
- Detailed problem-area breakdown
- Error category distribution
- Agent behavior insights
- Tool usage patterns
- Documentation impact findings
- Top 5 recommendations with ROI estimates
- 50-65% improvement projection

---

### 3. VALIDATION_ANALYSIS_REPORT.md (27KB)
**Best for**: Technical deep-dive, implementation planning, root cause analysis

The complete reference document with all findings.

**Contains**:
- All 16 SQL queries (reproducible)
- Node-specific difficulty ranking (top 20)
- Top 25 unique validation error messages
- Error categorization with root causes
- Tool usage patterns before failures
- Search query analysis
- Documentation effectiveness study
- Retry success rate analysis
- Property-level difficulty matrix
- 8 detailed recommendations with implementation guides
- Phase-by-phase action items
- KPI tracking setup
- Complete appendix with error message reference

---

### 4. IMPLEMENTATION_ROADMAP.md (4.3KB)
**Best for**: Project managers, development team, sprint planning

An actionable roadmap for the next 6 weeks.

**Contains**:
- Phase 1-3 breakdown (2 weeks each)
- Specific file locations to modify
- Effort estimates per task
- Success criteria for each phase
- Expected impact projections
- Code examples (before/after)
- Key changes documentation

---

## Reading Paths

### Path A: Decision Maker (30 minutes)
1. Read: ANALYSIS_QUICK_REFERENCE.md
2. Review: Key metrics in VALIDATION_ANALYSIS_SUMMARY.md
3. Decide: Approve IMPLEMENTATION_ROADMAP.md

### Path B: Product Manager (1 hour)
1. Read: VALIDATION_ANALYSIS_SUMMARY.md
2. Skim: Top recommendations in VALIDATION_ANALYSIS_REPORT.md
3. Review: IMPLEMENTATION_ROADMAP.md
4. Check: Success metrics and timelines

### Path C: Technical Lead (2-3 hours)
1. Read: ANALYSIS_QUICK_REFERENCE.md
2. Deep-dive: VALIDATION_ANALYSIS_REPORT.md
3. Study: IMPLEMENTATION_ROADMAP.md
4. Review: Code examples and SQL queries
5. Plan: Ticket creation and sprint allocation

### Path D: Developer (3-4 hours)
1. Skim: ANALYSIS_QUICK_REFERENCE.md for context
2. Read: VALIDATION_ANALYSIS_REPORT.md sections 3-8
3. Study: IMPLEMENTATION_ROADMAP.md thoroughly
4. Review: All code locations and examples
5. Plan: First task implementation

---
## Key Findings Overview

### The Core Insight
The validation system is NOT broken: the 29,218 validation events are evidence that it works, preventing bad deployments. The real challenge is guidance gaps that cause first-attempt failures.

### Success Evidence
- 100% same-day error recovery rate
- 100% retry success rate
- All agents fix errors when given feedback
- Zero "unfixable" errors

### Problem Areas (75% of errors)
1. **Workflow structure** (26%) - JSON malformation
2. **Connections** (14%) - Unintuitive syntax
3. **Required fields** (8%) - Not marked upfront

### Most Problematic Nodes
- Webhook/Trigger (127 failures)
- Slack (73 failures)
- AI Agent (36 failures)
- OpenAI (35 failures)
- HTTP Request (31 failures)

### Solution Strategy
- Phase 1: Better error messages + required-field markers (25-30% reduction)
- Phase 2: Documentation + validation improvements (additional 15-20%)
- Phase 3: Advanced features + monitoring (additional 10-15%)
- **Target**: 50-65% total failure reduction in 6 weeks

---

## Critical Numbers

```
Validation Events ............. 29,218
Unique Users .................. 9,021
Data Quality .................. 100% (all marked as errors)

Current Metrics:
  Error Rate (doc users) ....... 12.6%
  Error Rate (non-doc users) ... 10.8%
  First-attempt success ........ ~77%
  Retry success ................ 100%
  Same-day recovery ............ 100%

Target Metrics (after 6 weeks):
  Error Rate ................... 6-7% (-50%)
  First-attempt success ........ 85%+
  Retry success ................ 100%
  Implementation effort ........ 60-80 hours
```

---

## Implementation Timeline

```
Week 1-2: Phase 1 (Error messages, field markers, webhook guide)
          Expected: 25-30% failure reduction

Week 3-4: Phase 2 (Enum suggestions, connection guide, AI validation)
          Expected: Additional 15-20% reduction

Week 5-6: Phase 3 (Search improvements, fuzzy matching, KPI setup)
          Expected: Additional 10-15% reduction

Target: 50-65% total reduction by Week 6
```

---

## How to Use These Documents

### For Review & Approval
1. Start with ANALYSIS_QUICK_REFERENCE.md
2. Check key metrics in VALIDATION_ANALYSIS_SUMMARY.md
3. Review IMPLEMENTATION_ROADMAP.md for feasibility
4. Decide: approve Phases 1-3

### For Team Planning
1. Read IMPLEMENTATION_ROADMAP.md
2. Create GitHub issues from each task
3. Assign based on effort estimates
4. Schedule sprints for Phases 1-3

### For Development
1. Review specific recommendations in VALIDATION_ANALYSIS_REPORT.md
2. Find code locations in IMPLEMENTATION_ROADMAP.md
3. Study code examples (before/after)
4. Implement and test

### For Measurement
1. Record baseline metrics (current state)
2. Deploy Phase 1 and measure impact
3. Use KPI queries from VALIDATION_ANALYSIS_REPORT.md
4. Adjust strategy based on actual results

---

## Key Recommendations (Priority Order)

### IMMEDIATE (Week 1-2)
1. **Enhance error messages** - Add location + examples
2. **Mark required fields** - Add "⚠️ REQUIRED" to tools
3. **Create webhook guide** - Document configuration rules

### HIGH (Week 3-4)
4. **Add enum suggestions** - Show valid values in errors (a sketch follows this list)
5. **Create connections guide** - Document syntax + examples
6. **Add AI Agent validation** - Detect missing LLM connections
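A sketch of recommendation 4 above; the helper name and message format are illustrative assumptions, not the repo's actual validator API:

```ts
// Hypothetical helper: turn an invalid enum value into an actionable error message.
function enumSuggestion(field: string, received: string, allowed: string[]): string {
  return `Invalid value "${received}" for "${field}". Valid values: ${allowed.join(', ')}`;
}

// => Invalid value "chanelMessage" for "resource". Valid values: channelMessage, update
console.log(enumSuggestion('resource', 'chanelMessage', ['channelMessage', 'update']));
```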
### MEDIUM (Week 5-6)
7. **Improve search results** - Add configuration hints
8. **Build fuzzy matcher** - Suggest similar node types (a sketch follows this list)
9. **Set up KPI tracking** - Monitor improvement
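A minimal sketch of recommendation 8; the distance threshold and node type strings are assumptions, and this is one plausible implementation (Levenshtein distance), not the repo's chosen one:

```ts
// Hypothetical fuzzy matcher: suggest the closest known node type for a misspelled one.
function levenshtein(a: string, b: string): number {
  const dp = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0)),
  );
  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      dp[i][j] = Math.min(
        dp[i - 1][j] + 1, // deletion
        dp[i][j - 1] + 1, // insertion
        dp[i - 1][j - 1] + (a[i - 1] === b[j - 1] ? 0 : 1), // substitution
      );
    }
  }
  return dp[a.length][b.length];
}

function suggestNodeType(input: string, known: string[]): string | null {
  let best: string | null = null;
  let bestDist = Infinity;
  for (const candidate of known) {
    const d = levenshtein(input.toLowerCase(), candidate.toLowerCase());
    if (d < bestDist) {
      bestDist = d;
      best = candidate;
    }
  }
  return bestDist <= 3 ? best : null; // threshold is an assumption
}

// "n8n-nodes-base.htppRequest" -> "n8n-nodes-base.httpRequest"
console.log(
  suggestNodeType('n8n-nodes-base.htppRequest', ['n8n-nodes-base.httpRequest', 'n8n-nodes-base.webhook']),
);
```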
---

## Questions & Answers

**Q: Why so many validation failures?**
A: High usage (9,021 users, complex workflows). The system is working: it is preventing bad deployments.

**Q: Shouldn't we just allow invalid configurations?**
A: No. Validation prevented 29,218 broken workflows from deploying. We improve the guidance instead.

**Q: Do agents actually learn from errors?**
A: Yes. The 100% same-day recovery rate shows that the feedback loop works.

**Q: Can we really reduce failures by 50-65%?**
A: Yes. The analysis shows these specific improvements target the actual root causes.

**Q: How long will this take?**
A: 60-80 developer-hours across 6 weeks. Work can start immediately.

**Q: What's the biggest win?**
A: Marking required fields (378 errors) plus better structure messages (1,268 errors).

---

## Next Steps

1. **This Week**: Review all documents and get approval
2. **Week 1**: Create GitHub issues from IMPLEMENTATION_ROADMAP.md
3. **Week 2**: Assign to team, start Phase 1
4. **Week 4**: Deploy Phase 1, start Phase 2
5. **Week 6**: Deploy Phase 2, start Phase 3
6. **Week 8**: Deploy Phase 3, begin monitoring
7. **Week 9+**: Review metrics, iterate

---

## File Structure

```
/Users/romualdczlonkowski/Pliki/n8n-mcp/n8n-mcp/
├── ANALYSIS_QUICK_REFERENCE.md ........... Quick lookup (5.8KB)
├── VALIDATION_ANALYSIS_SUMMARY.md ........ Executive summary (13KB)
├── VALIDATION_ANALYSIS_REPORT.md ......... Complete analysis (27KB)
├── IMPLEMENTATION_ROADMAP.md ............. Action plan (4.3KB)
└── README_ANALYSIS.md .................... This file
```

**Total Documentation**: 50KB of analysis, recommendations, and implementation guidance

---

## Contact & Support

For specific questions:
- **Why?** → See VALIDATION_ANALYSIS_REPORT.md, Sections 2-8
- **How?** → See IMPLEMENTATION_ROADMAP.md for code locations
- **When?** → See IMPLEMENTATION_ROADMAP.md for the timeline
- **Metrics?** → See the key metrics section of VALIDATION_ANALYSIS_SUMMARY.md

---

## Metadata

| Item | Value |
|------|-------|
| Analysis Date | November 8, 2025 |
| Data Period | Sept 26 - Nov 8, 2025 (90 days) |
| Sample Size | 29,218 validation events |
| Users Analyzed | 9,021 unique users |
| SQL Queries | 16 comprehensive queries |
| Confidence Level | HIGH |
| Status | Complete & Ready for Implementation |

---

## Analysis Methodology

1. **Data Collection**: Extracted all validation_details events from PostgreSQL
2. **Categorization**: Grouped errors by type, node, and message pattern
3. **Pattern Analysis**: Identified root causes for each error category
4. **User Behavior**: Tracked tool usage before/after failures
5. **Recovery Analysis**: Measured success rates and correction time
6. **Recommendation Development**: Mapped solutions to specific problems
7. **Impact Projection**: Estimated the improvement from each solution
8. **Roadmap Creation**: Phased implementation plan with effort estimates

**Data Quality**: 100% of validation events properly categorized; no data loss or corruption

---

**Analysis Complete** | **Ready for Review** | **Awaiting Approval to Proceed**

BIN data/nodes.db Binary file not shown.

0 data/templates.db Normal file

15 dist/config/n8n-api.d.ts vendored Normal file
@@ -0,0 +1,15 @@
```ts
export declare function getN8nApiConfig(): {
    baseUrl: string;
    apiKey: string;
    timeout: number;
    maxRetries: number;
} | null;
export declare function isN8nApiConfigured(): boolean;
export declare function getN8nApiConfigFromContext(context: {
    n8nApiUrl?: string;
    n8nApiKey?: string;
    n8nApiTimeout?: number;
    n8nApiMaxRetries?: number;
}): N8nApiConfig | null;
export type N8nApiConfig = NonNullable<ReturnType<typeof getN8nApiConfig>>;
//# sourceMappingURL=n8n-api.d.ts.map
```

1 dist/config/n8n-api.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"n8n-api.d.ts","sourceRoot":"","sources":["../../src/config/n8n-api.ts"],"names":[],"mappings":"AAgBA,wBAAgB,eAAe;;;;;SA0B9B;AAGD,wBAAgB,kBAAkB,IAAI,OAAO,CAG5C;AAMD,wBAAgB,0BAA0B,CAAC,OAAO,EAAE;IAClD,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,gBAAgB,CAAC,EAAE,MAAM,CAAC;CAC3B,GAAG,YAAY,GAAG,IAAI,CAWtB;AAGD,MAAM,MAAM,YAAY,GAAG,WAAW,CAAC,UAAU,CAAC,OAAO,eAAe,CAAC,CAAC,CAAC"}

53 dist/config/n8n-api.js vendored Normal file
@@ -0,0 +1,53 @@
"use strict";
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getN8nApiConfig = getN8nApiConfig;
|
||||
exports.isN8nApiConfigured = isN8nApiConfigured;
|
||||
exports.getN8nApiConfigFromContext = getN8nApiConfigFromContext;
|
||||
const zod_1 = require("zod");
|
||||
const dotenv_1 = __importDefault(require("dotenv"));
|
||||
const n8nApiConfigSchema = zod_1.z.object({
|
||||
N8N_API_URL: zod_1.z.string().url().optional(),
|
||||
N8N_API_KEY: zod_1.z.string().min(1).optional(),
|
||||
N8N_API_TIMEOUT: zod_1.z.coerce.number().positive().default(30000),
|
||||
N8N_API_MAX_RETRIES: zod_1.z.coerce.number().positive().default(3),
|
||||
});
|
||||
let envLoaded = false;
|
||||
function getN8nApiConfig() {
|
||||
if (!envLoaded) {
|
||||
dotenv_1.default.config();
|
||||
envLoaded = true;
|
||||
}
|
||||
const result = n8nApiConfigSchema.safeParse(process.env);
|
||||
if (!result.success) {
|
||||
return null;
|
||||
}
|
||||
const config = result.data;
|
||||
if (!config.N8N_API_URL || !config.N8N_API_KEY) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
baseUrl: config.N8N_API_URL,
|
||||
apiKey: config.N8N_API_KEY,
|
||||
timeout: config.N8N_API_TIMEOUT,
|
||||
maxRetries: config.N8N_API_MAX_RETRIES,
|
||||
};
|
||||
}
|
||||
function isN8nApiConfigured() {
|
||||
const config = getN8nApiConfig();
|
||||
return config !== null;
|
||||
}
|
||||
function getN8nApiConfigFromContext(context) {
|
||||
if (!context.n8nApiUrl || !context.n8nApiKey) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
baseUrl: context.n8nApiUrl,
|
||||
apiKey: context.n8nApiKey,
|
||||
timeout: context.n8nApiTimeout ?? 30000,
|
||||
maxRetries: context.n8nApiMaxRetries ?? 3,
|
||||
};
|
||||
}
|
||||
//# sourceMappingURL=n8n-api.js.map
|
||||
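A small usage sketch for the config helpers above, assuming `N8N_API_URL` and `N8N_API_KEY` are set in the environment (the call site is illustrative, not taken from the repo):

```ts
import { getN8nApiConfig, isN8nApiConfigured } from './config/n8n-api';

if (isN8nApiConfigured()) {
  const config = getN8nApiConfig()!; // non-null: isN8nApiConfigured() just checked
  console.log(`n8n API at ${config.baseUrl}, timeout ${config.timeout}ms, retries ${config.maxRetries}`);
}
```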

1 dist/config/n8n-api.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"n8n-api.js","sourceRoot":"","sources":["../../src/config/n8n-api.ts"],"names":[],"mappings":";;;;;AAgBA,0CA0BC;AAGD,gDAGC;AAMD,gEAgBC;AAtED,6BAAwB;AACxB,oDAA4B;AAI5B,MAAM,kBAAkB,GAAG,OAAC,CAAC,MAAM,CAAC;IAClC,WAAW,EAAE,OAAC,CAAC,MAAM,EAAE,CAAC,GAAG,EAAE,CAAC,QAAQ,EAAE;IACxC,WAAW,EAAE,OAAC,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE;IACzC,eAAe,EAAE,OAAC,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,QAAQ,EAAE,CAAC,OAAO,CAAC,KAAK,CAAC;IAC5D,mBAAmB,EAAE,OAAC,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC,QAAQ,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC;CAC7D,CAAC,CAAC;AAGH,IAAI,SAAS,GAAG,KAAK,CAAC;AAGtB,SAAgB,eAAe;IAE7B,IAAI,CAAC,SAAS,EAAE,CAAC;QACf,gBAAM,CAAC,MAAM,EAAE,CAAC;QAChB,SAAS,GAAG,IAAI,CAAC;IACnB,CAAC;IAED,MAAM,MAAM,GAAG,kBAAkB,CAAC,SAAS,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;IAEzD,IAAI,CAAC,MAAM,CAAC,OAAO,EAAE,CAAC;QACpB,OAAO,IAAI,CAAC;IACd,CAAC;IAED,MAAM,MAAM,GAAG,MAAM,CAAC,IAAI,CAAC;IAG3B,IAAI,CAAC,MAAM,CAAC,WAAW,IAAI,CAAC,MAAM,CAAC,WAAW,EAAE,CAAC;QAC/C,OAAO,IAAI,CAAC;IACd,CAAC;IAED,OAAO;QACL,OAAO,EAAE,MAAM,CAAC,WAAW;QAC3B,MAAM,EAAE,MAAM,CAAC,WAAW;QAC1B,OAAO,EAAE,MAAM,CAAC,eAAe;QAC/B,UAAU,EAAE,MAAM,CAAC,mBAAmB;KACvC,CAAC;AACJ,CAAC;AAGD,SAAgB,kBAAkB;IAChC,MAAM,MAAM,GAAG,eAAe,EAAE,CAAC;IACjC,OAAO,MAAM,KAAK,IAAI,CAAC;AACzB,CAAC;AAMD,SAAgB,0BAA0B,CAAC,OAK1C;IACC,IAAI,CAAC,OAAO,CAAC,SAAS,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,CAAC;QAC7C,OAAO,IAAI,CAAC;IACd,CAAC;IAED,OAAO;QACL,OAAO,EAAE,OAAO,CAAC,SAAS;QAC1B,MAAM,EAAE,OAAO,CAAC,SAAS;QACzB,OAAO,EAAE,OAAO,CAAC,aAAa,IAAI,KAAK;QACvC,UAAU,EAAE,OAAO,CAAC,gBAAgB,IAAI,CAAC;KAC1C,CAAC;AACJ,CAAC"}

123 dist/constants/type-structures.d.ts vendored Normal file
@@ -0,0 +1,123 @@
```ts
import type { NodePropertyTypes } from 'n8n-workflow';
import type { TypeStructure } from '../types/type-structures';
export declare const TYPE_STRUCTURES: Record<NodePropertyTypes, TypeStructure>;
export declare const COMPLEX_TYPE_EXAMPLES: {
    collection: {
        basic: {
            name: string;
            email: string;
        };
        nested: {
            user: {
                firstName: string;
                lastName: string;
            };
            preferences: {
                theme: string;
                notifications: boolean;
            };
        };
        withExpressions: {
            id: string;
            timestamp: string;
            data: string;
        };
    };
    fixedCollection: {
        httpHeaders: {
            headers: {
                name: string;
                value: string;
            }[];
        };
        queryParameters: {
            queryParameters: {
                name: string;
                value: string;
            }[];
        };
        multipleCollections: {
            headers: {
                name: string;
                value: string;
            }[];
            queryParameters: {
                name: string;
                value: string;
            }[];
        };
    };
    filter: {
        simple: {
            conditions: {
                id: string;
                leftValue: string;
                operator: {
                    type: string;
                    operation: string;
                };
                rightValue: string;
            }[];
            combinator: string;
        };
        complex: {
            conditions: ({
                id: string;
                leftValue: string;
                operator: {
                    type: string;
                    operation: string;
                };
                rightValue: number;
            } | {
                id: string;
                leftValue: string;
                operator: {
                    type: string;
                    operation: string;
                };
                rightValue: string;
            })[];
            combinator: string;
        };
    };
    resourceMapper: {
        autoMap: {
            mappingMode: string;
            value: {};
        };
        manual: {
            mappingMode: string;
            value: {
                firstName: string;
                lastName: string;
                email: string;
                status: string;
            };
        };
    };
    assignmentCollection: {
        basic: {
            assignments: {
                id: string;
                name: string;
                value: string;
                type: string;
            }[];
        };
        multiple: {
            assignments: ({
                id: string;
                name: string;
                value: string;
                type: string;
            } | {
                id: string;
                name: string;
                value: boolean;
                type: string;
            })[];
        };
    };
};
//# sourceMappingURL=type-structures.d.ts.map
```

1 dist/constants/type-structures.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"type-structures.d.ts","sourceRoot":"","sources":["../../src/constants/type-structures.ts"],"names":[],"mappings":"AAaA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,cAAc,CAAC;AACtD,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,0BAA0B,CAAC;AAe9D,eAAO,MAAM,eAAe,EAAE,MAAM,CAAC,iBAAiB,EAAE,aAAa,CAilBpE,CAAC;AAUF,eAAO,MAAM,qBAAqB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA4GjC,CAAC"}

654 dist/constants/type-structures.js vendored Normal file
@@ -0,0 +1,654 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.COMPLEX_TYPE_EXAMPLES = exports.TYPE_STRUCTURES = void 0;
|
||||
exports.TYPE_STRUCTURES = {
|
||||
string: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A text value that can contain any characters',
|
||||
example: 'Hello World',
|
||||
examples: ['', 'A simple text', '{{ $json.name }}', 'https://example.com'],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: ['Most common property type', 'Supports n8n expressions'],
|
||||
},
|
||||
number: {
|
||||
type: 'primitive',
|
||||
jsType: 'number',
|
||||
description: 'A numeric value (integer or decimal)',
|
||||
example: 42,
|
||||
examples: [0, -10, 3.14, 100],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: ['Can be constrained with min/max in typeOptions'],
|
||||
},
|
||||
boolean: {
|
||||
type: 'primitive',
|
||||
jsType: 'boolean',
|
||||
description: 'A true/false toggle value',
|
||||
example: true,
|
||||
examples: [true, false],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: ['Rendered as checkbox in n8n UI'],
|
||||
},
|
||||
dateTime: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A date and time value in ISO 8601 format',
|
||||
example: '2024-01-20T10:30:00Z',
|
||||
examples: [
|
||||
'2024-01-20T10:30:00Z',
|
||||
'2024-01-20',
|
||||
'{{ $now }}',
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
pattern: '^\\d{4}-\\d{2}-\\d{2}(T\\d{2}:\\d{2}:\\d{2}(\\.\\d{3})?Z?)?$',
|
||||
},
|
||||
notes: ['Accepts ISO 8601 format', 'Can use n8n date expressions'],
|
||||
},
|
||||
color: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A color value in hex format',
|
||||
example: '#FF5733',
|
||||
examples: ['#FF5733', '#000000', '#FFFFFF', '{{ $json.color }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
pattern: '^#[0-9A-Fa-f]{6}$',
|
||||
},
|
||||
notes: ['Must be 6-digit hex color', 'Rendered with color picker in UI'],
|
||||
},
|
||||
json: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'A JSON string that can be parsed into any structure',
|
||||
example: '{"key": "value", "nested": {"data": 123}}',
|
||||
examples: [
|
||||
'{}',
|
||||
'{"name": "John", "age": 30}',
|
||||
'[1, 2, 3]',
|
||||
'{{ $json }}',
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: ['Must be valid JSON when parsed', 'Often used for custom payloads'],
|
||||
},
|
||||
options: {
|
||||
type: 'primitive',
|
||||
jsType: 'string',
|
||||
description: 'Single selection from a list of predefined options',
|
||||
example: 'option1',
|
||||
examples: ['GET', 'POST', 'channelMessage', 'update'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Value must match one of the defined option values',
|
||||
'Rendered as dropdown in UI',
|
||||
'Options defined in property.options array',
|
||||
],
|
||||
},
|
||||
multiOptions: {
|
||||
type: 'array',
|
||||
jsType: 'array',
|
||||
description: 'Multiple selections from a list of predefined options',
|
||||
structure: {
|
||||
items: {
|
||||
type: 'string',
|
||||
description: 'Selected option value',
|
||||
},
|
||||
},
|
||||
example: ['option1', 'option2'],
|
||||
examples: [[], ['GET', 'POST'], ['read', 'write', 'delete']],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Array of option values',
|
||||
'Each value must exist in property.options',
|
||||
'Rendered as multi-select dropdown',
|
||||
],
|
||||
},
|
||||
collection: {
|
||||
type: 'collection',
|
||||
jsType: 'object',
|
||||
description: 'A group of related properties with dynamic values',
|
||||
structure: {
|
||||
properties: {
|
||||
'<propertyName>': {
|
||||
type: 'any',
|
||||
description: 'Any nested property from the collection definition',
|
||||
},
|
||||
},
|
||||
flexible: true,
|
||||
},
|
||||
example: {
|
||||
name: 'John Doe',
|
||||
email: 'john@example.com',
|
||||
age: 30,
|
||||
},
|
||||
examples: [
|
||||
{},
|
||||
{ key1: 'value1', key2: 123 },
|
||||
{ nested: { deep: { value: true } } },
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Properties defined in property.values array',
|
||||
'Each property can be any type',
|
||||
'UI renders as expandable section',
|
||||
],
|
||||
},
|
||||
fixedCollection: {
|
||||
type: 'collection',
|
||||
jsType: 'object',
|
||||
description: 'A collection with predefined groups of properties',
|
||||
structure: {
|
||||
properties: {
|
||||
'<collectionName>': {
|
||||
type: 'array',
|
||||
description: 'Array of collection items',
|
||||
items: {
|
||||
type: 'object',
|
||||
description: 'Collection item with defined properties',
|
||||
},
|
||||
},
|
||||
},
|
||||
required: [],
|
||||
},
|
||||
example: {
|
||||
headers: [
|
||||
{ name: 'Content-Type', value: 'application/json' },
|
||||
{ name: 'Authorization', value: 'Bearer token' },
|
||||
],
|
||||
},
|
||||
examples: [
|
||||
{},
|
||||
{ queryParameters: [{ name: 'id', value: '123' }] },
|
||||
{
|
||||
headers: [{ name: 'Accept', value: '*/*' }],
|
||||
queryParameters: [{ name: 'limit', value: '10' }],
|
||||
},
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Each collection has predefined structure',
|
||||
'Often used for headers, parameters, etc.',
|
||||
'Supports multiple values per collection',
|
||||
],
|
||||
},
|
||||
resourceLocator: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'A flexible way to specify a resource by ID, name, URL, or list',
|
||||
structure: {
|
||||
properties: {
|
||||
mode: {
|
||||
type: 'string',
|
||||
description: 'How the resource is specified',
|
||||
enum: ['id', 'url', 'list'],
|
||||
required: true,
|
||||
},
|
||||
value: {
|
||||
type: 'string',
|
||||
description: 'The resource identifier',
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['mode', 'value'],
|
||||
},
|
||||
example: {
|
||||
mode: 'id',
|
||||
value: 'abc123',
|
||||
},
|
||||
examples: [
|
||||
{ mode: 'url', value: 'https://example.com/resource/123' },
|
||||
{ mode: 'list', value: 'item-from-dropdown' },
|
||||
{ mode: 'id', value: '{{ $json.resourceId }}' },
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Provides flexible resource selection',
|
||||
'Mode determines how value is interpreted',
|
||||
'UI adapts based on selected mode',
|
||||
],
|
||||
},
|
||||
resourceMapper: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Maps input data fields to resource fields with transformation options',
|
||||
structure: {
|
||||
properties: {
|
||||
mappingMode: {
|
||||
type: 'string',
|
||||
description: 'How fields are mapped',
|
||||
enum: ['defineBelow', 'autoMapInputData'],
|
||||
},
|
||||
value: {
|
||||
type: 'object',
|
||||
description: 'Field mappings',
|
||||
properties: {
|
||||
'<fieldName>': {
|
||||
type: 'string',
|
||||
description: 'Expression or value for this field',
|
||||
},
|
||||
},
|
||||
flexible: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
example: {
|
||||
mappingMode: 'defineBelow',
|
||||
value: {
|
||||
name: '{{ $json.fullName }}',
|
||||
email: '{{ $json.emailAddress }}',
|
||||
status: 'active',
|
||||
},
|
||||
},
|
||||
examples: [
|
||||
{ mappingMode: 'autoMapInputData', value: {} },
|
||||
{
|
||||
mappingMode: 'defineBelow',
|
||||
value: { id: '{{ $json.userId }}', name: '{{ $json.name }}' },
|
||||
},
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Complex mapping with UI assistance',
|
||||
'Can auto-map or manually define',
|
||||
'Supports field transformations',
|
||||
],
|
||||
},
|
||||
filter: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Defines conditions for filtering data with boolean logic',
|
||||
structure: {
|
||||
properties: {
|
||||
conditions: {
|
||||
type: 'array',
|
||||
description: 'Array of filter conditions',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'Unique condition identifier',
|
||||
required: true,
|
||||
},
|
||||
leftValue: {
|
||||
type: 'any',
|
||||
description: 'Left side of comparison',
|
||||
},
|
||||
operator: {
|
||||
type: 'object',
|
||||
description: 'Comparison operator',
|
||||
required: true,
|
||||
properties: {
|
||||
type: {
|
||||
type: 'string',
|
||||
enum: ['string', 'number', 'boolean', 'dateTime', 'array', 'object'],
|
||||
required: true,
|
||||
},
|
||||
operation: {
|
||||
type: 'string',
|
||||
description: 'Operation to perform',
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
rightValue: {
|
||||
type: 'any',
|
||||
description: 'Right side of comparison',
|
||||
},
|
||||
},
|
||||
},
|
||||
required: true,
|
||||
},
|
||||
combinator: {
|
||||
type: 'string',
|
||||
description: 'How to combine conditions',
|
||||
enum: ['and', 'or'],
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['conditions', 'combinator'],
|
||||
},
|
||||
example: {
|
||||
conditions: [
|
||||
{
|
||||
id: 'abc-123',
|
||||
leftValue: '{{ $json.status }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'active',
|
||||
},
|
||||
],
|
||||
combinator: 'and',
|
||||
},
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Advanced filtering UI in n8n',
|
||||
'Supports complex boolean logic',
|
||||
'Operations vary by data type',
|
||||
],
|
||||
},
|
||||
assignmentCollection: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Defines variable assignments with expressions',
|
||||
structure: {
|
||||
properties: {
|
||||
assignments: {
|
||||
type: 'array',
|
||||
description: 'Array of variable assignments',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'Unique assignment identifier',
|
||||
required: true,
|
||||
},
|
||||
name: {
|
||||
type: 'string',
|
||||
description: 'Variable name',
|
||||
required: true,
|
||||
},
|
||||
value: {
|
||||
type: 'any',
|
||||
description: 'Value to assign',
|
||||
required: true,
|
||||
},
|
||||
type: {
|
||||
type: 'string',
|
||||
description: 'Data type of the value',
|
||||
enum: ['string', 'number', 'boolean', 'array', 'object'],
|
||||
},
|
||||
},
|
||||
},
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['assignments'],
|
||||
},
|
||||
example: {
|
||||
assignments: [
|
||||
{
|
||||
id: 'abc-123',
|
||||
name: 'userName',
|
||||
value: '{{ $json.name }}',
|
||||
type: 'string',
|
||||
},
|
||||
{
|
||||
id: 'def-456',
|
||||
name: 'userAge',
|
||||
value: 30,
|
||||
type: 'number',
|
||||
},
|
||||
],
|
||||
},
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Used in Set node and similar',
|
||||
'Each assignment can use expressions',
|
||||
'Type helps with validation',
|
||||
],
|
||||
},
|
||||
credentials: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Reference to credential configuration',
|
||||
example: 'googleSheetsOAuth2Api',
|
||||
examples: ['httpBasicAuth', 'slackOAuth2Api', 'postgresApi'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'References credential type name',
|
||||
'Credential must be configured in n8n',
|
||||
'Type name matches credential definition',
|
||||
],
|
||||
},
|
||||
credentialsSelect: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Dropdown to select from available credentials',
|
||||
example: 'credential-id-123',
|
||||
examples: ['cred-abc', 'cred-def', '{{ $credentials.id }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'User selects from configured credentials',
|
||||
'Returns credential ID',
|
||||
'Used when multiple credential instances exist',
|
||||
],
|
||||
},
|
||||
hidden: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Hidden property not shown in UI (used for internal logic)',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Not rendered in UI',
|
||||
'Can store metadata or computed values',
|
||||
'Often used for version tracking',
|
||||
],
|
||||
},
|
||||
button: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Clickable button that triggers an action',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Triggers action when clicked',
|
||||
'Does not store a value',
|
||||
'Action defined in routing property',
|
||||
],
|
||||
},
|
||||
callout: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Informational message box (warning, info, success, error)',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Display-only, no value stored',
|
||||
'Used for warnings and hints',
|
||||
'Style controlled by typeOptions',
|
||||
],
|
||||
},
|
||||
notice: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Notice message displayed to user',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: ['Similar to callout', 'Display-only element', 'Provides contextual information'],
|
||||
},
|
||||
workflowSelector: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Dropdown to select another workflow',
|
||||
example: 'workflow-123',
|
||||
examples: ['wf-abc', '{{ $json.workflowId }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Selects from available workflows',
|
||||
'Returns workflow ID',
|
||||
'Used in Execute Workflow node',
|
||||
],
|
||||
},
|
||||
curlImport: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Import configuration from cURL command',
|
||||
example: 'curl -X GET https://api.example.com/data',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Parses cURL command to populate fields',
|
||||
'Used in HTTP Request node',
|
||||
'One-time import feature',
|
||||
],
|
||||
},
|
||||
};
|
||||
exports.COMPLEX_TYPE_EXAMPLES = {
|
||||
collection: {
|
||||
basic: {
|
||||
name: 'John Doe',
|
||||
email: 'john@example.com',
|
||||
},
|
||||
nested: {
|
||||
user: {
|
||||
firstName: 'Jane',
|
||||
lastName: 'Smith',
|
||||
},
|
||||
preferences: {
|
||||
theme: 'dark',
|
||||
notifications: true,
|
||||
},
|
||||
},
|
||||
withExpressions: {
|
||||
id: '{{ $json.userId }}',
|
||||
timestamp: '{{ $now }}',
|
||||
data: '{{ $json.payload }}',
|
||||
},
|
||||
},
|
||||
fixedCollection: {
|
||||
httpHeaders: {
|
||||
headers: [
|
||||
{ name: 'Content-Type', value: 'application/json' },
|
||||
{ name: 'Authorization', value: 'Bearer {{ $credentials.token }}' },
|
||||
],
|
||||
},
|
||||
queryParameters: {
|
||||
queryParameters: [
|
||||
{ name: 'page', value: '1' },
|
||||
{ name: 'limit', value: '100' },
|
||||
],
|
||||
},
|
||||
multipleCollections: {
|
||||
headers: [{ name: 'Accept', value: 'application/json' }],
|
||||
queryParameters: [{ name: 'filter', value: 'active' }],
|
||||
},
|
||||
},
|
||||
filter: {
|
||||
simple: {
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
leftValue: '{{ $json.status }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'active',
|
||||
},
|
||||
],
|
||||
combinator: 'and',
|
||||
},
|
||||
complex: {
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
leftValue: '{{ $json.age }}',
|
||||
operator: { type: 'number', operation: 'gt' },
|
||||
rightValue: 18,
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
leftValue: '{{ $json.country }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'US',
|
||||
},
|
||||
],
|
||||
combinator: 'and',
|
||||
},
|
||||
},
|
||||
resourceMapper: {
|
||||
autoMap: {
|
||||
mappingMode: 'autoMapInputData',
|
||||
value: {},
|
||||
},
|
||||
manual: {
|
||||
mappingMode: 'defineBelow',
|
||||
value: {
|
||||
firstName: '{{ $json.first_name }}',
|
||||
lastName: '{{ $json.last_name }}',
|
||||
email: '{{ $json.email_address }}',
|
||||
status: 'active',
|
||||
},
|
||||
},
|
||||
},
|
||||
assignmentCollection: {
|
||||
basic: {
|
||||
assignments: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'fullName',
|
||||
value: '{{ $json.firstName }} {{ $json.lastName }}',
|
||||
type: 'string',
|
||||
},
|
||||
],
|
||||
},
|
||||
multiple: {
|
||||
assignments: [
|
||||
{ id: '1', name: 'userName', value: '{{ $json.name }}', type: 'string' },
|
||||
{ id: '2', name: 'userAge', value: '{{ $json.age }}', type: 'number' },
|
||||
{ id: '3', name: 'isActive', value: true, type: 'boolean' },
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
//# sourceMappingURL=type-structures.js.map
|
||||
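A small lookup sketch for the table above; the import path assumes the compiled output in dist/, and the property-type key is just one example:

```ts
import { TYPE_STRUCTURES, COMPLEX_TYPE_EXAMPLES } from './constants/type-structures';

// Look up the expected shape, validation rules, and an example value
// for a given n8n property type.
const s = TYPE_STRUCTURES.fixedCollection;
console.log(s.description); // "A collection with predefined groups of properties"
console.log(s.validation);  // { allowEmpty: true, allowExpressions: true }
console.log(JSON.stringify(COMPLEX_TYPE_EXAMPLES.fixedCollection.httpHeaders, null, 2));
```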

1 dist/constants/type-structures.js.map vendored Normal file
File diff suppressed because one or more lines are too long

33 dist/database/database-adapter.d.ts vendored Normal file
@@ -0,0 +1,33 @@
```ts
export interface DatabaseAdapter {
    prepare(sql: string): PreparedStatement;
    exec(sql: string): void;
    close(): void;
    pragma(key: string, value?: any): any;
    readonly inTransaction: boolean;
    transaction<T>(fn: () => T): T;
    checkFTS5Support(): boolean;
}
export interface PreparedStatement {
    run(...params: any[]): RunResult;
    get(...params: any[]): any;
    all(...params: any[]): any[];
    iterate(...params: any[]): IterableIterator<any>;
    pluck(toggle?: boolean): this;
    expand(toggle?: boolean): this;
    raw(toggle?: boolean): this;
    columns(): ColumnDefinition[];
    bind(...params: any[]): this;
}
export interface RunResult {
    changes: number;
    lastInsertRowid: number | bigint;
}
export interface ColumnDefinition {
    name: string;
    column: string | null;
    table: string | null;
    database: string | null;
    type: string | null;
}
export declare function createDatabaseAdapter(dbPath: string): Promise<DatabaseAdapter>;
//# sourceMappingURL=database-adapter.d.ts.map
```

1 dist/database/database-adapter.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"database-adapter.d.ts","sourceRoot":"","sources":["../../src/database/database-adapter.ts"],"names":[],"mappings":"AAQA,MAAM,WAAW,eAAe;IAC9B,OAAO,CAAC,GAAG,EAAE,MAAM,GAAG,iBAAiB,CAAC;IACxC,IAAI,CAAC,GAAG,EAAE,MAAM,GAAG,IAAI,CAAC;IACxB,KAAK,IAAI,IAAI,CAAC;IACd,MAAM,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,GAAG,GAAG,GAAG,CAAC;IACtC,QAAQ,CAAC,aAAa,EAAE,OAAO,CAAC;IAChC,WAAW,CAAC,CAAC,EAAE,EAAE,EAAE,MAAM,CAAC,GAAG,CAAC,CAAC;IAC/B,gBAAgB,IAAI,OAAO,CAAC;CAC7B;AAED,MAAM,WAAW,iBAAiB;IAChC,GAAG,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,SAAS,CAAC;IACjC,GAAG,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,GAAG,CAAC;IAC3B,GAAG,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,GAAG,EAAE,CAAC;IAC7B,OAAO,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,gBAAgB,CAAC,GAAG,CAAC,CAAC;IACjD,KAAK,CAAC,MAAM,CAAC,EAAE,OAAO,GAAG,IAAI,CAAC;IAC9B,MAAM,CAAC,MAAM,CAAC,EAAE,OAAO,GAAG,IAAI,CAAC;IAC/B,GAAG,CAAC,MAAM,CAAC,EAAE,OAAO,GAAG,IAAI,CAAC;IAC5B,OAAO,IAAI,gBAAgB,EAAE,CAAC;IAC9B,IAAI,CAAC,GAAG,MAAM,EAAE,GAAG,EAAE,GAAG,IAAI,CAAC;CAC9B;AAED,MAAM,WAAW,SAAS;IACxB,OAAO,EAAE,MAAM,CAAC;IAChB,eAAe,EAAE,MAAM,GAAG,MAAM,CAAC;CAClC;AAED,MAAM,WAAW,gBAAgB;IAC/B,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC;IACtB,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;IACrB,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;IACxB,IAAI,EAAE,MAAM,GAAG,IAAI,CAAC;CACrB;AAMD,wBAAsB,qBAAqB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,eAAe,CAAC,CAoDpF"}

420 dist/database/database-adapter.js vendored Normal file
@@ -0,0 +1,420 @@
"use strict";
|
||||
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
var desc = Object.getOwnPropertyDescriptor(m, k);
|
||||
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
||||
desc = { enumerable: true, get: function() { return m[k]; } };
|
||||
}
|
||||
Object.defineProperty(o, k2, desc);
|
||||
}) : (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
o[k2] = m[k];
|
||||
}));
|
||||
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
||||
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
||||
}) : function(o, v) {
|
||||
o["default"] = v;
|
||||
});
|
||||
var __importStar = (this && this.__importStar) || (function () {
|
||||
var ownKeys = function(o) {
|
||||
ownKeys = Object.getOwnPropertyNames || function (o) {
|
||||
var ar = [];
|
||||
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
||||
return ar;
|
||||
};
|
||||
return ownKeys(o);
|
||||
};
|
||||
return function (mod) {
|
||||
if (mod && mod.__esModule) return mod;
|
||||
var result = {};
|
||||
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
||||
__setModuleDefault(result, mod);
|
||||
return result;
|
||||
};
|
||||
})();
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.createDatabaseAdapter = createDatabaseAdapter;
|
||||
const fs_1 = require("fs");
|
||||
const fsSync = __importStar(require("fs"));
|
||||
const path_1 = __importDefault(require("path"));
|
||||
const logger_1 = require("../utils/logger");
|
||||
async function createDatabaseAdapter(dbPath) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info(`Node.js version: ${process.version}`);
|
||||
}
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info(`Platform: ${process.platform} ${process.arch}`);
|
||||
}
|
||||
try {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info('Attempting to use better-sqlite3...');
|
||||
}
|
||||
const adapter = await createBetterSQLiteAdapter(dbPath);
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info('Successfully initialized better-sqlite3 adapter');
|
||||
}
|
||||
return adapter;
|
||||
}
|
||||
catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error);
|
||||
if (errorMessage.includes('NODE_MODULE_VERSION') || errorMessage.includes('was compiled against a different Node.js version')) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.warn(`Node.js version mismatch detected. Better-sqlite3 was compiled for a different Node.js version.`);
|
||||
}
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.warn(`Current Node.js version: ${process.version}`);
|
||||
}
|
||||
}
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.warn('Failed to initialize better-sqlite3, falling back to sql.js', error);
|
||||
}
|
||||
try {
|
||||
const adapter = await createSQLJSAdapter(dbPath);
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.info('Successfully initialized sql.js adapter (pure JavaScript, no native dependencies)');
|
||||
}
|
||||
return adapter;
|
||||
}
|
||||
catch (sqlJsError) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.error('Failed to initialize sql.js adapter', sqlJsError);
|
||||
}
|
||||
throw new Error('Failed to initialize any database adapter');
|
||||
}
|
||||
}
|
||||
}
|
||||
async function createBetterSQLiteAdapter(dbPath) {
|
||||
try {
|
||||
const Database = require('better-sqlite3');
|
||||
const db = new Database(dbPath);
|
||||
return new BetterSQLiteAdapter(db);
|
||||
}
|
||||
catch (error) {
|
||||
throw new Error(`Failed to create better-sqlite3 adapter: ${error}`);
|
||||
}
|
||||
}
|
||||
async function createSQLJSAdapter(dbPath) {
|
||||
let initSqlJs;
|
||||
try {
|
||||
initSqlJs = require('sql.js');
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error('Failed to load sql.js module:', error);
|
||||
throw new Error('sql.js module not found. This might be an issue with npm package installation.');
|
||||
}
|
||||
const SQL = await initSqlJs({
|
||||
locateFile: (file) => {
|
||||
if (file.endsWith('.wasm')) {
|
||||
const possiblePaths = [
|
||||
path_1.default.join(__dirname, '../../node_modules/sql.js/dist/', file),
|
||||
path_1.default.join(__dirname, '../../../sql.js/dist/', file),
|
||||
path_1.default.join(process.cwd(), 'node_modules/sql.js/dist/', file),
|
||||
path_1.default.join(path_1.default.dirname(require.resolve('sql.js')), '../dist/', file)
|
||||
];
|
||||
for (const tryPath of possiblePaths) {
|
||||
if (fsSync.existsSync(tryPath)) {
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.debug(`Found WASM file at: ${tryPath}`);
|
||||
}
|
||||
return tryPath;
|
||||
}
|
||||
}
|
||||
try {
|
||||
const wasmPath = require.resolve('sql.js/dist/sql-wasm.wasm');
|
||||
if (process.env.MCP_MODE !== 'stdio') {
|
||||
logger_1.logger.debug(`Found WASM file via require.resolve: ${wasmPath}`);
|
||||
}
|
||||
return wasmPath;
|
||||
}
|
||||
catch (e) {
|
||||
logger_1.logger.warn(`Could not find WASM file, using default path: ${file}`);
|
||||
return file;
|
||||
}
|
||||
}
|
||||
return file;
|
||||
}
|
||||
});
|
||||
let db;
|
||||
try {
|
||||
const data = await fs_1.promises.readFile(dbPath);
|
||||
db = new SQL.Database(new Uint8Array(data));
|
||||
logger_1.logger.info(`Loaded existing database from ${dbPath}`);
|
||||
}
|
||||
catch (error) {
|
||||
db = new SQL.Database();
|
||||
logger_1.logger.info(`Created new database at ${dbPath}`);
|
||||
}
|
||||
return new SQLJSAdapter(db, dbPath);
|
||||
}
|
||||
class BetterSQLiteAdapter {
|
||||
constructor(db) {
|
||||
this.db = db;
|
||||
}
|
||||
prepare(sql) {
|
||||
const stmt = this.db.prepare(sql);
|
||||
return new BetterSQLiteStatement(stmt);
|
||||
}
|
||||
exec(sql) {
|
||||
this.db.exec(sql);
|
||||
}
|
||||
close() {
|
||||
this.db.close();
|
||||
}
|
||||
pragma(key, value) {
|
||||
return this.db.pragma(key, value);
|
||||
}
|
||||
get inTransaction() {
|
||||
return this.db.inTransaction;
|
||||
}
|
||||
transaction(fn) {
|
||||
return this.db.transaction(fn)();
|
||||
}
|
||||
checkFTS5Support() {
|
||||
try {
|
||||
this.exec("CREATE VIRTUAL TABLE IF NOT EXISTS test_fts5 USING fts5(content);");
|
||||
this.exec("DROP TABLE IF EXISTS test_fts5;");
|
||||
return true;
|
||||
}
|
||||
catch (error) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
class SQLJSAdapter {
|
||||
constructor(db, dbPath) {
|
||||
this.db = db;
|
||||
this.dbPath = dbPath;
|
||||
this.saveTimer = null;
|
||||
this.closed = false;
|
||||
const envInterval = process.env.SQLJS_SAVE_INTERVAL_MS;
|
||||
this.saveIntervalMs = envInterval ? parseInt(envInterval, 10) : SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
|
||||
if (isNaN(this.saveIntervalMs) || this.saveIntervalMs < 100 || this.saveIntervalMs > 60000) {
|
||||
logger_1.logger.warn(`Invalid SQLJS_SAVE_INTERVAL_MS value: ${envInterval} (must be 100-60000ms), ` +
|
||||
`using default ${SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS}ms`);
|
||||
this.saveIntervalMs = SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
|
||||
}
|
||||
logger_1.logger.debug(`SQLJSAdapter initialized with save interval: ${this.saveIntervalMs}ms`);
|
||||
}
|
||||
prepare(sql) {
|
||||
const stmt = this.db.prepare(sql);
|
||||
return new SQLJSStatement(stmt, () => this.scheduleSave());
|
||||
}
|
||||
exec(sql) {
|
||||
this.db.exec(sql);
|
||||
this.scheduleSave();
|
||||
}
|
||||
close() {
|
||||
if (this.closed) {
|
||||
logger_1.logger.debug('SQLJSAdapter already closed, skipping');
|
||||
return;
|
||||
}
|
||||
this.saveToFile();
|
||||
if (this.saveTimer) {
|
||||
clearTimeout(this.saveTimer);
|
||||
this.saveTimer = null;
|
||||
}
|
||||
this.db.close();
|
||||
this.closed = true;
|
||||
}
|
||||
pragma(key, value) {
|
||||
if (key === 'journal_mode' && value === 'WAL') {
|
||||
return 'memory';
|
||||
}
|
||||
return null;
|
||||
}
|
||||
get inTransaction() {
|
||||
return false;
|
||||
}
|
||||
transaction(fn) {
|
||||
try {
|
||||
this.exec('BEGIN');
|
||||
const result = fn();
|
||||
this.exec('COMMIT');
|
||||
return result;
|
||||
}
|
||||
catch (error) {
|
||||
this.exec('ROLLBACK');
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
checkFTS5Support() {
|
||||
try {
|
||||
this.exec("CREATE VIRTUAL TABLE IF NOT EXISTS test_fts5 USING fts5(content);");
|
||||
this.exec("DROP TABLE IF EXISTS test_fts5;");
|
||||
return true;
|
||||
}
|
||||
catch (error) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
scheduleSave() {
|
||||
if (this.saveTimer) {
|
||||
clearTimeout(this.saveTimer);
|
||||
}
|
||||
this.saveTimer = setTimeout(() => {
|
||||
this.saveToFile();
|
||||
}, this.saveIntervalMs);
|
||||
}
|
||||
saveToFile() {
|
||||
try {
|
||||
const data = this.db.export();
|
||||
fsSync.writeFileSync(this.dbPath, data);
|
||||
logger_1.logger.debug(`Database saved to ${this.dbPath}`);
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error('Failed to save database', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS = 5000;
|
||||
class BetterSQLiteStatement {
|
||||
constructor(stmt) {
|
||||
this.stmt = stmt;
|
||||
}
|
||||
run(...params) {
|
||||
return this.stmt.run(...params);
|
||||
}
|
||||
get(...params) {
|
||||
return this.stmt.get(...params);
|
||||
}
|
||||
all(...params) {
|
||||
return this.stmt.all(...params);
|
||||
}
|
||||
iterate(...params) {
|
||||
return this.stmt.iterate(...params);
|
||||
}
|
||||
pluck(toggle) {
|
||||
this.stmt.pluck(toggle);
|
||||
return this;
|
||||
}
|
||||
expand(toggle) {
|
||||
this.stmt.expand(toggle);
|
||||
return this;
|
||||
}
|
||||
raw(toggle) {
|
||||
this.stmt.raw(toggle);
|
||||
return this;
|
||||
}
|
||||
columns() {
|
||||
return this.stmt.columns();
|
||||
}
|
||||
bind(...params) {
|
||||
this.stmt.bind(...params);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
class SQLJSStatement {
|
||||
constructor(stmt, onModify) {
|
||||
this.stmt = stmt;
|
||||
this.onModify = onModify;
|
||||
this.boundParams = null;
|
||||
}
|
||||
run(...params) {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
this.bindParams(params);
|
||||
if (this.boundParams) {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
this.stmt.run();
|
||||
this.onModify();
|
||||
return {
|
||||
changes: 1,
|
||||
lastInsertRowid: 0
|
||||
};
|
||||
}
|
||||
catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
get(...params) {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
this.bindParams(params);
|
||||
if (this.boundParams) {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
if (this.stmt.step()) {
|
||||
const result = this.stmt.getAsObject();
|
||||
this.stmt.reset();
|
||||
return this.convertIntegerColumns(result);
|
||||
}
|
||||
this.stmt.reset();
|
||||
return undefined;
|
||||
}
|
||||
catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
all(...params) {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
this.bindParams(params);
|
||||
if (this.boundParams) {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
const results = [];
|
||||
while (this.stmt.step()) {
|
||||
results.push(this.convertIntegerColumns(this.stmt.getAsObject()));
|
||||
}
|
||||
this.stmt.reset();
|
||||
return results;
|
||||
}
|
||||
catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
iterate(...params) {
|
||||
return this.all(...params)[Symbol.iterator]();
|
||||
}
|
||||
pluck(toggle) {
|
||||
return this;
|
||||
}
|
||||
expand(toggle) {
|
||||
return this;
|
||||
}
|
||||
raw(toggle) {
|
||||
return this;
|
||||
}
|
||||
columns() {
|
||||
return [];
|
||||
}
|
||||
bind(...params) {
|
||||
this.bindParams(params);
|
||||
return this;
|
||||
}
|
||||
bindParams(params) {
|
||||
if (params.length === 0) {
|
||||
this.boundParams = null;
|
||||
return;
|
||||
}
|
||||
if (params.length === 1 && typeof params[0] === 'object' && !Array.isArray(params[0]) && params[0] !== null) {
|
||||
this.boundParams = params[0];
|
||||
}
|
||||
else {
|
||||
this.boundParams = params.map(p => p === undefined ? null : p);
|
||||
}
|
||||
}
|
||||
convertIntegerColumns(row) {
|
||||
if (!row)
|
||||
return row;
|
||||
const integerColumns = ['is_ai_tool', 'is_trigger', 'is_webhook', 'is_versioned'];
|
||||
const converted = { ...row };
|
||||
for (const col of integerColumns) {
|
||||
if (col in converted && typeof converted[col] === 'string') {
|
||||
converted[col] = parseInt(converted[col], 10);
|
||||
}
|
||||
}
|
||||
return converted;
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=database-adapter.js.map
|
||||
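A small usage sketch for the adapter factory above; it falls back to sql.js automatically if better-sqlite3 fails to load. The database path and the `nodes` table name are assumptions for illustration:

```ts
import { createDatabaseAdapter } from './database/database-adapter';

async function main() {
  const db = await createDatabaseAdapter('data/nodes.db'); // path is an assumption
  const row = db.prepare('SELECT COUNT(*) AS count FROM nodes').get(); // table name assumed
  console.log(`nodes: ${row.count}`);
  db.close(); // for the sql.js fallback, close() also flushes the file to disk
}

main().catch(console.error);
```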

1 dist/database/database-adapter.js.map vendored Normal file
File diff suppressed because one or more lines are too long

95 dist/database/node-repository.d.ts vendored Normal file
@@ -0,0 +1,95 @@
```ts
import { DatabaseAdapter } from './database-adapter';
import { ParsedNode } from '../parsers/node-parser';
import { SQLiteStorageService } from '../services/sqlite-storage-service';
export declare class NodeRepository {
    private db;
    constructor(dbOrService: DatabaseAdapter | SQLiteStorageService);
    saveNode(node: ParsedNode): void;
    getNode(nodeType: string): any;
    getAITools(): any[];
    private safeJsonParse;
    upsertNode(node: ParsedNode): void;
    getNodeByType(nodeType: string): any;
    getNodesByCategory(category: string): any[];
    searchNodes(query: string, mode?: 'OR' | 'AND' | 'FUZZY', limit?: number): any[];
    getAllNodes(limit?: number): any[];
    getNodeCount(): number;
    getAIToolNodes(): any[];
    getToolVariant(baseNodeType: string): any | null;
    getBaseNodeForToolVariant(toolNodeType: string): any | null;
    getToolVariants(): any[];
    getToolVariantCount(): number;
    getNodesByPackage(packageName: string): any[];
    searchNodeProperties(nodeType: string, query: string, maxResults?: number): any[];
    private parseNodeRow;
    getNodeOperations(nodeType: string, resource?: string): any[];
    getNodeResources(nodeType: string): any[];
    getOperationsForResource(nodeType: string, resource: string): any[];
    getAllOperations(): Map<string, any[]>;
    getAllResources(): Map<string, any[]>;
    getNodePropertyDefaults(nodeType: string): Record<string, any>;
    getDefaultOperationForResource(nodeType: string, resource?: string): string | undefined;
    saveNodeVersion(versionData: {
        nodeType: string;
        version: string;
        packageName: string;
        displayName: string;
        description?: string;
        category?: string;
        isCurrentMax?: boolean;
        propertiesSchema?: any;
        operations?: any;
        credentialsRequired?: any;
        outputs?: any;
        minimumN8nVersion?: string;
        breakingChanges?: any[];
        deprecatedProperties?: string[];
        addedProperties?: string[];
        releasedAt?: Date;
    }): void;
    getNodeVersions(nodeType: string): any[];
    getLatestNodeVersion(nodeType: string): any | null;
    getNodeVersion(nodeType: string, version: string): any | null;
    savePropertyChange(changeData: {
        nodeType: string;
        fromVersion: string;
        toVersion: string;
        propertyName: string;
        changeType: 'added' | 'removed' | 'renamed' | 'type_changed' | 'requirement_changed' | 'default_changed';
        isBreaking?: boolean;
        oldValue?: string;
        newValue?: string;
        migrationHint?: string;
        autoMigratable?: boolean;
        migrationStrategy?: any;
        severity?: 'LOW' | 'MEDIUM' | 'HIGH';
    }): void;
    getPropertyChanges(nodeType: string, fromVersion: string, toVersion: string): any[];
    getBreakingChanges(nodeType: string, fromVersion: string, toVersion?: string): any[];
    getAutoMigratableChanges(nodeType: string, fromVersion: string, toVersion: string): any[];
    hasVersionUpgradePath(nodeType: string, fromVersion: string, toVersion: string): boolean;
    getVersionedNodesCount(): number;
    private parseNodeVersionRow;
    private parsePropertyChangeRow;
    createWorkflowVersion(data: {
        workflowId: string;
        versionNumber: number;
        workflowName: string;
        workflowSnapshot: any;
        trigger: 'partial_update' | 'full_update' | 'autofix';
        operations?: any[];
        fixTypes?: string[];
        metadata?: any;
    }): number;
    getWorkflowVersions(workflowId: string, limit?: number): any[];
    getWorkflowVersion(versionId: number): any | null;
    getLatestWorkflowVersion(workflowId: string): any | null;
    deleteWorkflowVersion(versionId: number): void;
    deleteWorkflowVersionsByWorkflowId(workflowId: string): number;
    pruneWorkflowVersions(workflowId: string, keepCount: number): number;
    truncateWorkflowVersions(): number;
    getWorkflowVersionCount(workflowId: string): number;
    getVersionStorageStats(): any;
    private parseWorkflowVersionRow;
}
//# sourceMappingURL=node-repository.d.ts.map
```
1 dist/database/node-repository.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"node-repository.d.ts","sourceRoot":"","sources":["../../src/database/node-repository.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,eAAe,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,EAAE,UAAU,EAAE,MAAM,wBAAwB,CAAC;AACpD,OAAO,EAAE,oBAAoB,EAAE,MAAM,oCAAoC,CAAC;AAG1E,qBAAa,cAAc;IACzB,OAAO,CAAC,EAAE,CAAkB;gBAEhB,WAAW,EAAE,eAAe,GAAG,oBAAoB;IAY/D,QAAQ,CAAC,IAAI,EAAE,UAAU,GAAG,IAAI;IAwChC,OAAO,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG;IA2B9B,UAAU,IAAI,GAAG,EAAE;IAgBnB,OAAO,CAAC,aAAa;IASrB,UAAU,CAAC,IAAI,EAAE,UAAU,GAAG,IAAI;IAIlC,aAAa,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG;IAIpC,kBAAkB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAqB3C,WAAW,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,GAAE,IAAI,GAAG,KAAK,GAAG,OAAc,EAAE,KAAK,GAAE,MAAW,GAAG,GAAG,EAAE;IAwC1F,WAAW,CAAC,KAAK,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IAUlC,YAAY,IAAI,MAAM;IAKtB,cAAc,IAAI,GAAG,EAAE;IAOvB,cAAc,CAAC,YAAY,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAYhD,yBAAyB,CAAC,YAAY,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAY3D,eAAe,IAAI,GAAG,EAAE;IAoBxB,mBAAmB,IAAI,MAAM;IAK7B,iBAAiB,CAAC,WAAW,EAAE,MAAM,GAAG,GAAG,EAAE;IAS7C,oBAAoB,CAAC,QAAQ,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,UAAU,GAAE,MAAW,GAAG,GAAG,EAAE;IAmCrF,OAAO,CAAC,YAAY;IA4BpB,iBAAiB,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IAmD7D,gBAAgB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAmBzC,wBAAwB,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAyBnE,gBAAgB,IAAI,GAAG,CAAC,MAAM,EAAE,GAAG,EAAE,CAAC;IAiBtC,eAAe,IAAI,GAAG,CAAC,MAAM,EAAE,GAAG,EAAE,CAAC;IAiBrC,uBAAuB,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC;IAwB9D,8BAA8B,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS;IAuDvF,eAAe,CAAC,WAAW,EAAE;QAC3B,QAAQ,EAAE,MAAM,CAAC;QACjB,OAAO,EAAE,MAAM,CAAC;QAChB,WAAW,EAAE,MAAM,CAAC;QACpB,WAAW,EAAE,MAAM,CAAC;QACpB,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,YAAY,CAAC,EAAE,OAAO,CAAC;QACvB,gBAAgB,CAAC,EAAE,GAAG,CAAC;QACvB,UAAU,CAAC,EAAE,GAAG,CAAC;QACjB,mBAAmB,CAAC,EAAE,GAAG,CAAC;QAC1B,OAAO,CAAC,EAAE,GAAG,CAAC;QACd,iBAAiB,CAAC,EAAE,MAAM,CAAC;QAC3B,eAAe,CAAC,EAAE,GAAG,EAAE,CAAC;QACxB,oBAAoB,CAAC,EAAE,MAAM,EAAE,CAAC;QAChC,eAAe,CAAC,EAAE,MAAM,EAAE,CAAC;QAC3B,UAAU,CAAC,EAAE,IAAI,CAAC;KACnB,GAAG,IAAI;IAkCR,eAAe,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,EAAE;IAexC,oBAAoB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAgBlD,cAAc,CAAC,QAAQ,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAe7D,kBAAkB,CAAC,UAAU,EAAE;QAC7B,QAAQ,EAAE,MAAM,CAAC;QACjB,WAAW,EAAE,MAAM,CAAC;QACpB,SAAS,EAAE,MAAM,CAAC;QAClB,YAAY,EAAE,MAAM,CAAC;QACrB,UAAU,EAAE,OAAO,GAAG,SAAS,GAAG,SAAS,GAAG,cAAc,GAAG,qBAAqB,GAAG,iBAAiB,CAAC;QACzG,UAAU,CAAC,EAAE,OAAO,CAAC;QACrB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,aAAa,CAAC,EAAE,MAAM,CAAC;QACvB,cAAc,CAAC,EAAE,OAAO,CAAC;QACzB,iBAAiB,CAAC,EAAE,GAAG,CAAC;QACxB,QAAQ,CAAC,EAAE,KAAK,GAAG,QAAQ,GAAG,MAAM,CAAC;KACtC,GAAG,IAAI;IA4BR,kBAAkB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,GAAG,EAAE;IAgBnF,kBAAkB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IA4BpF,wBAAwB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,GAAG,EAAE;IAkBzF,qBAAqB,CAAC,QAAQ,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO;IAcxF,sBAAsB,IAAI,MAAM;IAWhC,OAAO,CAAC,mBAAmB;IA0B3B,OAAO,CAAC,sBAAsB;IA0B9B,qBAAqB,CAAC,IAAI,EAAE;QAC1B,UAAU,EAAE,MAAM,CAAC;QACnB,aAAa,EAAE,MAAM,CAAC;QACtB,YAAY,EAAE,MAAM,CAAC;QACrB,gBAAgB,EAAE,GAAG,CAAC;QACtB,OAAO,EAAE,gBAAgB,GAAG,aAAa,GAAG,SAAS,CAAC;QACtD,UAAU,CAAC,EAAE,GAAG,EAAE,CAAC;QACnB,QAAQ,CAAC,EAAE,MAAM,EAAE,CAAC;QACpB,QAAQ,CAAC,EAAE,GAAG,CAAC;KAChB,GAAG,MAAM;IAyBV,mBAAmB,CAAC,UAAU,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,MAAM,GAAG,GAAG,EAAE;IAoB9D,k
BAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAYjD,wBAAwB,CAAC,UAAU,EAAE,MAAM,GAAG,GAAG,GAAG,IAAI;IAexD,qBAAqB,CAAC,SAAS,EAAE,MAAM,GAAG,IAAI;IAS9C,kCAAkC,CAAC,UAAU,EAAE,MAAM,GAAG,MAAM;IAY9D,qBAAqB,CAAC,UAAU,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,MAAM;IAiCpE,wBAAwB,IAAI,MAAM;IAWlC,uBAAuB,CAAC,UAAU,EAAE,MAAM,GAAG,MAAM;IAWnD,sBAAsB,IAAI,GAAG;IAwC7B,OAAO,CAAC,uBAAuB;CAchC"}
|
||||
641 dist/database/node-repository.js vendored Normal file
@@ -0,0 +1,641 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.NodeRepository = void 0;
|
||||
const sqlite_storage_service_1 = require("../services/sqlite-storage-service");
|
||||
const node_type_normalizer_1 = require("../utils/node-type-normalizer");
|
||||
class NodeRepository {
|
||||
constructor(dbOrService) {
|
||||
if (dbOrService instanceof sqlite_storage_service_1.SQLiteStorageService) {
|
||||
this.db = dbOrService.db;
|
||||
return;
|
||||
}
|
||||
this.db = dbOrService;
|
||||
}
|
||||
saveNode(node) {
|
||||
const stmt = this.db.prepare(`
|
||||
INSERT OR REPLACE INTO nodes (
|
||||
node_type, package_name, display_name, description,
|
||||
category, development_style, is_ai_tool, is_trigger,
|
||||
is_webhook, is_versioned, is_tool_variant, tool_variant_of,
|
||||
has_tool_variant, version, documentation,
|
||||
properties_schema, operations, credentials_required,
|
||||
outputs, output_names
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`);
|
||||
stmt.run(node.nodeType, node.packageName, node.displayName, node.description, node.category, node.style, node.isAITool ? 1 : 0, node.isTrigger ? 1 : 0, node.isWebhook ? 1 : 0, node.isVersioned ? 1 : 0, node.isToolVariant ? 1 : 0, node.toolVariantOf || null, node.hasToolVariant ? 1 : 0, node.version, node.documentation || null, JSON.stringify(node.properties, null, 2), JSON.stringify(node.operations, null, 2), JSON.stringify(node.credentials, null, 2), node.outputs ? JSON.stringify(node.outputs, null, 2) : null, node.outputNames ? JSON.stringify(node.outputNames, null, 2) : null);
|
||||
}
|
||||
getNode(nodeType) {
|
||||
const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
const row = this.db.prepare(`
|
||||
SELECT * FROM nodes WHERE node_type = ?
|
||||
`).get(normalizedType);
|
||||
if (!row && normalizedType !== nodeType) {
|
||||
const originalRow = this.db.prepare(`
|
||||
SELECT * FROM nodes WHERE node_type = ?
|
||||
`).get(nodeType);
|
||||
if (originalRow) {
|
||||
return this.parseNodeRow(originalRow);
|
||||
}
|
||||
}
|
||||
if (!row)
|
||||
return null;
|
||||
return this.parseNodeRow(row);
|
||||
}
|
||||
getAITools() {
|
||||
const rows = this.db.prepare(`
|
||||
SELECT node_type, display_name, description, package_name
|
||||
FROM nodes
|
||||
WHERE is_ai_tool = 1
|
||||
ORDER BY display_name
|
||||
`).all();
|
||||
return rows.map(row => ({
|
||||
nodeType: row.node_type,
|
||||
displayName: row.display_name,
|
||||
description: row.description,
|
||||
package: row.package_name
|
||||
}));
|
||||
}
|
||||
safeJsonParse(json, defaultValue) {
|
||||
try {
|
||||
return JSON.parse(json);
|
||||
}
|
||||
catch {
|
||||
return defaultValue;
|
||||
}
|
||||
}
|
||||
upsertNode(node) {
|
||||
this.saveNode(node);
|
||||
}
|
||||
getNodeByType(nodeType) {
|
||||
return this.getNode(nodeType);
|
||||
}
|
||||
getNodesByCategory(category) {
|
||||
const rows = this.db.prepare(`
|
||||
SELECT * FROM nodes WHERE category = ?
|
||||
ORDER BY display_name
|
||||
`).all(category);
|
||||
return rows.map(row => this.parseNodeRow(row));
|
||||
}
|
||||
searchNodes(query, mode = 'OR', limit = 20) {
|
||||
let sql = '';
|
||||
const params = [];
|
||||
if (mode === 'FUZZY') {
|
||||
sql = `
|
||||
SELECT * FROM nodes
|
||||
WHERE node_type LIKE ? OR display_name LIKE ? OR description LIKE ?
|
||||
ORDER BY display_name
|
||||
LIMIT ?
|
||||
`;
|
||||
const fuzzyQuery = `%${query}%`;
|
||||
params.push(fuzzyQuery, fuzzyQuery, fuzzyQuery, limit);
|
||||
}
|
||||
else {
|
||||
const words = query.split(/\s+/).filter(w => w.length > 0);
|
||||
const conditions = words.map(() => '(node_type LIKE ? OR display_name LIKE ? OR description LIKE ?)');
|
||||
const operator = mode === 'AND' ? ' AND ' : ' OR ';
|
||||
sql = `
|
||||
SELECT * FROM nodes
|
||||
WHERE ${conditions.join(operator)}
|
||||
ORDER BY display_name
|
||||
LIMIT ?
|
||||
`;
|
||||
for (const word of words) {
|
||||
const searchTerm = `%${word}%`;
|
||||
params.push(searchTerm, searchTerm, searchTerm);
|
||||
}
|
||||
params.push(limit);
|
||||
}
|
||||
const rows = this.db.prepare(sql).all(...params);
|
||||
return rows.map(row => this.parseNodeRow(row));
|
||||
}
|
||||
getAllNodes(limit) {
|
||||
let sql = 'SELECT * FROM nodes ORDER BY display_name';
|
||||
if (limit) {
|
||||
sql += ` LIMIT ${limit}`;
|
||||
}
|
||||
const rows = this.db.prepare(sql).all();
|
||||
return rows.map(row => this.parseNodeRow(row));
|
||||
}
|
||||
getNodeCount() {
|
||||
const result = this.db.prepare('SELECT COUNT(*) as count FROM nodes').get();
|
||||
return result.count;
|
||||
}
|
||||
getAIToolNodes() {
|
||||
return this.getAITools();
|
||||
}
|
||||
getToolVariant(baseNodeType) {
|
||||
if (!baseNodeType || typeof baseNodeType !== 'string' || !baseNodeType.includes('.')) {
|
||||
return null;
|
||||
}
|
||||
const toolNodeType = `${baseNodeType}Tool`;
|
||||
return this.getNode(toolNodeType);
|
||||
}
|
||||
getBaseNodeForToolVariant(toolNodeType) {
|
||||
const row = this.db.prepare(`
|
||||
SELECT tool_variant_of FROM nodes WHERE node_type = ?
|
||||
`).get(toolNodeType);
|
||||
if (!row?.tool_variant_of)
|
||||
return null;
|
||||
return this.getNode(row.tool_variant_of);
|
||||
}
|
||||
getToolVariants() {
|
||||
const rows = this.db.prepare(`
|
||||
SELECT node_type, display_name, description, package_name, tool_variant_of
|
||||
FROM nodes
|
||||
WHERE is_tool_variant = 1
|
||||
ORDER BY display_name
|
||||
`).all();
|
||||
return rows.map(row => ({
|
||||
nodeType: row.node_type,
|
||||
displayName: row.display_name,
|
||||
description: row.description,
|
||||
package: row.package_name,
|
||||
toolVariantOf: row.tool_variant_of
|
||||
}));
|
||||
}
|
||||
getToolVariantCount() {
|
||||
const result = this.db.prepare('SELECT COUNT(*) as count FROM nodes WHERE is_tool_variant = 1').get();
|
||||
return result.count;
|
||||
}
|
||||
getNodesByPackage(packageName) {
|
||||
const rows = this.db.prepare(`
|
||||
SELECT * FROM nodes WHERE package_name = ?
|
||||
ORDER BY display_name
|
||||
`).all(packageName);
|
||||
return rows.map(row => this.parseNodeRow(row));
|
||||
}
|
||||
searchNodeProperties(nodeType, query, maxResults = 20) {
|
||||
const node = this.getNode(nodeType);
|
||||
if (!node || !node.properties)
|
||||
return [];
|
||||
const results = [];
|
||||
const searchLower = query.toLowerCase();
|
||||
function searchProperties(properties, path = []) {
|
||||
for (const prop of properties) {
|
||||
if (results.length >= maxResults)
|
||||
break;
|
||||
const currentPath = [...path, prop.name || prop.displayName];
|
||||
const pathString = currentPath.join('.');
|
||||
if (prop.name?.toLowerCase().includes(searchLower) ||
|
||||
prop.displayName?.toLowerCase().includes(searchLower) ||
|
||||
prop.description?.toLowerCase().includes(searchLower)) {
|
||||
results.push({
|
||||
path: pathString,
|
||||
property: prop,
|
||||
description: prop.description
|
||||
});
|
||||
}
|
||||
if (prop.options) {
|
||||
searchProperties(prop.options, currentPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
searchProperties(node.properties);
|
||||
return results;
|
||||
}
|
||||
parseNodeRow(row) {
|
||||
return {
|
||||
nodeType: row.node_type,
|
||||
displayName: row.display_name,
|
||||
description: row.description,
|
||||
category: row.category,
|
||||
developmentStyle: row.development_style,
|
||||
package: row.package_name,
|
||||
isAITool: Number(row.is_ai_tool) === 1,
|
||||
isTrigger: Number(row.is_trigger) === 1,
|
||||
isWebhook: Number(row.is_webhook) === 1,
|
||||
isVersioned: Number(row.is_versioned) === 1,
|
||||
isToolVariant: Number(row.is_tool_variant) === 1,
|
||||
toolVariantOf: row.tool_variant_of || null,
|
||||
hasToolVariant: Number(row.has_tool_variant) === 1,
|
||||
version: row.version,
|
||||
properties: this.safeJsonParse(row.properties_schema, []),
|
||||
operations: this.safeJsonParse(row.operations, []),
|
||||
credentials: this.safeJsonParse(row.credentials_required, []),
|
||||
hasDocumentation: !!row.documentation,
|
||||
outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
|
||||
outputNames: row.output_names ? this.safeJsonParse(row.output_names, null) : null
|
||||
};
|
||||
}
|
||||
getNodeOperations(nodeType, resource) {
|
||||
const node = this.getNode(nodeType);
|
||||
if (!node)
|
||||
return [];
|
||||
const operations = [];
|
||||
if (node.operations) {
|
||||
if (Array.isArray(node.operations)) {
|
||||
operations.push(...node.operations);
|
||||
}
|
||||
else if (typeof node.operations === 'object') {
|
||||
if (resource && node.operations[resource]) {
|
||||
return node.operations[resource];
|
||||
}
|
||||
else {
|
||||
Object.values(node.operations).forEach(ops => {
|
||||
if (Array.isArray(ops)) {
|
||||
operations.push(...ops);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
if (node.properties && Array.isArray(node.properties)) {
|
||||
for (const prop of node.properties) {
|
||||
if (prop.name === 'operation' && prop.options) {
|
||||
if (resource && prop.displayOptions?.show?.resource) {
|
||||
const allowedResources = Array.isArray(prop.displayOptions.show.resource)
|
||||
? prop.displayOptions.show.resource
|
||||
: [prop.displayOptions.show.resource];
|
||||
if (!allowedResources.includes(resource)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
operations.push(...prop.options);
|
||||
}
|
||||
}
|
||||
}
|
||||
return operations;
|
||||
}
|
||||
getNodeResources(nodeType) {
|
||||
const node = this.getNode(nodeType);
|
||||
if (!node || !node.properties)
|
||||
return [];
|
||||
const resources = [];
|
||||
for (const prop of node.properties) {
|
||||
if (prop.name === 'resource' && prop.options) {
|
||||
resources.push(...prop.options);
|
||||
}
|
||||
}
|
||||
return resources;
|
||||
}
|
||||
getOperationsForResource(nodeType, resource) {
|
||||
const node = this.getNode(nodeType);
|
||||
if (!node || !node.properties)
|
||||
return [];
|
||||
const operations = [];
|
||||
for (const prop of node.properties) {
|
||||
if (prop.name === 'operation' && prop.displayOptions?.show?.resource) {
|
||||
const allowedResources = Array.isArray(prop.displayOptions.show.resource)
|
||||
? prop.displayOptions.show.resource
|
||||
: [prop.displayOptions.show.resource];
|
||||
if (allowedResources.includes(resource) && prop.options) {
|
||||
operations.push(...prop.options);
|
||||
}
|
||||
}
|
||||
}
|
||||
return operations;
|
||||
}
|
||||
getAllOperations() {
|
||||
const allOperations = new Map();
|
||||
const nodes = this.getAllNodes();
|
||||
for (const node of nodes) {
|
||||
const operations = this.getNodeOperations(node.nodeType);
|
||||
if (operations.length > 0) {
|
||||
allOperations.set(node.nodeType, operations);
|
||||
}
|
||||
}
|
||||
return allOperations;
|
||||
}
|
||||
getAllResources() {
|
||||
const allResources = new Map();
|
||||
const nodes = this.getAllNodes();
|
||||
for (const node of nodes) {
|
||||
const resources = this.getNodeResources(node.nodeType);
|
||||
if (resources.length > 0) {
|
||||
allResources.set(node.nodeType, resources);
|
||||
}
|
||||
}
|
||||
return allResources;
|
||||
}
|
||||
getNodePropertyDefaults(nodeType) {
|
||||
try {
|
||||
const node = this.getNode(nodeType);
|
||||
if (!node || !node.properties)
|
||||
return {};
|
||||
const defaults = {};
|
||||
for (const prop of node.properties) {
|
||||
if (prop.name && prop.default !== undefined) {
|
||||
defaults[prop.name] = prop.default;
|
||||
}
|
||||
}
|
||||
return defaults;
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`Error getting property defaults for ${nodeType}:`, error);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
getDefaultOperationForResource(nodeType, resource) {
|
||||
try {
|
||||
const node = this.getNode(nodeType);
|
||||
if (!node || !node.properties)
|
||||
return undefined;
|
||||
for (const prop of node.properties) {
|
||||
if (prop.name === 'operation') {
|
||||
if (resource && prop.displayOptions?.show?.resource) {
|
||||
const resourceDep = prop.displayOptions.show.resource;
|
||||
if (!Array.isArray(resourceDep) && typeof resourceDep !== 'string') {
|
||||
continue;
|
||||
}
|
||||
const allowedResources = Array.isArray(resourceDep)
|
||||
? resourceDep
|
||||
: [resourceDep];
|
||||
if (!allowedResources.includes(resource)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (prop.default !== undefined) {
|
||||
return prop.default;
|
||||
}
|
||||
if (prop.options && Array.isArray(prop.options) && prop.options.length > 0) {
|
||||
const firstOption = prop.options[0];
|
||||
return typeof firstOption === 'string' ? firstOption : firstOption.value;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (error) {
|
||||
console.error(`Error getting default operation for ${nodeType}:`, error);
|
||||
return undefined;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
saveNodeVersion(versionData) {
|
||||
const stmt = this.db.prepare(`
|
||||
INSERT OR REPLACE INTO node_versions (
|
||||
node_type, version, package_name, display_name, description,
|
||||
category, is_current_max, properties_schema, operations,
|
||||
credentials_required, outputs, minimum_n8n_version,
|
||||
breaking_changes, deprecated_properties, added_properties,
|
||||
released_at
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`);
|
||||
stmt.run(versionData.nodeType, versionData.version, versionData.packageName, versionData.displayName, versionData.description || null, versionData.category || null, versionData.isCurrentMax ? 1 : 0, versionData.propertiesSchema ? JSON.stringify(versionData.propertiesSchema) : null, versionData.operations ? JSON.stringify(versionData.operations) : null, versionData.credentialsRequired ? JSON.stringify(versionData.credentialsRequired) : null, versionData.outputs ? JSON.stringify(versionData.outputs) : null, versionData.minimumN8nVersion || null, versionData.breakingChanges ? JSON.stringify(versionData.breakingChanges) : null, versionData.deprecatedProperties ? JSON.stringify(versionData.deprecatedProperties) : null, versionData.addedProperties ? JSON.stringify(versionData.addedProperties) : null, versionData.releasedAt || null);
|
||||
}
|
||||
getNodeVersions(nodeType) {
|
||||
const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
const rows = this.db.prepare(`
|
||||
SELECT * FROM node_versions
|
||||
WHERE node_type = ?
|
||||
ORDER BY version DESC
|
||||
`).all(normalizedType);
|
||||
return rows.map(row => this.parseNodeVersionRow(row));
|
||||
}
|
||||
getLatestNodeVersion(nodeType) {
|
||||
const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
const row = this.db.prepare(`
|
||||
SELECT * FROM node_versions
|
||||
WHERE node_type = ? AND is_current_max = 1
|
||||
LIMIT 1
|
||||
`).get(normalizedType);
|
||||
if (!row)
|
||||
return null;
|
||||
return this.parseNodeVersionRow(row);
|
||||
}
|
||||
getNodeVersion(nodeType, version) {
|
||||
const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
const row = this.db.prepare(`
|
||||
SELECT * FROM node_versions
|
||||
WHERE node_type = ? AND version = ?
|
||||
`).get(normalizedType, version);
|
||||
if (!row)
|
||||
return null;
|
||||
return this.parseNodeVersionRow(row);
|
||||
}
|
||||
savePropertyChange(changeData) {
|
||||
const stmt = this.db.prepare(`
|
||||
INSERT INTO version_property_changes (
|
||||
node_type, from_version, to_version, property_name, change_type,
|
||||
is_breaking, old_value, new_value, migration_hint, auto_migratable,
|
||||
migration_strategy, severity
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`);
|
||||
stmt.run(changeData.nodeType, changeData.fromVersion, changeData.toVersion, changeData.propertyName, changeData.changeType, changeData.isBreaking ? 1 : 0, changeData.oldValue || null, changeData.newValue || null, changeData.migrationHint || null, changeData.autoMigratable ? 1 : 0, changeData.migrationStrategy ? JSON.stringify(changeData.migrationStrategy) : null, changeData.severity || 'MEDIUM');
|
||||
}
|
||||
getPropertyChanges(nodeType, fromVersion, toVersion) {
|
||||
const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
const rows = this.db.prepare(`
|
||||
SELECT * FROM version_property_changes
|
||||
WHERE node_type = ? AND from_version = ? AND to_version = ?
|
||||
ORDER BY severity DESC, property_name
|
||||
`).all(normalizedType, fromVersion, toVersion);
|
||||
return rows.map(row => this.parsePropertyChangeRow(row));
|
||||
}
|
||||
getBreakingChanges(nodeType, fromVersion, toVersion) {
|
||||
const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
let sql = `
|
||||
SELECT * FROM version_property_changes
|
||||
WHERE node_type = ? AND is_breaking = 1
|
||||
`;
|
||||
const params = [normalizedType];
|
||||
if (toVersion) {
|
||||
sql += ` AND from_version >= ? AND to_version <= ?`;
|
||||
params.push(fromVersion, toVersion);
|
||||
}
|
||||
else {
|
||||
sql += ` AND from_version >= ?`;
|
||||
params.push(fromVersion);
|
||||
}
|
||||
sql += ` ORDER BY from_version, to_version, severity DESC`;
|
||||
const rows = this.db.prepare(sql).all(...params);
|
||||
return rows.map(row => this.parsePropertyChangeRow(row));
|
||||
}
|
||||
getAutoMigratableChanges(nodeType, fromVersion, toVersion) {
|
||||
const normalizedType = node_type_normalizer_1.NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
const rows = this.db.prepare(`
|
||||
SELECT * FROM version_property_changes
|
||||
WHERE node_type = ?
|
||||
AND from_version = ?
|
||||
AND to_version = ?
|
||||
AND auto_migratable = 1
|
||||
ORDER BY severity DESC
|
||||
`).all(normalizedType, fromVersion, toVersion);
|
||||
return rows.map(row => this.parsePropertyChangeRow(row));
|
||||
}
|
||||
hasVersionUpgradePath(nodeType, fromVersion, toVersion) {
|
||||
const versions = this.getNodeVersions(nodeType);
|
||||
if (versions.length === 0)
|
||||
return false;
|
||||
const fromExists = versions.some(v => v.version === fromVersion);
|
||||
const toExists = versions.some(v => v.version === toVersion);
|
||||
return fromExists && toExists;
|
||||
}
|
||||
getVersionedNodesCount() {
|
||||
const result = this.db.prepare(`
|
||||
SELECT COUNT(DISTINCT node_type) as count
|
||||
FROM node_versions
|
||||
`).get();
|
||||
return result.count;
|
||||
}
|
||||
parseNodeVersionRow(row) {
|
||||
return {
|
||||
id: row.id,
|
||||
nodeType: row.node_type,
|
||||
version: row.version,
|
||||
packageName: row.package_name,
|
||||
displayName: row.display_name,
|
||||
description: row.description,
|
||||
category: row.category,
|
||||
isCurrentMax: Number(row.is_current_max) === 1,
|
||||
propertiesSchema: row.properties_schema ? this.safeJsonParse(row.properties_schema, []) : null,
|
||||
operations: row.operations ? this.safeJsonParse(row.operations, []) : null,
|
||||
credentialsRequired: row.credentials_required ? this.safeJsonParse(row.credentials_required, []) : null,
|
||||
outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
|
||||
minimumN8nVersion: row.minimum_n8n_version,
|
||||
breakingChanges: row.breaking_changes ? this.safeJsonParse(row.breaking_changes, []) : [],
|
||||
deprecatedProperties: row.deprecated_properties ? this.safeJsonParse(row.deprecated_properties, []) : [],
|
||||
addedProperties: row.added_properties ? this.safeJsonParse(row.added_properties, []) : [],
|
||||
releasedAt: row.released_at,
|
||||
createdAt: row.created_at
|
||||
};
|
||||
}
|
||||
parsePropertyChangeRow(row) {
|
||||
return {
|
||||
id: row.id,
|
||||
nodeType: row.node_type,
|
||||
fromVersion: row.from_version,
|
||||
toVersion: row.to_version,
|
||||
propertyName: row.property_name,
|
||||
changeType: row.change_type,
|
||||
isBreaking: Number(row.is_breaking) === 1,
|
||||
oldValue: row.old_value,
|
||||
newValue: row.new_value,
|
||||
migrationHint: row.migration_hint,
|
||||
autoMigratable: Number(row.auto_migratable) === 1,
|
||||
migrationStrategy: row.migration_strategy ? this.safeJsonParse(row.migration_strategy, null) : null,
|
||||
severity: row.severity,
|
||||
createdAt: row.created_at
|
||||
};
|
||||
}
|
||||
createWorkflowVersion(data) {
|
||||
const stmt = this.db.prepare(`
|
||||
INSERT INTO workflow_versions (
|
||||
workflow_id, version_number, workflow_name, workflow_snapshot,
|
||||
trigger, operations, fix_types, metadata
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`);
|
||||
const result = stmt.run(data.workflowId, data.versionNumber, data.workflowName, JSON.stringify(data.workflowSnapshot), data.trigger, data.operations ? JSON.stringify(data.operations) : null, data.fixTypes ? JSON.stringify(data.fixTypes) : null, data.metadata ? JSON.stringify(data.metadata) : null);
|
||||
return result.lastInsertRowid;
|
||||
}
|
||||
getWorkflowVersions(workflowId, limit) {
|
||||
let sql = `
|
||||
SELECT * FROM workflow_versions
|
||||
WHERE workflow_id = ?
|
||||
ORDER BY version_number DESC
|
||||
`;
|
||||
if (limit) {
|
||||
sql += ` LIMIT ?`;
|
||||
const rows = this.db.prepare(sql).all(workflowId, limit);
|
||||
return rows.map(row => this.parseWorkflowVersionRow(row));
|
||||
}
|
||||
const rows = this.db.prepare(sql).all(workflowId);
|
||||
return rows.map(row => this.parseWorkflowVersionRow(row));
|
||||
}
|
||||
getWorkflowVersion(versionId) {
|
||||
const row = this.db.prepare(`
|
||||
SELECT * FROM workflow_versions WHERE id = ?
|
||||
`).get(versionId);
|
||||
if (!row)
|
||||
return null;
|
||||
return this.parseWorkflowVersionRow(row);
|
||||
}
|
||||
getLatestWorkflowVersion(workflowId) {
|
||||
const row = this.db.prepare(`
|
||||
SELECT * FROM workflow_versions
|
||||
WHERE workflow_id = ?
|
||||
ORDER BY version_number DESC
|
||||
LIMIT 1
|
||||
`).get(workflowId);
|
||||
if (!row)
|
||||
return null;
|
||||
return this.parseWorkflowVersionRow(row);
|
||||
}
|
||||
deleteWorkflowVersion(versionId) {
|
||||
this.db.prepare(`
|
||||
DELETE FROM workflow_versions WHERE id = ?
|
||||
`).run(versionId);
|
||||
}
|
||||
deleteWorkflowVersionsByWorkflowId(workflowId) {
|
||||
const result = this.db.prepare(`
|
||||
DELETE FROM workflow_versions WHERE workflow_id = ?
|
||||
`).run(workflowId);
|
||||
return result.changes;
|
||||
}
|
||||
pruneWorkflowVersions(workflowId, keepCount) {
|
||||
const versions = this.db.prepare(`
|
||||
SELECT id FROM workflow_versions
|
||||
WHERE workflow_id = ?
|
||||
ORDER BY version_number DESC
|
||||
`).all(workflowId);
|
||||
if (versions.length <= keepCount) {
|
||||
return 0;
|
||||
}
|
||||
const idsToDelete = versions.slice(keepCount).map(v => v.id);
|
||||
if (idsToDelete.length === 0) {
|
||||
return 0;
|
||||
}
|
||||
const placeholders = idsToDelete.map(() => '?').join(',');
|
||||
const result = this.db.prepare(`
|
||||
DELETE FROM workflow_versions WHERE id IN (${placeholders})
|
||||
`).run(...idsToDelete);
|
||||
return result.changes;
|
||||
}
|
||||
truncateWorkflowVersions() {
|
||||
const result = this.db.prepare(`
|
||||
DELETE FROM workflow_versions
|
||||
`).run();
|
||||
return result.changes;
|
||||
}
|
||||
getWorkflowVersionCount(workflowId) {
|
||||
const result = this.db.prepare(`
|
||||
SELECT COUNT(*) as count FROM workflow_versions WHERE workflow_id = ?
|
||||
`).get(workflowId);
|
||||
return result.count;
|
||||
}
|
||||
getVersionStorageStats() {
|
||||
const totalResult = this.db.prepare(`
|
||||
SELECT COUNT(*) as count FROM workflow_versions
|
||||
`).get();
|
||||
const sizeResult = this.db.prepare(`
|
||||
SELECT SUM(LENGTH(workflow_snapshot)) as total_size FROM workflow_versions
|
||||
`).get();
|
||||
const byWorkflow = this.db.prepare(`
|
||||
SELECT
|
||||
workflow_id,
|
||||
workflow_name,
|
||||
COUNT(*) as version_count,
|
||||
SUM(LENGTH(workflow_snapshot)) as total_size,
|
||||
MAX(created_at) as last_backup
|
||||
FROM workflow_versions
|
||||
GROUP BY workflow_id
|
||||
ORDER BY version_count DESC
|
||||
`).all();
|
||||
return {
|
||||
totalVersions: totalResult.count,
|
||||
totalSize: sizeResult.total_size || 0,
|
||||
byWorkflow: byWorkflow.map(row => ({
|
||||
workflowId: row.workflow_id,
|
||||
workflowName: row.workflow_name,
|
||||
versionCount: row.version_count,
|
||||
totalSize: row.total_size,
|
||||
lastBackup: row.last_backup
|
||||
}))
|
||||
};
|
||||
}
|
||||
parseWorkflowVersionRow(row) {
|
||||
return {
|
||||
id: row.id,
|
||||
workflowId: row.workflow_id,
|
||||
versionNumber: row.version_number,
|
||||
workflowName: row.workflow_name,
|
||||
workflowSnapshot: this.safeJsonParse(row.workflow_snapshot, null),
|
||||
trigger: row.trigger,
|
||||
operations: row.operations ? this.safeJsonParse(row.operations, null) : null,
|
||||
fixTypes: row.fix_types ? this.safeJsonParse(row.fix_types, null) : null,
|
||||
metadata: row.metadata ? this.safeJsonParse(row.metadata, null) : null,
|
||||
createdAt: row.created_at
|
||||
};
|
||||
}
|
||||
}
|
||||
exports.NodeRepository = NodeRepository;
|
||||
//# sourceMappingURL=node-repository.js.map
|
||||
1 dist/database/node-repository.js.map vendored Normal file
File diff suppressed because one or more lines are too long
10 dist/errors/validation-service-error.d.ts vendored Normal file
@@ -0,0 +1,10 @@
export declare class ValidationServiceError extends Error {
    readonly nodeType?: string | undefined;
    readonly property?: string | undefined;
    readonly cause?: Error | undefined;
    constructor(message: string, nodeType?: string | undefined, property?: string | undefined, cause?: Error | undefined);
    static jsonParseError(nodeType: string, cause: Error): ValidationServiceError;
    static nodeNotFound(nodeType: string): ValidationServiceError;
    static dataExtractionError(nodeType: string, dataType: string, cause?: Error): ValidationServiceError;
}
//# sourceMappingURL=validation-service-error.d.ts.map
1 dist/errors/validation-service-error.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"validation-service-error.d.ts","sourceRoot":"","sources":["../../src/errors/validation-service-error.ts"],"names":[],"mappings":"AAGA,qBAAa,sBAAuB,SAAQ,KAAK;aAG7B,QAAQ,CAAC,EAAE,MAAM;aACjB,QAAQ,CAAC,EAAE,MAAM;aACjB,KAAK,CAAC,EAAE,KAAK;gBAH7B,OAAO,EAAE,MAAM,EACC,QAAQ,CAAC,EAAE,MAAM,YAAA,EACjB,QAAQ,CAAC,EAAE,MAAM,YAAA,EACjB,KAAK,CAAC,EAAE,KAAK,YAAA;IAc/B,MAAM,CAAC,cAAc,CAAC,QAAQ,EAAE,MAAM,EAAE,KAAK,EAAE,KAAK,GAAG,sBAAsB;IAY7E,MAAM,CAAC,YAAY,CAAC,QAAQ,EAAE,MAAM,GAAG,sBAAsB;IAU7D,MAAM,CAAC,mBAAmB,CAAC,QAAQ,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,KAAK,GAAG,sBAAsB;CAQtG"}
26 dist/errors/validation-service-error.js vendored Normal file
@@ -0,0 +1,26 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ValidationServiceError = void 0;
|
||||
class ValidationServiceError extends Error {
|
||||
constructor(message, nodeType, property, cause) {
|
||||
super(message);
|
||||
this.nodeType = nodeType;
|
||||
this.property = property;
|
||||
this.cause = cause;
|
||||
this.name = 'ValidationServiceError';
|
||||
if (Error.captureStackTrace) {
|
||||
Error.captureStackTrace(this, ValidationServiceError);
|
||||
}
|
||||
}
|
||||
static jsonParseError(nodeType, cause) {
|
||||
return new ValidationServiceError(`Failed to parse JSON data for node ${nodeType}`, nodeType, undefined, cause);
|
||||
}
|
||||
static nodeNotFound(nodeType) {
|
||||
return new ValidationServiceError(`Node type ${nodeType} not found in repository`, nodeType);
|
||||
}
|
||||
static dataExtractionError(nodeType, dataType, cause) {
|
||||
return new ValidationServiceError(`Failed to extract ${dataType} for node ${nodeType}`, nodeType, dataType, cause);
|
||||
}
|
||||
}
|
||||
exports.ValidationServiceError = ValidationServiceError;
|
||||
//# sourceMappingURL=validation-service-error.js.map
|
||||
1 dist/errors/validation-service-error.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"validation-service-error.js","sourceRoot":"","sources":["../../src/errors/validation-service-error.ts"],"names":[],"mappings":";;;AAGA,MAAa,sBAAuB,SAAQ,KAAK;IAC/C,YACE,OAAe,EACC,QAAiB,EACjB,QAAiB,EACjB,KAAa;QAE7B,KAAK,CAAC,OAAO,CAAC,CAAC;QAJC,aAAQ,GAAR,QAAQ,CAAS;QACjB,aAAQ,GAAR,QAAQ,CAAS;QACjB,UAAK,GAAL,KAAK,CAAQ;QAG7B,IAAI,CAAC,IAAI,GAAG,wBAAwB,CAAC;QAGrC,IAAI,KAAK,CAAC,iBAAiB,EAAE,CAAC;YAC5B,KAAK,CAAC,iBAAiB,CAAC,IAAI,EAAE,sBAAsB,CAAC,CAAC;QACxD,CAAC;IACH,CAAC;IAKD,MAAM,CAAC,cAAc,CAAC,QAAgB,EAAE,KAAY;QAClD,OAAO,IAAI,sBAAsB,CAC/B,sCAAsC,QAAQ,EAAE,EAChD,QAAQ,EACR,SAAS,EACT,KAAK,CACN,CAAC;IACJ,CAAC;IAKD,MAAM,CAAC,YAAY,CAAC,QAAgB;QAClC,OAAO,IAAI,sBAAsB,CAC/B,aAAa,QAAQ,0BAA0B,EAC/C,QAAQ,CACT,CAAC;IACJ,CAAC;IAKD,MAAM,CAAC,mBAAmB,CAAC,QAAgB,EAAE,QAAgB,EAAE,KAAa;QAC1E,OAAO,IAAI,sBAAsB,CAC/B,qBAAqB,QAAQ,aAAa,QAAQ,EAAE,EACpD,QAAQ,EACR,QAAQ,EACR,KAAK,CACN,CAAC;IACJ,CAAC;CACF;AAjDD,wDAiDC"}
52 dist/http-server-single-session.d.ts vendored Normal file
@@ -0,0 +1,52 @@
#!/usr/bin/env node
import express from 'express';
import { InstanceContext } from './types/instance-context';
import { SessionState } from './types/session-state';
export declare class SingleSessionHTTPServer {
    private transports;
    private servers;
    private sessionMetadata;
    private sessionContexts;
    private contextSwitchLocks;
    private session;
    private consoleManager;
    private expressServer;
    private sessionTimeout;
    private authToken;
    private cleanupTimer;
    constructor();
    private startSessionCleanup;
    private cleanupExpiredSessions;
    private removeSession;
    private getActiveSessionCount;
    private canCreateSession;
    private isValidSessionId;
    private sanitizeErrorForClient;
    private updateSessionAccess;
    private switchSessionContext;
    private performContextSwitch;
    private getSessionMetrics;
    private loadAuthToken;
    private validateEnvironment;
    handleRequest(req: express.Request, res: express.Response, instanceContext?: InstanceContext): Promise<void>;
    private resetSessionSSE;
    private isExpired;
    private isSessionExpired;
    start(): Promise<void>;
    shutdown(): Promise<void>;
    getSessionInfo(): {
        active: boolean;
        sessionId?: string;
        age?: number;
        sessions?: {
            total: number;
            active: number;
            expired: number;
            max: number;
            sessionIds: string[];
        };
    };
    exportSessionState(): SessionState[];
    restoreSessionState(sessions: SessionState[]): number;
}
//# sourceMappingURL=http-server-single-session.d.ts.map
1 dist/http-server-single-session.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"http-server-single-session.d.ts","sourceRoot":"","sources":["../src/http-server-single-session.ts"],"names":[],"mappings":";AAMA,OAAO,OAAO,MAAM,SAAS,CAAC;AAoB9B,OAAO,EAAE,eAAe,EAA2B,MAAM,0BAA0B,CAAC;AACpF,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAuErD,qBAAa,uBAAuB;IAElC,OAAO,CAAC,UAAU,CAA8D;IAChF,OAAO,CAAC,OAAO,CAA0D;IACzE,OAAO,CAAC,eAAe,CAAsE;IAC7F,OAAO,CAAC,eAAe,CAA4D;IACnF,OAAO,CAAC,kBAAkB,CAAyC;IACnE,OAAO,CAAC,OAAO,CAAwB;IACvC,OAAO,CAAC,cAAc,CAAwB;IAC9C,OAAO,CAAC,aAAa,CAAM;IAC3B,OAAO,CAAC,cAAc,CAAkB;IACxC,OAAO,CAAC,SAAS,CAAuB;IACxC,OAAO,CAAC,YAAY,CAA+B;;IAcnD,OAAO,CAAC,mBAAmB;IAmB3B,OAAO,CAAC,sBAAsB;YAqChB,aAAa;IAuC3B,OAAO,CAAC,qBAAqB;IAO7B,OAAO,CAAC,gBAAgB;IAkBxB,OAAO,CAAC,gBAAgB;IASxB,OAAO,CAAC,sBAAsB;IAkC9B,OAAO,CAAC,mBAAmB;YASb,oBAAoB;YAwBpB,oBAAoB;IAwBlC,OAAO,CAAC,iBAAiB;IAsBzB,OAAO,CAAC,aAAa;IA2BrB,OAAO,CAAC,mBAAmB;IAoDrB,aAAa,CACjB,GAAG,EAAE,OAAO,CAAC,OAAO,EACpB,GAAG,EAAE,OAAO,CAAC,QAAQ,EACrB,eAAe,CAAC,EAAE,eAAe,GAChC,OAAO,CAAC,IAAI,CAAC;YAmOF,eAAe;IA8C7B,OAAO,CAAC,SAAS;IAYjB,OAAO,CAAC,gBAAgB;IASlB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAgnBtB,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;IAkD/B,cAAc,IAAI;QAChB,MAAM,EAAE,OAAO,CAAC;QAChB,SAAS,CAAC,EAAE,MAAM,CAAC;QACnB,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,QAAQ,CAAC,EAAE;YACT,KAAK,EAAE,MAAM,CAAC;YACd,MAAM,EAAE,MAAM,CAAC;YACf,OAAO,EAAE,MAAM,CAAC;YAChB,GAAG,EAAE,MAAM,CAAC;YACZ,UAAU,EAAE,MAAM,EAAE,CAAC;SACtB,CAAC;KACH;IAmDM,kBAAkB,IAAI,YAAY,EAAE;IAoEpC,mBAAmB,CAAC,QAAQ,EAAE,YAAY,EAAE,GAAG,MAAM;CAsG7D"}
1180 dist/http-server-single-session.js vendored Normal file
File diff suppressed because it is too large
1 dist/http-server-single-session.js.map vendored Normal file
File diff suppressed because one or more lines are too long
9 dist/http-server.d.ts vendored Normal file
@@ -0,0 +1,9 @@
#!/usr/bin/env node
export declare function loadAuthToken(): string | null;
export declare function startFixedHTTPServer(): Promise<void>;
declare module './mcp/server' {
    interface N8NDocumentationMCPServer {
        executeTool(name: string, args: any): Promise<any>;
    }
}
//# sourceMappingURL=http-server.d.ts.map
1 dist/http-server.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"http-server.d.ts","sourceRoot":"","sources":["../src/http-server.ts"],"names":[],"mappings":";AA0CA,wBAAgB,aAAa,IAAI,MAAM,GAAG,IAAI,CAsB7C;AA+DD,wBAAsB,oBAAoB,kBA+dzC;AAGD,OAAO,QAAQ,cAAc,CAAC;IAC5B,UAAU,yBAAyB;QACjC,WAAW,CAAC,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,GAAG,GAAG,OAAO,CAAC,GAAG,CAAC,CAAC;KACpD;CACF"}
478 dist/http-server.js vendored Normal file
@@ -0,0 +1,478 @@
#!/usr/bin/env node
|
||||
"use strict";
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.loadAuthToken = loadAuthToken;
|
||||
exports.startFixedHTTPServer = startFixedHTTPServer;
|
||||
const express_1 = __importDefault(require("express"));
|
||||
const tools_1 = require("./mcp/tools");
|
||||
const tools_n8n_manager_1 = require("./mcp/tools-n8n-manager");
|
||||
const server_1 = require("./mcp/server");
|
||||
const logger_1 = require("./utils/logger");
|
||||
const auth_1 = require("./utils/auth");
|
||||
const version_1 = require("./utils/version");
|
||||
const n8n_api_1 = require("./config/n8n-api");
|
||||
const dotenv_1 = __importDefault(require("dotenv"));
|
||||
const fs_1 = require("fs");
|
||||
const url_detector_1 = require("./utils/url-detector");
|
||||
const protocol_version_1 = require("./utils/protocol-version");
|
||||
dotenv_1.default.config();
|
||||
let expressServer;
|
||||
let authToken = null;
|
||||
function loadAuthToken() {
|
||||
if (process.env.AUTH_TOKEN) {
|
||||
logger_1.logger.info('Using AUTH_TOKEN from environment variable');
|
||||
return process.env.AUTH_TOKEN;
|
||||
}
|
||||
if (process.env.AUTH_TOKEN_FILE) {
|
||||
try {
|
||||
const token = (0, fs_1.readFileSync)(process.env.AUTH_TOKEN_FILE, 'utf-8').trim();
|
||||
logger_1.logger.info(`Loaded AUTH_TOKEN from file: ${process.env.AUTH_TOKEN_FILE}`);
|
||||
return token;
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error(`Failed to read AUTH_TOKEN_FILE: ${process.env.AUTH_TOKEN_FILE}`, error);
|
||||
console.error(`ERROR: Failed to read AUTH_TOKEN_FILE: ${process.env.AUTH_TOKEN_FILE}`);
|
||||
console.error(error instanceof Error ? error.message : 'Unknown error');
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
function validateEnvironment() {
|
||||
authToken = loadAuthToken();
|
||||
if (!authToken || authToken.trim() === '') {
|
||||
logger_1.logger.error('No authentication token found or token is empty');
|
||||
console.error('ERROR: AUTH_TOKEN is required for HTTP mode and cannot be empty');
|
||||
console.error('Set AUTH_TOKEN environment variable or AUTH_TOKEN_FILE pointing to a file containing the token');
|
||||
console.error('Generate AUTH_TOKEN with: openssl rand -base64 32');
|
||||
process.exit(1);
|
||||
}
|
||||
authToken = authToken.trim();
|
||||
if (authToken.length < 32) {
|
||||
logger_1.logger.warn('AUTH_TOKEN should be at least 32 characters for security');
|
||||
console.warn('WARNING: AUTH_TOKEN should be at least 32 characters for security');
|
||||
}
|
||||
if (authToken === 'REPLACE_THIS_AUTH_TOKEN_32_CHARS_MIN_abcdefgh') {
|
||||
logger_1.logger.warn('⚠️ SECURITY WARNING: Using default AUTH_TOKEN - CHANGE IMMEDIATELY!');
|
||||
logger_1.logger.warn('Generate secure token with: openssl rand -base64 32');
|
||||
if (process.env.MCP_MODE === 'http') {
|
||||
console.warn('\n⚠️ SECURITY WARNING ⚠️');
|
||||
console.warn('Using default AUTH_TOKEN - CHANGE IMMEDIATELY!');
|
||||
console.warn('Generate secure token: openssl rand -base64 32');
|
||||
console.warn('Update via Railway dashboard environment variables\n');
|
||||
}
|
||||
}
|
||||
}
|
||||
async function shutdown() {
|
||||
logger_1.logger.info('Shutting down HTTP server...');
|
||||
console.log('Shutting down HTTP server...');
|
||||
if (expressServer) {
|
||||
expressServer.close(() => {
|
||||
logger_1.logger.info('HTTP server closed');
|
||||
console.log('HTTP server closed');
|
||||
process.exit(0);
|
||||
});
|
||||
setTimeout(() => {
|
||||
logger_1.logger.error('Forced shutdown after timeout');
|
||||
process.exit(1);
|
||||
}, 10000);
|
||||
}
|
||||
else {
|
||||
process.exit(0);
|
||||
}
|
||||
}
|
||||
async function startFixedHTTPServer() {
|
||||
validateEnvironment();
|
||||
const app = (0, express_1.default)();
|
||||
const trustProxy = process.env.TRUST_PROXY ? Number(process.env.TRUST_PROXY) : 0;
|
||||
if (trustProxy > 0) {
|
||||
app.set('trust proxy', trustProxy);
|
||||
logger_1.logger.info(`Trust proxy enabled with ${trustProxy} hop(s)`);
|
||||
}
|
||||
app.use((req, res, next) => {
|
||||
res.setHeader('X-Content-Type-Options', 'nosniff');
|
||||
res.setHeader('X-Frame-Options', 'DENY');
|
||||
res.setHeader('X-XSS-Protection', '1; mode=block');
|
||||
res.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');
|
||||
next();
|
||||
});
|
||||
app.use((req, res, next) => {
|
||||
const allowedOrigin = process.env.CORS_ORIGIN || '*';
|
||||
res.setHeader('Access-Control-Allow-Origin', allowedOrigin);
|
||||
res.setHeader('Access-Control-Allow-Methods', 'POST, GET, OPTIONS');
|
||||
res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, Accept');
|
||||
res.setHeader('Access-Control-Max-Age', '86400');
|
||||
if (req.method === 'OPTIONS') {
|
||||
res.sendStatus(204);
|
||||
return;
|
||||
}
|
||||
next();
|
||||
});
|
||||
app.use((req, res, next) => {
|
||||
logger_1.logger.info(`${req.method} ${req.path}`, {
|
||||
ip: req.ip,
|
||||
userAgent: req.get('user-agent'),
|
||||
contentLength: req.get('content-length')
|
||||
});
|
||||
next();
|
||||
});
|
||||
const mcpServer = new server_1.N8NDocumentationMCPServer();
|
||||
logger_1.logger.info('Created persistent MCP server instance');
|
||||
app.get('/', (req, res) => {
|
||||
const port = parseInt(process.env.PORT || '3000');
|
||||
const host = process.env.HOST || '0.0.0.0';
|
||||
const baseUrl = (0, url_detector_1.detectBaseUrl)(req, host, port);
|
||||
const endpoints = (0, url_detector_1.formatEndpointUrls)(baseUrl);
|
||||
res.json({
|
||||
name: 'n8n Documentation MCP Server',
|
||||
version: version_1.PROJECT_VERSION,
|
||||
description: 'Model Context Protocol server providing comprehensive n8n node documentation and workflow management',
|
||||
endpoints: {
|
||||
health: {
|
||||
url: endpoints.health,
|
||||
method: 'GET',
|
||||
description: 'Health check and status information'
|
||||
},
|
||||
mcp: {
|
||||
url: endpoints.mcp,
|
||||
method: 'GET/POST',
|
||||
description: 'MCP endpoint - GET for info, POST for JSON-RPC'
|
||||
}
|
||||
},
|
||||
authentication: {
|
||||
type: 'Bearer Token',
|
||||
header: 'Authorization: Bearer <token>',
|
||||
required_for: ['POST /mcp']
|
||||
},
|
||||
documentation: 'https://github.com/czlonkowski/n8n-mcp'
|
||||
});
|
||||
});
|
||||
app.get('/health', (req, res) => {
|
||||
res.json({
|
||||
status: 'ok',
|
||||
mode: 'http-fixed',
|
||||
version: version_1.PROJECT_VERSION,
|
||||
uptime: Math.floor(process.uptime()),
|
||||
memory: {
|
||||
used: Math.round(process.memoryUsage().heapUsed / 1024 / 1024),
|
||||
total: Math.round(process.memoryUsage().heapTotal / 1024 / 1024),
|
||||
unit: 'MB'
|
||||
},
|
||||
timestamp: new Date().toISOString()
|
||||
});
|
||||
});
|
||||
app.get('/version', (req, res) => {
|
||||
res.json({
|
||||
version: version_1.PROJECT_VERSION,
|
||||
buildTime: new Date().toISOString(),
|
||||
tools: tools_1.n8nDocumentationToolsFinal.map(t => t.name),
|
||||
commit: process.env.GIT_COMMIT || 'unknown'
|
||||
});
|
||||
});
|
||||
app.get('/test-tools', async (req, res) => {
|
||||
try {
|
||||
const result = await mcpServer.executeTool('get_node_essentials', { nodeType: 'nodes-base.httpRequest' });
|
||||
res.json({ status: 'ok', hasData: !!result, toolCount: tools_1.n8nDocumentationToolsFinal.length });
|
||||
}
|
||||
catch (error) {
|
||||
res.json({ status: 'error', message: error instanceof Error ? error.message : 'Unknown error' });
|
||||
}
|
||||
});
|
||||
app.get('/mcp', (req, res) => {
|
||||
res.json({
|
||||
description: 'n8n Documentation MCP Server',
|
||||
version: version_1.PROJECT_VERSION,
|
||||
endpoints: {
|
||||
mcp: {
|
||||
method: 'POST',
|
||||
path: '/mcp',
|
||||
description: 'Main MCP JSON-RPC endpoint',
|
||||
authentication: 'Bearer token required'
|
||||
},
|
||||
health: {
|
||||
method: 'GET',
|
||||
path: '/health',
|
||||
description: 'Health check endpoint',
|
||||
authentication: 'None'
|
||||
},
|
||||
root: {
|
||||
method: 'GET',
|
||||
path: '/',
|
||||
description: 'API information',
|
||||
authentication: 'None'
|
||||
}
|
||||
},
|
||||
documentation: 'https://github.com/czlonkowski/n8n-mcp'
|
||||
});
|
||||
});
|
||||
app.post('/mcp', async (req, res) => {
|
||||
const startTime = Date.now();
|
||||
const authHeader = req.headers.authorization;
|
||||
if (!authHeader) {
|
||||
logger_1.logger.warn('Authentication failed: Missing Authorization header', {
|
||||
ip: req.ip,
|
||||
userAgent: req.get('user-agent'),
|
||||
reason: 'no_auth_header'
|
||||
});
|
||||
res.status(401).json({
|
||||
jsonrpc: '2.0',
|
||||
error: {
|
||||
code: -32001,
|
||||
message: 'Unauthorized'
|
||||
},
|
||||
id: null
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (!authHeader.startsWith('Bearer ')) {
|
||||
logger_1.logger.warn('Authentication failed: Invalid Authorization header format (expected Bearer token)', {
|
||||
ip: req.ip,
|
||||
userAgent: req.get('user-agent'),
|
||||
reason: 'invalid_auth_format',
|
||||
headerPrefix: authHeader.substring(0, Math.min(authHeader.length, 10)) + '...'
|
||||
});
|
||||
res.status(401).json({
|
||||
jsonrpc: '2.0',
|
||||
error: {
|
||||
code: -32001,
|
||||
message: 'Unauthorized'
|
||||
},
|
||||
id: null
|
||||
});
|
||||
return;
|
||||
}
|
||||
const token = authHeader.slice(7).trim();
|
||||
const isValidToken = authToken &&
|
||||
auth_1.AuthManager.timingSafeCompare(token, authToken);
|
||||
if (!isValidToken) {
|
||||
logger_1.logger.warn('Authentication failed: Invalid token', {
|
||||
ip: req.ip,
|
||||
userAgent: req.get('user-agent'),
|
||||
reason: 'invalid_token'
|
||||
});
|
||||
res.status(401).json({
|
||||
jsonrpc: '2.0',
|
||||
error: {
|
||||
code: -32001,
|
||||
message: 'Unauthorized'
|
||||
},
|
||||
id: null
|
||||
});
|
||||
return;
|
||||
}
|
||||
try {
|
||||
let body = '';
|
||||
req.on('data', chunk => {
|
||||
body += chunk.toString();
|
||||
});
|
||||
req.on('end', async () => {
|
||||
try {
|
||||
const jsonRpcRequest = JSON.parse(body);
|
||||
logger_1.logger.debug('Received JSON-RPC request:', { method: jsonRpcRequest.method });
|
||||
let response;
|
||||
switch (jsonRpcRequest.method) {
|
||||
case 'initialize':
|
||||
const negotiationResult = (0, protocol_version_1.negotiateProtocolVersion)(jsonRpcRequest.params?.protocolVersion, jsonRpcRequest.params?.clientInfo, req.get('user-agent'), req.headers);
|
||||
(0, protocol_version_1.logProtocolNegotiation)(negotiationResult, logger_1.logger, 'HTTP_SERVER_INITIALIZE');
|
||||
response = {
|
||||
jsonrpc: '2.0',
|
||||
result: {
|
||||
protocolVersion: negotiationResult.version,
|
||||
capabilities: {
|
||||
tools: {},
|
||||
resources: {}
|
||||
},
|
||||
serverInfo: {
|
||||
name: 'n8n-documentation-mcp',
|
||||
version: version_1.PROJECT_VERSION
|
||||
}
|
||||
},
|
||||
id: jsonRpcRequest.id
|
||||
};
|
||||
break;
|
||||
case 'tools/list':
|
||||
const tools = [...tools_1.n8nDocumentationToolsFinal];
|
||||
if ((0, n8n_api_1.isN8nApiConfigured)()) {
|
||||
tools.push(...tools_n8n_manager_1.n8nManagementTools);
|
||||
}
|
||||
response = {
|
||||
jsonrpc: '2.0',
|
||||
result: {
|
||||
tools
|
||||
},
|
||||
id: jsonRpcRequest.id
|
||||
};
|
||||
break;
|
||||
case 'tools/call':
|
||||
                            const toolName = jsonRpcRequest.params?.name;
                            const toolArgs = jsonRpcRequest.params?.arguments || {};
                            try {
                                const result = await mcpServer.executeTool(toolName, toolArgs);
                                let responseText = JSON.stringify(result, null, 2);
                                const mcpResult = {
                                    content: [
                                        {
                                            type: 'text',
                                            text: responseText
                                        }
                                    ]
                                };
                                if (toolName.startsWith('validate_')) {
                                    const resultSize = responseText.length;
                                    if (resultSize > 1000000) {
                                        logger_1.logger.warn(`Validation tool ${toolName} response is very large (${resultSize} chars). ` +
                                            `Truncating for HTTP transport safety.`);
                                        mcpResult.content[0].text = responseText.substring(0, 999000) +
                                            '\n\n[Response truncated due to size limits]';
                                    }
                                    else {
                                        mcpResult.structuredContent = result;
                                    }
                                }
                                response = {
                                    jsonrpc: '2.0',
                                    result: mcpResult,
                                    id: jsonRpcRequest.id
                                };
                            }
                            catch (error) {
                                response = {
                                    jsonrpc: '2.0',
                                    error: {
                                        code: -32603,
                                        message: `Error executing tool ${toolName}: ${error instanceof Error ? error.message : 'Unknown error'}`
                                    },
                                    id: jsonRpcRequest.id
                                };
                            }
                            break;
                        default:
                            response = {
                                jsonrpc: '2.0',
                                error: {
                                    code: -32601,
                                    message: `Method not found: ${jsonRpcRequest.method}`
                                },
                                id: jsonRpcRequest.id
                            };
                    }
                    res.setHeader('Content-Type', 'application/json');
                    res.json(response);
                    const duration = Date.now() - startTime;
                    logger_1.logger.info('MCP request completed', {
                        duration,
                        method: jsonRpcRequest.method
                    });
                }
                catch (error) {
                    logger_1.logger.error('Error processing request:', error);
                    res.status(400).json({
                        jsonrpc: '2.0',
                        error: {
                            code: -32700,
                            message: 'Parse error',
                            data: error instanceof Error ? error.message : 'Unknown error'
                        },
                        id: null
                    });
                }
            });
        }
        catch (error) {
            logger_1.logger.error('MCP request error:', error);
            if (!res.headersSent) {
                res.status(500).json({
                    jsonrpc: '2.0',
                    error: {
                        code: -32603,
                        message: 'Internal server error',
                        data: process.env.NODE_ENV === 'development'
                            ? error.message
                            : undefined
                    },
                    id: null
                });
            }
        }
    });
    app.use((req, res) => {
        res.status(404).json({
            error: 'Not found',
            message: `Cannot ${req.method} ${req.path}`
        });
    });
    app.use((err, req, res, next) => {
        logger_1.logger.error('Express error handler:', err);
        if (!res.headersSent) {
            res.status(500).json({
                jsonrpc: '2.0',
                error: {
                    code: -32603,
                    message: 'Internal server error',
                    data: process.env.NODE_ENV === 'development' ? err.message : undefined
                },
                id: null
            });
        }
    });
    const port = parseInt(process.env.PORT || '3000');
    const host = process.env.HOST || '0.0.0.0';
    expressServer = app.listen(port, host, () => {
        logger_1.logger.info(`n8n MCP Fixed HTTP Server started`, { port, host });
        const baseUrl = (0, url_detector_1.getStartupBaseUrl)(host, port);
        const endpoints = (0, url_detector_1.formatEndpointUrls)(baseUrl);
        console.log(`n8n MCP Fixed HTTP Server running on ${host}:${port}`);
        console.log(`Health check: ${endpoints.health}`);
        console.log(`MCP endpoint: ${endpoints.mcp}`);
        console.log('\nPress Ctrl+C to stop the server');
        if (authToken === 'REPLACE_THIS_AUTH_TOKEN_32_CHARS_MIN_abcdefgh') {
            setInterval(() => {
                logger_1.logger.warn('⚠️ Still using default AUTH_TOKEN - security risk!');
                if (process.env.MCP_MODE === 'http') {
                    console.warn('⚠️ REMINDER: Still using default AUTH_TOKEN - please change it!');
                }
            }, 300000);
        }
        if (process.env.BASE_URL || process.env.PUBLIC_URL) {
            console.log(`\nPublic URL configured: ${baseUrl}`);
        }
        else if (process.env.TRUST_PROXY && Number(process.env.TRUST_PROXY) > 0) {
            console.log(`\nNote: TRUST_PROXY is enabled. URLs will be auto-detected from proxy headers.`);
        }
    });
    expressServer.on('error', (error) => {
        if (error.code === 'EADDRINUSE') {
            logger_1.logger.error(`Port ${port} is already in use`);
            console.error(`ERROR: Port ${port} is already in use`);
            process.exit(1);
        }
        else {
            logger_1.logger.error('Server error:', error);
            console.error('Server error:', error);
            process.exit(1);
        }
    });
    process.on('SIGTERM', shutdown);
    process.on('SIGINT', shutdown);
    process.on('uncaughtException', (error) => {
        logger_1.logger.error('Uncaught exception:', error);
        console.error('Uncaught exception:', error);
        shutdown();
    });
    process.on('unhandledRejection', (reason, promise) => {
        logger_1.logger.error('Unhandled rejection:', reason);
        console.error('Unhandled rejection at:', promise, 'reason:', reason);
        shutdown();
    });
}
if (typeof require !== 'undefined' && require.main === module) {
    startFixedHTTPServer().catch(error => {
        logger_1.logger.error('Failed to start Fixed HTTP server:', error);
        console.error('Failed to start Fixed HTTP server:', error);
        process.exit(1);
    });
}
//# sourceMappingURL=http-server.js.map
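For reference, a minimal client-side sketch of the JSON-RPC exchange the handler above implements. The localhost URL and Bearer auth header are assumptions based on the startup logging and authToken check in this file; the tools/call shape and the -32601/-32603 error codes come directly from the switch statement above.

// Hypothetical client for the /mcp endpoint above (TypeScript, Node 18+ fetch).
async function callTool(name: string, args: Record<string, unknown>): Promise<unknown> {
    const res = await fetch('http://localhost:3000/mcp', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            // Assumption: the server checks a Bearer token against AUTH_TOKEN.
            'Authorization': `Bearer ${process.env.AUTH_TOKEN}`,
        },
        body: JSON.stringify({
            jsonrpc: '2.0',
            method: 'tools/call',
            params: { name, arguments: args },
            id: 1,
        }),
    });
    // Success: { jsonrpc: '2.0', result: { content: [{ type: 'text', text: '...' }] }, id: 1 }
    // Unknown method: error.code -32601; tool failure: error.code -32603.
    return res.json();
}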
1 dist/http-server.js.map vendored Normal file
File diff suppressed because one or more lines are too long

11 dist/index.d.ts vendored Normal file
@@ -0,0 +1,11 @@
export { N8NMCPEngine, EngineHealth, EngineOptions } from './mcp-engine';
export { SingleSessionHTTPServer } from './http-server-single-session';
export { ConsoleManager } from './utils/console-manager';
export { N8NDocumentationMCPServer } from './mcp/server';
export type { InstanceContext } from './types/instance-context';
export { validateInstanceContext, isInstanceContext } from './types/instance-context';
export type { SessionState } from './types/session-state';
export type { Tool, CallToolResult, ListToolsResult } from '@modelcontextprotocol/sdk/types.js';
import N8NMCPEngine from './mcp-engine';
export default N8NMCPEngine;
//# sourceMappingURL=index.d.ts.map
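A sketch of consuming this entry point from application code; the package specifier 'n8n-mcp' is an assumption (import from the build output or the published package as appropriate).

// Hypothetical consumer of the exports declared above.
import N8NMCPEngine, { type EngineOptions } from 'n8n-mcp';

const options: EngineOptions = { logLevel: 'info' };
const engine = new N8NMCPEngine(options);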
1 dist/index.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAOA,OAAO,EAAE,YAAY,EAAE,YAAY,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AACzE,OAAO,EAAE,uBAAuB,EAAE,MAAM,8BAA8B,CAAC;AACvE,OAAO,EAAE,cAAc,EAAE,MAAM,yBAAyB,CAAC;AACzD,OAAO,EAAE,yBAAyB,EAAE,MAAM,cAAc,CAAC;AAGzD,YAAY,EACV,eAAe,EAChB,MAAM,0BAA0B,CAAC;AAClC,OAAO,EACL,uBAAuB,EACvB,iBAAiB,EAClB,MAAM,0BAA0B,CAAC;AAClC,YAAY,EACV,YAAY,EACb,MAAM,uBAAuB,CAAC;AAG/B,YAAY,EACV,IAAI,EACJ,cAAc,EACd,eAAe,EAChB,MAAM,oCAAoC,CAAC;AAG5C,OAAO,YAAY,MAAM,cAAc,CAAC;AACxC,eAAe,YAAY,CAAC"}
20 dist/index.js vendored Normal file
@@ -0,0 +1,20 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.isInstanceContext = exports.validateInstanceContext = exports.N8NDocumentationMCPServer = exports.ConsoleManager = exports.SingleSessionHTTPServer = exports.N8NMCPEngine = void 0;
var mcp_engine_1 = require("./mcp-engine");
Object.defineProperty(exports, "N8NMCPEngine", { enumerable: true, get: function () { return mcp_engine_1.N8NMCPEngine; } });
var http_server_single_session_1 = require("./http-server-single-session");
Object.defineProperty(exports, "SingleSessionHTTPServer", { enumerable: true, get: function () { return http_server_single_session_1.SingleSessionHTTPServer; } });
var console_manager_1 = require("./utils/console-manager");
Object.defineProperty(exports, "ConsoleManager", { enumerable: true, get: function () { return console_manager_1.ConsoleManager; } });
var server_1 = require("./mcp/server");
Object.defineProperty(exports, "N8NDocumentationMCPServer", { enumerable: true, get: function () { return server_1.N8NDocumentationMCPServer; } });
var instance_context_1 = require("./types/instance-context");
Object.defineProperty(exports, "validateInstanceContext", { enumerable: true, get: function () { return instance_context_1.validateInstanceContext; } });
Object.defineProperty(exports, "isInstanceContext", { enumerable: true, get: function () { return instance_context_1.isInstanceContext; } });
const mcp_engine_2 = __importDefault(require("./mcp-engine"));
exports.default = mcp_engine_2.default;
//# sourceMappingURL=index.js.map
1 dist/index.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;;;;AAOA,2CAAyE;AAAhE,0GAAA,YAAY,OAAA;AACrB,2EAAuE;AAA9D,qIAAA,uBAAuB,OAAA;AAChC,2DAAyD;AAAhD,iHAAA,cAAc,OAAA;AACvB,uCAAyD;AAAhD,mHAAA,yBAAyB,OAAA;AAMlC,6DAGkC;AAFhC,2HAAA,uBAAuB,OAAA;AACvB,qHAAA,iBAAiB,OAAA;AAcnB,8DAAwC;AACxC,kBAAe,oBAAY,CAAC"}
11 dist/loaders/node-loader.d.ts vendored Normal file
@@ -0,0 +1,11 @@
export interface LoadedNode {
    packageName: string;
    nodeName: string;
    NodeClass: any;
}
export declare class N8nNodeLoader {
    private readonly CORE_PACKAGES;
    loadAllNodes(): Promise<LoadedNode[]>;
    private loadPackageNodes;
}
//# sourceMappingURL=node-loader.d.ts.map
1 dist/loaders/node-loader.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"node-loader.d.ts","sourceRoot":"","sources":["../../src/loaders/node-loader.ts"],"names":[],"mappings":"AAEA,MAAM,WAAW,UAAU;IACzB,WAAW,EAAE,MAAM,CAAC;IACpB,QAAQ,EAAE,MAAM,CAAC;IACjB,SAAS,EAAE,GAAG,CAAC;CAChB;AAED,qBAAa,aAAa;IACxB,OAAO,CAAC,QAAQ,CAAC,aAAa,CAG5B;IAEI,YAAY,IAAI,OAAO,CAAC,UAAU,EAAE,CAAC;YAmB7B,gBAAgB;CAqD/B"}
79 dist/loaders/node-loader.js vendored Normal file
@@ -0,0 +1,79 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.N8nNodeLoader = void 0;
const path_1 = __importDefault(require("path"));
class N8nNodeLoader {
    constructor() {
        this.CORE_PACKAGES = [
            { name: 'n8n-nodes-base', path: 'n8n-nodes-base' },
            { name: '@n8n/n8n-nodes-langchain', path: '@n8n/n8n-nodes-langchain' }
        ];
    }
    async loadAllNodes() {
        const results = [];
        for (const pkg of this.CORE_PACKAGES) {
            try {
                console.log(`\n📦 Loading package: ${pkg.name} from ${pkg.path}`);
                const packageJson = require(`${pkg.path}/package.json`);
                console.log(` Found ${Object.keys(packageJson.n8n?.nodes || {}).length} nodes in package.json`);
                const nodes = await this.loadPackageNodes(pkg.name, pkg.path, packageJson);
                results.push(...nodes);
            }
            catch (error) {
                console.error(`Failed to load ${pkg.name}:`, error);
            }
        }
        return results;
    }
    async loadPackageNodes(packageName, packagePath, packageJson) {
        const n8nConfig = packageJson.n8n || {};
        const nodes = [];
        const nodesList = n8nConfig.nodes || [];
        if (Array.isArray(nodesList)) {
            for (const nodePath of nodesList) {
                try {
                    const fullPath = require.resolve(`${packagePath}/${nodePath}`);
                    const nodeModule = require(fullPath);
                    const nodeNameMatch = nodePath.match(/\/([^\/]+)\.node\.(js|ts)$/);
                    const nodeName = nodeNameMatch ? nodeNameMatch[1] : path_1.default.basename(nodePath, '.node.js');
                    const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
                    if (NodeClass) {
                        nodes.push({ packageName, nodeName, NodeClass });
                        console.log(` ✓ Loaded ${nodeName} from ${packageName}`);
                    }
                    else {
                        console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`);
                    }
                }
                catch (error) {
                    console.error(` ✗ Failed to load node from ${packageName}/${nodePath}:`, error.message);
                }
            }
        }
        else {
            for (const [nodeName, nodePath] of Object.entries(nodesList)) {
                try {
                    const fullPath = require.resolve(`${packagePath}/${nodePath}`);
                    const nodeModule = require(fullPath);
                    const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
                    if (NodeClass) {
                        nodes.push({ packageName, nodeName, NodeClass });
                        console.log(` ✓ Loaded ${nodeName} from ${packageName}`);
                    }
                    else {
                        console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`);
                    }
                }
                catch (error) {
                    console.error(` ✗ Failed to load node ${nodeName} from ${packageName}:`, error.message);
                }
            }
        }
        return nodes;
    }
}
exports.N8nNodeLoader = N8nNodeLoader;
//# sourceMappingURL=node-loader.js.map
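A short usage sketch for the loader above, assuming it runs in an environment where n8n-nodes-base and @n8n/n8n-nodes-langchain are installed (they are resolved via require at runtime).

// Hypothetical caller of N8nNodeLoader.
import { N8nNodeLoader } from './loaders/node-loader';

async function main(): Promise<void> {
    const loader = new N8nNodeLoader();
    const loaded = await loader.loadAllNodes(); // LoadedNode[] across both core packages
    for (const { packageName, nodeName } of loaded) {
        console.log(`${packageName}: ${nodeName}`);
    }
}

main().catch(console.error);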
1 dist/loaders/node-loader.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"node-loader.js","sourceRoot":"","sources":["../../src/loaders/node-loader.ts"],"names":[],"mappings":";;;;;;AAAA,gDAAwB;AAQxB,MAAa,aAAa;IAA1B;QACmB,kBAAa,GAAG;YAC/B,EAAE,IAAI,EAAE,gBAAgB,EAAE,IAAI,EAAE,gBAAgB,EAAE;YAClD,EAAE,IAAI,EAAE,0BAA0B,EAAE,IAAI,EAAE,0BAA0B,EAAE;SACvE,CAAC;IA0EJ,CAAC;IAxEC,KAAK,CAAC,YAAY;QAChB,MAAM,OAAO,GAAiB,EAAE,CAAC;QAEjC,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,aAAa,EAAE,CAAC;YACrC,IAAI,CAAC;gBACH,OAAO,CAAC,GAAG,CAAC,yBAAyB,GAAG,CAAC,IAAI,SAAS,GAAG,CAAC,IAAI,EAAE,CAAC,CAAC;gBAElE,MAAM,WAAW,GAAG,OAAO,CAAC,GAAG,GAAG,CAAC,IAAI,eAAe,CAAC,CAAC;gBACxD,OAAO,CAAC,GAAG,CAAC,WAAW,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,GAAG,EAAE,KAAK,IAAI,EAAE,CAAC,CAAC,MAAM,wBAAwB,CAAC,CAAC;gBACjG,MAAM,KAAK,GAAG,MAAM,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,IAAI,EAAE,GAAG,CAAC,IAAI,EAAE,WAAW,CAAC,CAAC;gBAC3E,OAAO,CAAC,IAAI,CAAC,GAAG,KAAK,CAAC,CAAC;YACzB,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBACf,OAAO,CAAC,KAAK,CAAC,kBAAkB,GAAG,CAAC,IAAI,GAAG,EAAE,KAAK,CAAC,CAAC;YACtD,CAAC;QACH,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;IAEO,KAAK,CAAC,gBAAgB,CAAC,WAAmB,EAAE,WAAmB,EAAE,WAAgB;QACvF,MAAM,SAAS,GAAG,WAAW,CAAC,GAAG,IAAI,EAAE,CAAC;QACxC,MAAM,KAAK,GAAiB,EAAE,CAAC;QAG/B,MAAM,SAAS,GAAG,SAAS,CAAC,KAAK,IAAI,EAAE,CAAC;QAExC,IAAI,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC,EAAE,CAAC;YAE7B,KAAK,MAAM,QAAQ,IAAI,SAAS,EAAE,CAAC;gBACjC,IAAI,CAAC;oBACH,MAAM,QAAQ,GAAG,OAAO,CAAC,OAAO,CAAC,GAAG,WAAW,IAAI,QAAQ,EAAE,CAAC,CAAC;oBAC/D,MAAM,UAAU,GAAG,OAAO,CAAC,QAAQ,CAAC,CAAC;oBAGrC,MAAM,aAAa,GAAG,QAAQ,CAAC,KAAK,CAAC,4BAA4B,CAAC,CAAC;oBACnE,MAAM,QAAQ,GAAG,aAAa,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,cAAI,CAAC,QAAQ,CAAC,QAAQ,EAAE,UAAU,CAAC,CAAC;oBAGxF,MAAM,SAAS,GAAG,UAAU,CAAC,OAAO,IAAI,UAAU,CAAC,QAAQ,CAAC,IAAI,MAAM,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;oBAC7F,IAAI,SAAS,EAAE,CAAC;wBACd,KAAK,CAAC,IAAI,CAAC,EAAE,WAAW,EAAE,QAAQ,EAAE,SAAS,EAAE,CAAC,CAAC;wBACjD,OAAO,CAAC,GAAG,CAAC,cAAc,QAAQ,SAAS,WAAW,EAAE,CAAC,CAAC;oBAC5D,CAAC;yBAAM,CAAC;wBACN,OAAO,CAAC,IAAI,CAAC,iCAAiC,QAAQ,OAAO,WAAW,EAAE,CAAC,CAAC;oBAC9E,CAAC;gBACH,CAAC;gBAAC,OAAO,KAAK,EAAE,CAAC;oBACf,OAAO,CAAC,KAAK,CAAC,gCAAgC,WAAW,IAAI,QAAQ,GAAG,EAAG,KAAe,CAAC,OAAO,CAAC,CAAC;gBACtG,CAAC;YACH,CAAC;QACH,CAAC;aAAM,CAAC;YAEN,KAAK,MAAM,CAAC,QAAQ,EAAE,QAAQ,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,SAAS,CAAC,EAAE,CAAC;gBAC7D,IAAI,CAAC;oBACH,MAAM,QAAQ,GAAG,OAAO,CAAC,OAAO,CAAC,GAAG,WAAW,IAAI,QAAkB,EAAE,CAAC,CAAC;oBACzE,MAAM,UAAU,GAAG,OAAO,CAAC,QAAQ,CAAC,CAAC;oBAGrC,MAAM,SAAS,GAAG,UAAU,CAAC,OAAO,IAAI,UAAU,CAAC,QAAQ,CAAC,IAAI,MAAM,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC;oBAC7F,IAAI,SAAS,EAAE,CAAC;wBACd,KAAK,CAAC,IAAI,CAAC,EAAE,WAAW,EAAE,QAAQ,EAAE,SAAS,EAAE,CAAC,CAAC;wBACjD,OAAO,CAAC,GAAG,CAAC,cAAc,QAAQ,SAAS,WAAW,EAAE,CAAC,CAAC;oBAC5D,CAAC;yBAAM,CAAC;wBACN,OAAO,CAAC,IAAI,CAAC,iCAAiC,QAAQ,OAAO,WAAW,EAAE,CAAC,CAAC;oBAC9E,CAAC;gBACH,CAAC;gBAAC,OAAO,KAAK,EAAE,CAAC;oBACf,OAAO,CAAC,KAAK,CAAC,2BAA2B,QAAQ,SAAS,WAAW,GAAG,EAAG,KAAe,CAAC,OAAO,CAAC,CAAC;gBACtG,CAAC;YACH,CAAC;QACH,CAAC;QAED,OAAO,KAAK,CAAC;IACf,CAAC;CACF;AA9ED,sCA8EC"}
7 dist/mappers/docs-mapper.d.ts vendored Normal file
@@ -0,0 +1,7 @@
export declare class DocsMapper {
    private docsPath;
    private readonly KNOWN_FIXES;
    fetchDocumentation(nodeType: string): Promise<string | null>;
    private enhanceLoopNodeDocumentation;
}
//# sourceMappingURL=docs-mapper.d.ts.map
1 dist/mappers/docs-mapper.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"docs-mapper.d.ts","sourceRoot":"","sources":["../../src/mappers/docs-mapper.ts"],"names":[],"mappings":"AAGA,qBAAa,UAAU;IACrB,OAAO,CAAC,QAAQ,CAAwC;IAGxD,OAAO,CAAC,QAAQ,CAAC,WAAW,CAU1B;IAEI,kBAAkB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAkDlE,OAAO,CAAC,4BAA4B;CAmDrC"}
106 dist/mappers/docs-mapper.js vendored Normal file
@@ -0,0 +1,106 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.DocsMapper = void 0;
const fs_1 = require("fs");
const path_1 = __importDefault(require("path"));
class DocsMapper {
    constructor() {
        this.docsPath = path_1.default.join(process.cwd(), 'n8n-docs');
        this.KNOWN_FIXES = {
            'httpRequest': 'httprequest',
            'code': 'code',
            'webhook': 'webhook',
            'respondToWebhook': 'respondtowebhook',
            'n8n-nodes-base.httpRequest': 'httprequest',
            'n8n-nodes-base.code': 'code',
            'n8n-nodes-base.webhook': 'webhook',
            'n8n-nodes-base.respondToWebhook': 'respondtowebhook'
        };
    }
    async fetchDocumentation(nodeType) {
        const fixedType = this.KNOWN_FIXES[nodeType] || nodeType;
        const nodeName = fixedType.split('.').pop()?.toLowerCase();
        if (!nodeName) {
            console.log(`⚠️ Could not extract node name from: ${nodeType}`);
            return null;
        }
        console.log(`📄 Looking for docs for: ${nodeType} -> ${nodeName}`);
        const possiblePaths = [
            `docs/integrations/builtin/core-nodes/n8n-nodes-base.${nodeName}.md`,
            `docs/integrations/builtin/app-nodes/n8n-nodes-base.${nodeName}.md`,
            `docs/integrations/builtin/trigger-nodes/n8n-nodes-base.${nodeName}.md`,
            `docs/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.${nodeName}.md`,
            `docs/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.${nodeName}.md`,
            `docs/integrations/builtin/core-nodes/n8n-nodes-base.${nodeName}/index.md`,
            `docs/integrations/builtin/app-nodes/n8n-nodes-base.${nodeName}/index.md`,
            `docs/integrations/builtin/trigger-nodes/n8n-nodes-base.${nodeName}/index.md`,
            `docs/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.${nodeName}/index.md`,
            `docs/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.${nodeName}/index.md`
        ];
        for (const relativePath of possiblePaths) {
            try {
                const fullPath = path_1.default.join(this.docsPath, relativePath);
                let content = await fs_1.promises.readFile(fullPath, 'utf-8');
                console.log(` ✓ Found docs at: ${relativePath}`);
                content = this.enhanceLoopNodeDocumentation(nodeType, content);
                return content;
            }
            catch (error) {
                continue;
            }
        }
        console.log(` ✗ No docs found for ${nodeName}`);
        return null;
    }
    enhanceLoopNodeDocumentation(nodeType, content) {
        if (nodeType.includes('splitInBatches')) {
            const outputGuidance = `

## CRITICAL OUTPUT CONNECTION INFORMATION

**⚠️ OUTPUT INDICES ARE COUNTERINTUITIVE ⚠️**

The SplitInBatches node has TWO outputs with specific indices:
- **Output 0 (index 0) = "done"**: Receives final processed data when loop completes
- **Output 1 (index 1) = "loop"**: Receives current batch data during iteration

### Correct Connection Pattern:
1. Connect nodes that PROCESS items inside the loop to **Output 1 ("loop")**
2. Connect nodes that run AFTER the loop completes to **Output 0 ("done")**
3. The last processing node in the loop must connect back to the SplitInBatches node

### Common Mistake:
AI assistants often connect these backwards because the logical flow (loop first, then done) doesn't match the technical indices (done=0, loop=1).

`;
            const insertPoint = content.indexOf('## When to use');
            if (insertPoint > -1) {
                content = content.slice(0, insertPoint) + outputGuidance + content.slice(insertPoint);
            }
            else {
                content = outputGuidance + '\n' + content;
            }
        }
        if (nodeType.includes('.if')) {
            const outputGuidance = `

## Output Connection Information

The IF node has TWO outputs:
- **Output 0 (index 0) = "true"**: Items that match the condition
- **Output 1 (index 1) = "false"**: Items that do not match the condition

`;
            const insertPoint = content.indexOf('## Node parameters');
            if (insertPoint > -1) {
                content = content.slice(0, insertPoint) + outputGuidance + content.slice(insertPoint);
            }
        }
        return content;
    }
}
exports.DocsMapper = DocsMapper;
//# sourceMappingURL=docs-mapper.js.map
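A usage sketch for DocsMapper, assuming an n8n-docs checkout exists under the current working directory (the constructor above hard-codes that location).

// Hypothetical caller of DocsMapper.fetchDocumentation.
import { DocsMapper } from './mappers/docs-mapper';

async function main(): Promise<void> {
    const mapper = new DocsMapper();
    // For splitInBatches, the loop/done output guidance above is injected into the markdown.
    const docs = await mapper.fetchDocumentation('n8n-nodes-base.splitInBatches');
    console.log(docs ? docs.slice(0, 200) : 'no docs found');
}

main().catch(console.error);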
1 dist/mappers/docs-mapper.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"docs-mapper.js","sourceRoot":"","sources":["../../src/mappers/docs-mapper.ts"],"names":[],"mappings":";;;;;;AAAA,2BAAoC;AACpC,gDAAwB;AAExB,MAAa,UAAU;IAAvB;QACU,aAAQ,GAAG,cAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,UAAU,CAAC,CAAC;QAGvC,gBAAW,GAA2B;YACrD,aAAa,EAAE,aAAa;YAC5B,MAAM,EAAE,MAAM;YACd,SAAS,EAAE,SAAS;YACpB,kBAAkB,EAAE,kBAAkB;YAEtC,4BAA4B,EAAE,aAAa;YAC3C,qBAAqB,EAAE,MAAM;YAC7B,wBAAwB,EAAE,SAAS;YACnC,iCAAiC,EAAE,kBAAkB;SACtD,CAAC;IAuGJ,CAAC;IArGC,KAAK,CAAC,kBAAkB,CAAC,QAAgB;QAEvC,MAAM,SAAS,GAAG,IAAI,CAAC,WAAW,CAAC,QAAQ,CAAC,IAAI,QAAQ,CAAC;QAGzD,MAAM,QAAQ,GAAG,SAAS,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,EAAE,WAAW,EAAE,CAAC;QAC3D,IAAI,CAAC,QAAQ,EAAE,CAAC;YACd,OAAO,CAAC,GAAG,CAAC,yCAAyC,QAAQ,EAAE,CAAC,CAAC;YACjE,OAAO,IAAI,CAAC;QACd,CAAC;QAED,OAAO,CAAC,GAAG,CAAC,4BAA4B,QAAQ,OAAO,QAAQ,EAAE,CAAC,CAAC;QAGnE,MAAM,aAAa,GAAG;YAEpB,uDAAuD,QAAQ,KAAK;YACpE,sDAAsD,QAAQ,KAAK;YACnE,0DAA0D,QAAQ,KAAK;YACvE,0EAA0E,QAAQ,KAAK;YACvF,yEAAyE,QAAQ,KAAK;YAEtF,uDAAuD,QAAQ,WAAW;YAC1E,sDAAsD,QAAQ,WAAW;YACzE,0DAA0D,QAAQ,WAAW;YAC7E,0EAA0E,QAAQ,WAAW;YAC7F,yEAAyE,QAAQ,WAAW;SAC7F,CAAC;QAGF,KAAK,MAAM,YAAY,IAAI,aAAa,EAAE,CAAC;YACzC,IAAI,CAAC;gBACH,MAAM,QAAQ,GAAG,cAAI,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,YAAY,CAAC,CAAC;gBACxD,IAAI,OAAO,GAAG,MAAM,aAAE,CAAC,QAAQ,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;gBACnD,OAAO,CAAC,GAAG,CAAC,sBAAsB,YAAY,EAAE,CAAC,CAAC;gBAGlD,OAAO,GAAG,IAAI,CAAC,4BAA4B,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;gBAE/D,OAAO,OAAO,CAAC;YACjB,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBAEf,SAAS;YACX,CAAC;QACH,CAAC;QAED,OAAO,CAAC,GAAG,CAAC,yBAAyB,QAAQ,EAAE,CAAC,CAAC;QACjD,OAAO,IAAI,CAAC;IACd,CAAC;IAEO,4BAA4B,CAAC,QAAgB,EAAE,OAAe;QAEpE,IAAI,QAAQ,CAAC,QAAQ,CAAC,gBAAgB,CAAC,EAAE,CAAC;YACxC,MAAM,cAAc,GAAG;;;;;;;;;;;;;;;;;;CAkB5B,CAAC;YAEI,MAAM,WAAW,GAAG,OAAO,CAAC,OAAO,CAAC,gBAAgB,CAAC,CAAC;YACtD,IAAI,WAAW,GAAG,CAAC,CAAC,EAAE,CAAC;gBACrB,OAAO,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,WAAW,CAAC,GAAG,cAAc,GAAG,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;YACxF,CAAC;iBAAM,CAAC;gBAEN,OAAO,GAAG,cAAc,GAAG,IAAI,GAAG,OAAO,CAAC;YAC5C,CAAC;QACH,CAAC;QAGD,IAAI,QAAQ,CAAC,QAAQ,CAAC,KAAK,CAAC,EAAE,CAAC;YAC7B,MAAM,cAAc,GAAG;;;;;;;;CAQ5B,CAAC;YACI,MAAM,WAAW,GAAG,OAAO,CAAC,OAAO,CAAC,oBAAoB,CAAC,CAAC;YAC1D,IAAI,WAAW,GAAG,CAAC,CAAC,EAAE,CAAC;gBACrB,OAAO,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,WAAW,CAAC,GAAG,cAAc,GAAG,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;YACxF,CAAC;QACH,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;CACF;AArHD,gCAqHC"}
36 dist/mcp-engine.d.ts vendored Normal file
@@ -0,0 +1,36 @@
import { Request, Response } from 'express';
import { InstanceContext } from './types/instance-context';
import { SessionState } from './types/session-state';
export interface EngineHealth {
    status: 'healthy' | 'unhealthy';
    uptime: number;
    sessionActive: boolean;
    memoryUsage: {
        used: number;
        total: number;
        unit: string;
    };
    version: string;
}
export interface EngineOptions {
    sessionTimeout?: number;
    logLevel?: 'error' | 'warn' | 'info' | 'debug';
}
export declare class N8NMCPEngine {
    private server;
    private startTime;
    constructor(options?: EngineOptions);
    processRequest(req: Request, res: Response, instanceContext?: InstanceContext): Promise<void>;
    healthCheck(): Promise<EngineHealth>;
    getSessionInfo(): {
        active: boolean;
        sessionId?: string;
        age?: number;
    };
    exportSessionState(): SessionState[];
    restoreSessionState(sessions: SessionState[]): number;
    shutdown(): Promise<void>;
    start(): Promise<void>;
}
export default N8NMCPEngine;
//# sourceMappingURL=mcp-engine.d.ts.map
1 dist/mcp-engine.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"mcp-engine.d.ts","sourceRoot":"","sources":["../src/mcp-engine.ts"],"names":[],"mappings":"AAOA,OAAO,EAAE,OAAO,EAAE,QAAQ,EAAE,MAAM,SAAS,CAAC;AAG5C,OAAO,EAAE,eAAe,EAAE,MAAM,0BAA0B,CAAC;AAC3D,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAErD,MAAM,WAAW,YAAY;IAC3B,MAAM,EAAE,SAAS,GAAG,WAAW,CAAC;IAChC,MAAM,EAAE,MAAM,CAAC;IACf,aAAa,EAAE,OAAO,CAAC;IACvB,WAAW,EAAE;QACX,IAAI,EAAE,MAAM,CAAC;QACb,KAAK,EAAE,MAAM,CAAC;QACd,IAAI,EAAE,MAAM,CAAC;KACd,CAAC;IACF,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,aAAa;IAC5B,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,QAAQ,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG,MAAM,GAAG,OAAO,CAAC;CAChD;AAED,qBAAa,YAAY;IACvB,OAAO,CAAC,MAAM,CAA0B;IACxC,OAAO,CAAC,SAAS,CAAO;gBAEZ,OAAO,GAAE,aAAkB;IA8BjC,cAAc,CAClB,GAAG,EAAE,OAAO,EACZ,GAAG,EAAE,QAAQ,EACb,eAAe,CAAC,EAAE,eAAe,GAChC,OAAO,CAAC,IAAI,CAAC;IAkBV,WAAW,IAAI,OAAO,CAAC,YAAY,CAAC;IAgC1C,cAAc,IAAI;QAAE,MAAM,EAAE,OAAO,CAAC;QAAC,SAAS,CAAC,EAAE,MAAM,CAAC;QAAC,GAAG,CAAC,EAAE,MAAM,CAAA;KAAE;IAoBvE,kBAAkB,IAAI,YAAY,EAAE;IAwBpC,mBAAmB,CAAC,QAAQ,EAAE,YAAY,EAAE,GAAG,MAAM;IAiB/C,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;IASzB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;CAG7B;AA2CD,eAAe,YAAY,CAAC"}
77 dist/mcp-engine.js vendored Normal file
@@ -0,0 +1,77 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.N8NMCPEngine = void 0;
const http_server_single_session_1 = require("./http-server-single-session");
const logger_1 = require("./utils/logger");
class N8NMCPEngine {
    constructor(options = {}) {
        this.server = new http_server_single_session_1.SingleSessionHTTPServer();
        this.startTime = new Date();
        if (options.logLevel) {
            process.env.LOG_LEVEL = options.logLevel;
        }
    }
    async processRequest(req, res, instanceContext) {
        try {
            await this.server.handleRequest(req, res, instanceContext);
        }
        catch (error) {
            logger_1.logger.error('Engine processRequest error:', error);
            throw error;
        }
    }
    async healthCheck() {
        try {
            const sessionInfo = this.server.getSessionInfo();
            const memoryUsage = process.memoryUsage();
            return {
                status: 'healthy',
                uptime: Math.floor((Date.now() - this.startTime.getTime()) / 1000),
                sessionActive: sessionInfo.active,
                memoryUsage: {
                    used: Math.round(memoryUsage.heapUsed / 1024 / 1024),
                    total: Math.round(memoryUsage.heapTotal / 1024 / 1024),
                    unit: 'MB'
                },
                version: '2.24.1'
            };
        }
        catch (error) {
            logger_1.logger.error('Health check failed:', error);
            return {
                status: 'unhealthy',
                uptime: 0,
                sessionActive: false,
                memoryUsage: { used: 0, total: 0, unit: 'MB' },
                version: '2.24.1'
            };
        }
    }
    getSessionInfo() {
        return this.server.getSessionInfo();
    }
    exportSessionState() {
        if (!this.server) {
            logger_1.logger.warn('Cannot export sessions: server not initialized');
            return [];
        }
        return this.server.exportSessionState();
    }
    restoreSessionState(sessions) {
        if (!this.server) {
            logger_1.logger.warn('Cannot restore sessions: server not initialized');
            return 0;
        }
        return this.server.restoreSessionState(sessions);
    }
    async shutdown() {
        logger_1.logger.info('Shutting down N8N MCP Engine...');
        await this.server.shutdown();
    }
    async start() {
        await this.server.start();
    }
}
exports.N8NMCPEngine = N8NMCPEngine;
exports.default = N8NMCPEngine;
//# sourceMappingURL=mcp-engine.js.map
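The engine lifecycle implied by the class above, as a sketch: construct, start, poll health, shut down.

// Hypothetical embedding of N8NMCPEngine in a host application.
import N8NMCPEngine from './mcp-engine';

async function main(): Promise<void> {
    const engine = new N8NMCPEngine({ logLevel: 'warn' });
    await engine.start();
    const health = await engine.healthCheck();
    console.log(`${health.status}, ${health.memoryUsage.used} ${health.memoryUsage.unit} heap, up ${health.uptime}s`);
    await engine.shutdown();
}

main().catch(console.error);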
1 dist/mcp-engine.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"mcp-engine.js","sourceRoot":"","sources":["../src/mcp-engine.ts"],"names":[],"mappings":";;;AAQA,6EAAuE;AACvE,2CAAwC;AAqBxC,MAAa,YAAY;IAIvB,YAAY,UAAyB,EAAE;QACrC,IAAI,CAAC,MAAM,GAAG,IAAI,oDAAuB,EAAE,CAAC;QAC5C,IAAI,CAAC,SAAS,GAAG,IAAI,IAAI,EAAE,CAAC;QAE5B,IAAI,OAAO,CAAC,QAAQ,EAAE,CAAC;YACrB,OAAO,CAAC,GAAG,CAAC,SAAS,GAAG,OAAO,CAAC,QAAQ,CAAC;QAC3C,CAAC;IACH,CAAC;IAuBD,KAAK,CAAC,cAAc,CAClB,GAAY,EACZ,GAAa,EACb,eAAiC;QAEjC,IAAI,CAAC;YACH,MAAM,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC,GAAG,EAAE,GAAG,EAAE,eAAe,CAAC,CAAC;QAC7D,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,eAAM,CAAC,KAAK,CAAC,8BAA8B,EAAE,KAAK,CAAC,CAAC;YACpD,MAAM,KAAK,CAAC;QACd,CAAC;IACH,CAAC;IAWD,KAAK,CAAC,WAAW;QACf,IAAI,CAAC;YACH,MAAM,WAAW,GAAG,IAAI,CAAC,MAAM,CAAC,cAAc,EAAE,CAAC;YACjD,MAAM,WAAW,GAAG,OAAO,CAAC,WAAW,EAAE,CAAC;YAE1C,OAAO;gBACL,MAAM,EAAE,SAAS;gBACjB,MAAM,EAAE,IAAI,CAAC,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,GAAG,IAAI,CAAC,SAAS,CAAC,OAAO,EAAE,CAAC,GAAG,IAAI,CAAC;gBAClE,aAAa,EAAE,WAAW,CAAC,MAAM;gBACjC,WAAW,EAAE;oBACX,IAAI,EAAE,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,QAAQ,GAAG,IAAI,GAAG,IAAI,CAAC;oBACpD,KAAK,EAAE,IAAI,CAAC,KAAK,CAAC,WAAW,CAAC,SAAS,GAAG,IAAI,GAAG,IAAI,CAAC;oBACtD,IAAI,EAAE,IAAI;iBACX;gBACD,OAAO,EAAE,QAAQ;aAClB,CAAC;QACJ,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,eAAM,CAAC,KAAK,CAAC,sBAAsB,EAAE,KAAK,CAAC,CAAC;YAC5C,OAAO;gBACL,MAAM,EAAE,WAAW;gBACnB,MAAM,EAAE,CAAC;gBACT,aAAa,EAAE,KAAK;gBACpB,WAAW,EAAE,EAAE,IAAI,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE;gBAC9C,OAAO,EAAE,QAAQ;aAClB,CAAC;QACJ,CAAC;IACH,CAAC;IAMD,cAAc;QACZ,OAAO,IAAI,CAAC,MAAM,CAAC,cAAc,EAAE,CAAC;IACtC,CAAC;IAkBD,kBAAkB;QAChB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,eAAM,CAAC,IAAI,CAAC,gDAAgD,CAAC,CAAC;YAC9D,OAAO,EAAE,CAAC;QACZ,CAAC;QACD,OAAO,IAAI,CAAC,MAAM,CAAC,kBAAkB,EAAE,CAAC;IAC1C,CAAC;IAkBD,mBAAmB,CAAC,QAAwB;QAC1C,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,eAAM,CAAC,IAAI,CAAC,iDAAiD,CAAC,CAAC;YAC/D,OAAO,CAAC,CAAC;QACX,CAAC;QACD,OAAO,IAAI,CAAC,MAAM,CAAC,mBAAmB,CAAC,QAAQ,CAAC,CAAC;IACnD,CAAC;IAWD,KAAK,CAAC,QAAQ;QACZ,eAAM,CAAC,IAAI,CAAC,iCAAiC,CAAC,CAAC;QAC/C,MAAM,IAAI,CAAC,MAAM,CAAC,QAAQ,EAAE,CAAC;IAC/B,CAAC;IAMD,KAAK,CAAC,KAAK;QACT,MAAM,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE,CAAC;IAC5B,CAAC;CACF;AAjKD,oCAiKC;AA2CD,kBAAe,YAAY,CAAC"}
47 dist/mcp-tools-engine.d.ts vendored Normal file
@@ -0,0 +1,47 @@
import { NodeRepository } from './database/node-repository';
import { WorkflowValidationResult } from './services/workflow-validator';
export declare class MCPEngine {
    private repository;
    private workflowValidator;
    constructor(repository: NodeRepository);
    listNodes(args?: any): Promise<any[]>;
    searchNodes(args: any): Promise<any[]>;
    getNodeInfo(args: any): Promise<any>;
    getNodeEssentials(args: any): Promise<{
        nodeType: any;
        displayName: any;
        description: any;
        category: any;
        required: import("./services/property-filter").SimplifiedProperty[];
        common: import("./services/property-filter").SimplifiedProperty[];
    } | null>;
    getNodeDocumentation(args: any): Promise<any>;
    validateNodeOperation(args: any): Promise<import("./services/config-validator").ValidationResult | {
        valid: boolean;
        errors: {
            type: string;
            property: string;
            message: string;
        }[];
        warnings: never[];
        suggestions: never[];
        visibleProperties: never[];
        hiddenProperties: never[];
    }>;
    validateNodeMinimal(args: any): Promise<{
        missingFields: never[];
        error: string;
    } | {
        missingFields: string[];
        error?: undefined;
    }>;
    searchNodeProperties(args: any): Promise<any[]>;
    listAITools(args: any): Promise<any[]>;
    getDatabaseStatistics(args: any): Promise<{
        totalNodes: number;
        aiToolsCount: number;
        categories: string[];
    }>;
    validateWorkflow(args: any): Promise<WorkflowValidationResult>;
}
//# sourceMappingURL=mcp-tools-engine.d.ts.map
1 dist/mcp-tools-engine.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"mcp-tools-engine.d.ts","sourceRoot":"","sources":["../src/mcp-tools-engine.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,cAAc,EAAE,MAAM,4BAA4B,CAAC;AAK5D,OAAO,EAAqB,wBAAwB,EAAE,MAAM,+BAA+B,CAAC;AAE5F,qBAAa,SAAS;IAGR,OAAO,CAAC,UAAU;IAF9B,OAAO,CAAC,iBAAiB,CAAoB;gBAEzB,UAAU,EAAE,cAAc;IAIxC,SAAS,CAAC,IAAI,GAAE,GAAQ;IAIxB,WAAW,CAAC,IAAI,EAAE,GAAG;IAIrB,WAAW,CAAC,IAAI,EAAE,GAAG;IAIrB,iBAAiB,CAAC,IAAI,EAAE,GAAG;;;;;;;;IAgB3B,oBAAoB,CAAC,IAAI,EAAE,GAAG;IAK9B,qBAAqB,CAAC,IAAI,EAAE,GAAG;;;;;;;;;;;;IAqB/B,mBAAmB,CAAC,IAAI,EAAE,GAAG;;;;;;;IAmB7B,oBAAoB,CAAC,IAAI,EAAE,GAAG;IAI9B,WAAW,CAAC,IAAI,EAAE,GAAG;IAIrB,qBAAqB,CAAC,IAAI,EAAE,GAAG;;;;;IAU/B,gBAAgB,CAAC,IAAI,EAAE,GAAG,GAAG,OAAO,CAAC,wBAAwB,CAAC;CAGrE"}
89 dist/mcp-tools-engine.js vendored Normal file
@@ -0,0 +1,89 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MCPEngine = void 0;
const property_filter_1 = require("./services/property-filter");
const config_validator_1 = require("./services/config-validator");
const enhanced_config_validator_1 = require("./services/enhanced-config-validator");
const workflow_validator_1 = require("./services/workflow-validator");
class MCPEngine {
    constructor(repository) {
        this.repository = repository;
        this.workflowValidator = new workflow_validator_1.WorkflowValidator(repository, enhanced_config_validator_1.EnhancedConfigValidator);
    }
    async listNodes(args = {}) {
        return this.repository.getAllNodes(args.limit);
    }
    async searchNodes(args) {
        return this.repository.searchNodes(args.query, args.mode || 'OR', args.limit || 20);
    }
    async getNodeInfo(args) {
        return this.repository.getNodeByType(args.nodeType);
    }
    async getNodeEssentials(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        if (!node)
            return null;
        const essentials = property_filter_1.PropertyFilter.getEssentials(node.properties || [], args.nodeType);
        return {
            nodeType: node.nodeType,
            displayName: node.displayName,
            description: node.description,
            category: node.category,
            required: essentials.required,
            common: essentials.common
        };
    }
    async getNodeDocumentation(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        return node?.documentation || null;
    }
    async validateNodeOperation(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        if (!node) {
            return {
                valid: false,
                errors: [{ type: 'invalid_configuration', property: '', message: 'Node type not found' }],
                warnings: [],
                suggestions: [],
                visibleProperties: [],
                hiddenProperties: []
            };
        }
        const userProvidedKeys = new Set(Object.keys(args.config || {}));
        return config_validator_1.ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys);
    }
    async validateNodeMinimal(args) {
        const node = await this.repository.getNodeByType(args.nodeType);
        if (!node) {
            return { missingFields: [], error: 'Node type not found' };
        }
        const missingFields = [];
        const requiredFields = property_filter_1.PropertyFilter.getEssentials(node.properties || [], args.nodeType).required;
        for (const field of requiredFields) {
            if (!args.config[field.name]) {
                missingFields.push(field.name);
            }
        }
        return { missingFields };
    }
    async searchNodeProperties(args) {
        return this.repository.searchNodeProperties(args.nodeType, args.query, args.maxResults || 20);
    }
    async listAITools(args) {
        return this.repository.getAIToolNodes();
    }
    async getDatabaseStatistics(args) {
        const count = await this.repository.getNodeCount();
        const aiTools = await this.repository.getAIToolNodes();
        return {
            totalNodes: count,
            aiToolsCount: aiTools.length,
            categories: ['trigger', 'transform', 'output', 'input']
        };
    }
    async validateWorkflow(args) {
        return this.workflowValidator.validateWorkflow(args.workflow, args.options);
    }
}
exports.MCPEngine = MCPEngine;
//# sourceMappingURL=mcp-tools-engine.js.map
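A sketch of driving MCPEngine directly; obtaining a NodeRepository is out of scope here and simply assumed to come from the database layer.

// Hypothetical caller of MCPEngine.
import { MCPEngine } from './mcp-tools-engine';
import { NodeRepository } from './database/node-repository';

async function inspect(repository: NodeRepository): Promise<void> {
    const engine = new MCPEngine(repository);
    const hits = await engine.searchNodes({ query: 'webhook', limit: 5 });
    const minimal = await engine.validateNodeMinimal({
        nodeType: 'n8n-nodes-base.httpRequest',
        config: {}, // empty config: every required field should be reported missing
    });
    console.log(`${hits.length} matches; missing: ${minimal.missingFields.join(', ')}`);
}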
1 dist/mcp-tools-engine.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"mcp-tools-engine.js","sourceRoot":"","sources":["../src/mcp-tools-engine.ts"],"names":[],"mappings":";;;AAKA,gEAA4D;AAE5D,kEAA8D;AAC9D,oFAA+E;AAC/E,sEAA4F;AAE5F,MAAa,SAAS;IAGpB,YAAoB,UAA0B;QAA1B,eAAU,GAAV,UAAU,CAAgB;QAC5C,IAAI,CAAC,iBAAiB,GAAG,IAAI,sCAAiB,CAAC,UAAU,EAAE,mDAAuB,CAAC,CAAC;IACtF,CAAC;IAED,KAAK,CAAC,SAAS,CAAC,OAAY,EAAE;QAC5B,OAAO,IAAI,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;IACjD,CAAC;IAED,KAAK,CAAC,WAAW,CAAC,IAAS;QACzB,OAAO,IAAI,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC,KAAK,EAAE,IAAI,CAAC,IAAI,IAAI,IAAI,EAAE,IAAI,CAAC,KAAK,IAAI,EAAE,CAAC,CAAC;IACtF,CAAC;IAED,KAAK,CAAC,WAAW,CAAC,IAAS;QACzB,OAAO,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;IACtD,CAAC;IAED,KAAK,CAAC,iBAAiB,CAAC,IAAS;QAC/B,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,IAAI,CAAC,IAAI;YAAE,OAAO,IAAI,CAAC;QAGvB,MAAM,UAAU,GAAG,gCAAc,CAAC,aAAa,CAAC,IAAI,CAAC,UAAU,IAAI,EAAE,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC;QACtF,OAAO;YACL,QAAQ,EAAE,IAAI,CAAC,QAAQ;YACvB,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,WAAW,EAAE,IAAI,CAAC,WAAW;YAC7B,QAAQ,EAAE,IAAI,CAAC,QAAQ;YACvB,QAAQ,EAAE,UAAU,CAAC,QAAQ;YAC7B,MAAM,EAAE,UAAU,CAAC,MAAM;SAC1B,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,oBAAoB,CAAC,IAAS;QAClC,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,OAAO,IAAI,EAAE,aAAa,IAAI,IAAI,CAAC;IACrC,CAAC;IAED,KAAK,CAAC,qBAAqB,CAAC,IAAS;QAEnC,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,IAAI,CAAC,IAAI,EAAE,CAAC;YACV,OAAO;gBACL,KAAK,EAAE,KAAK;gBACZ,MAAM,EAAE,CAAC,EAAE,IAAI,EAAE,uBAAuB,EAAE,QAAQ,EAAE,EAAE,EAAE,OAAO,EAAE,qBAAqB,EAAE,CAAC;gBACzF,QAAQ,EAAE,EAAE;gBACZ,WAAW,EAAE,EAAE;gBACf,iBAAiB,EAAE,EAAE;gBACrB,gBAAgB,EAAE,EAAE;aACrB,CAAC;QACJ,CAAC;QAID,MAAM,gBAAgB,GAAG,IAAI,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,EAAE,CAAC,CAAC,CAAC;QAEjE,OAAO,kCAAe,CAAC,QAAQ,CAAC,IAAI,CAAC,QAAQ,EAAE,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,UAAU,IAAI,EAAE,EAAE,gBAAgB,CAAC,CAAC;IACvG,CAAC;IAED,KAAK,CAAC,mBAAmB,CAAC,IAAS;QAEjC,MAAM,IAAI,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,aAAa,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAChE,IAAI,CAAC,IAAI,EAAE,CAAC;YACV,OAAO,EAAE,aAAa,EAAE,EAAE,EAAE,KAAK,EAAE,qBAAqB,EAAE,CAAC;QAC7D,CAAC;QAED,MAAM,aAAa,GAAa,EAAE,CAAC;QACnC,MAAM,cAAc,GAAG,gCAAc,CAAC,aAAa,CAAC,IAAI,CAAC,UAAU,IAAI,EAAE,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC,QAAQ,CAAC;QAEnG,KAAK,MAAM,KAAK,IAAI,cAAc,EAAE,CAAC;YACnC,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC;gBAC7B,aAAa,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;YACjC,CAAC;QACH,CAAC;QAED,OAAO,EAAE,aAAa,EAAE,CAAC;IAC3B,CAAC;IAED,KAAK,CAAC,oBAAoB,CAAC,IAAS;QAClC,OAAO,IAAI,CAAC,UAAU,CAAC,oBAAoB,CAAC,IAAI,CAAC,QAAQ,EAAE,IAAI,CAAC,KAAK,EAAE,IAAI,CAAC,UAAU,IAAI,EAAE,CAAC,CAAC;IAChG,CAAC;IAED,KAAK,CAAC,WAAW,CAAC,IAAS;QACzB,OAAO,IAAI,CAAC,UAAU,CAAC,cAAc,EAAE,CAAC;IAC1C,CAAC;IAED,KAAK,CAAC,qBAAqB,CAAC,IAAS;QACnC,MAAM,KAAK,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,YAAY,EAAE,CAAC;QACnD,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,cAAc,EAAE,CAAC;QACvD,OAAO;YACL,UAAU,EAAE,KAAK;YACjB,YAAY,EAAE,OAAO,CAAC,MAAM;YAC5B,UAAU,EAAE,CAAC,SAAS,EAAE,WAAW,EAAE,QAAQ,EAAE,OAAO,CAAC;SACxD,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,gBAAgB,CAAC,IAAS;QAC9B,OAAO,IAAI,CAAC,iBAAiB,CAAC,gBAAgB,CAAC,IAAI,CAAC,QAAQ,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC;IAC9E,CAAC;CACF;AArGD,8BAqGC"}
29 dist/mcp/handlers-n8n-manager.d.ts vendored Normal file
@@ -0,0 +1,29 @@
import { N8nApiClient } from '../services/n8n-api-client';
import { McpToolResponse } from '../types/n8n-api';
import { NodeRepository } from '../database/node-repository';
import { InstanceContext } from '../types/instance-context';
import { TemplateService } from '../templates/template-service';
export declare function getInstanceCacheStatistics(): string;
export declare function getInstanceCacheMetrics(): import("../utils/cache-utils").CacheMetrics;
export declare function clearInstanceCache(): void;
export declare function getN8nApiClient(context?: InstanceContext): N8nApiClient | null;
export declare function handleCreateWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflowDetails(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflowStructure(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetWorkflowMinimal(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleUpdateWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDeleteWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleListWorkflows(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleValidateWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleAutofixWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleTestWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleGetExecution(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleListExecutions(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDeleteExecution(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleHealthCheck(context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDiagnostic(request: any, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleWorkflowVersions(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleDeployTemplate(args: unknown, templateService: TemplateService, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
export declare function handleTriggerWebhookWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse>;
//# sourceMappingURL=handlers-n8n-manager.d.ts.map
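A sketch of calling one of the declared handlers. Only the signature comes from this file; the args object shape is an assumption, since args is typed unknown and validated inside the handler.

// Hypothetical caller of handleListWorkflows.
import { handleListWorkflows } from './mcp/handlers-n8n-manager';
import type { InstanceContext } from './types/instance-context';

async function listWorkflows(context?: InstanceContext) {
    // Without a context, the client falls back to N8N_API_URL / N8N_API_KEY.
    return handleListWorkflows({ limit: 10 }, context); // Promise<McpToolResponse>
}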
1 dist/mcp/handlers-n8n-manager.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"handlers-n8n-manager.d.ts","sourceRoot":"","sources":["../../src/mcp/handlers-n8n-manager.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAC;AAE1D,OAAO,EAML,eAAe,EAGhB,MAAM,kBAAkB,CAAC;AAkB1B,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AAC7D,OAAO,EAAE,eAAe,EAA2B,MAAM,2BAA2B,CAAC;AAOrF,OAAO,EAAE,eAAe,EAAE,MAAM,+BAA+B,CAAC;AAqNhE,wBAAgB,0BAA0B,IAAI,MAAM,CAEnD;AAMD,wBAAgB,uBAAuB,gDAEtC;AAKD,wBAAgB,kBAAkB,IAAI,IAAI,CAIzC;AAED,wBAAgB,eAAe,CAAC,OAAO,CAAC,EAAE,eAAe,GAAG,YAAY,GAAG,IAAI,CAgF9E;AAqHD,wBAAsB,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAmF7G;AAED,wBAAsB,iBAAiB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAiC1G;AAED,wBAAsB,wBAAwB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAoDjH;AAED,wBAAsB,0BAA0B,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAmDnH;AAED,wBAAsB,wBAAwB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAyCjH;AAED,wBAAsB,oBAAoB,CACxC,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CA8H1B;AAeD,wBAAsB,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAsC7G;AAED,wBAAsB,mBAAmB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAiE5G;AAED,wBAAsB,sBAAsB,CAC1C,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CA0F1B;AAED,wBAAsB,qBAAqB,CACzC,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CAoK1B;AAQD,wBAAsB,kBAAkB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAwJ3G;AAED,wBAAsB,kBAAkB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CA8H3G;AAED,wBAAsB,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAgD7G;AAED,wBAAsB,qBAAqB,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAiC9G;AAID,wBAAsB,iBAAiB,CAAC,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAwG3F;AAkLD,wBAAsB,gBAAgB,CAAC,OAAO,EAAE,GAAG,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAkQxG;AAED,wBAAsB,sBAAsB,CAC1C,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CAsL1B;AA+BD,wBAAsB,oBAAoB,CACxC,IAAI,EAAE,OAAO,EACb,eAAe,EAAE,eAAe,EAChC,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CAoM1B;AAQD,wBAAsB,4BAA4B,CAAC,IAAI,EAAE,OAAO,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC,CAyErH"}
2026 dist/mcp/handlers-n8n-manager.js vendored Normal file
File diff suppressed because it is too large

1 dist/mcp/handlers-n8n-manager.js.map vendored Normal file
File diff suppressed because one or more lines are too long
5 dist/mcp/handlers-workflow-diff.d.ts vendored Normal file
@@ -0,0 +1,5 @@
import { McpToolResponse } from '../types/n8n-api';
import { InstanceContext } from '../types/instance-context';
import { NodeRepository } from '../database/node-repository';
export declare function handleUpdatePartialWorkflow(args: unknown, repository: NodeRepository, context?: InstanceContext): Promise<McpToolResponse>;
//# sourceMappingURL=handlers-workflow-diff.d.ts.map
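For orientation, a sketch of an args payload accepted by handleUpdatePartialWorkflow, matching the workflowDiffSchema defined in handlers-workflow-diff.js below; the id, node names, and update keys are placeholders, and the operation semantics are interpreted by WorkflowDiffEngine.

// Hypothetical payload for handleUpdatePartialWorkflow.
const args: unknown = {
    id: 'abc123',
    operations: [
        { type: 'updateNode', nodeName: 'HTTP Request', updates: { 'parameters.url': 'https://example.com' } },
        { type: 'addConnection', source: 'HTTP Request', target: 'Set' },
    ],
    validateOnly: true, // dry run: the diff is applied in memory and nothing is saved
    intent: 'Point the request at the new endpoint',
};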
1 dist/mcp/handlers-workflow-diff.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"handlers-workflow-diff.d.ts","sourceRoot":"","sources":["../../src/mcp/handlers-workflow-diff.ts"],"names":[],"mappings":"AAMA,OAAO,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAMnD,OAAO,EAAE,eAAe,EAAE,MAAM,2BAA2B,CAAC;AAE5D,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AA0D7D,wBAAsB,2BAA2B,CAC/C,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,cAAc,EAC1B,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,eAAe,CAAC,CA6V1B"}
461 dist/mcp/handlers-workflow-diff.js vendored Normal file
@@ -0,0 +1,461 @@
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.handleUpdatePartialWorkflow = handleUpdatePartialWorkflow;
const zod_1 = require("zod");
const workflow_diff_engine_1 = require("../services/workflow-diff-engine");
const handlers_n8n_manager_1 = require("./handlers-n8n-manager");
const n8n_errors_1 = require("../utils/n8n-errors");
const logger_1 = require("../utils/logger");
const n8n_validation_1 = require("../services/n8n-validation");
const workflow_versioning_service_1 = require("../services/workflow-versioning-service");
const workflow_validator_1 = require("../services/workflow-validator");
const enhanced_config_validator_1 = require("../services/enhanced-config-validator");
let cachedValidator = null;
function getValidator(repository) {
    if (!cachedValidator) {
        cachedValidator = new workflow_validator_1.WorkflowValidator(repository, enhanced_config_validator_1.EnhancedConfigValidator);
    }
    return cachedValidator;
}
const workflowDiffSchema = zod_1.z.object({
    id: zod_1.z.string(),
    operations: zod_1.z.array(zod_1.z.object({
        type: zod_1.z.string(),
        description: zod_1.z.string().optional(),
        node: zod_1.z.any().optional(),
        nodeId: zod_1.z.string().optional(),
        nodeName: zod_1.z.string().optional(),
        updates: zod_1.z.any().optional(),
        position: zod_1.z.tuple([zod_1.z.number(), zod_1.z.number()]).optional(),
        source: zod_1.z.string().optional(),
        target: zod_1.z.string().optional(),
        from: zod_1.z.string().optional(),
        to: zod_1.z.string().optional(),
        sourceOutput: zod_1.z.string().optional(),
        targetInput: zod_1.z.string().optional(),
        sourceIndex: zod_1.z.number().optional(),
        targetIndex: zod_1.z.number().optional(),
        branch: zod_1.z.enum(['true', 'false']).optional(),
        case: zod_1.z.number().optional(),
        ignoreErrors: zod_1.z.boolean().optional(),
        dryRun: zod_1.z.boolean().optional(),
        connections: zod_1.z.any().optional(),
        settings: zod_1.z.any().optional(),
        name: zod_1.z.string().optional(),
        tag: zod_1.z.string().optional(),
    })),
    validateOnly: zod_1.z.boolean().optional(),
    continueOnError: zod_1.z.boolean().optional(),
    createBackup: zod_1.z.boolean().optional(),
    intent: zod_1.z.string().optional(),
});
async function handleUpdatePartialWorkflow(args, repository, context) {
    const startTime = Date.now();
    const sessionId = `mutation_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
    let workflowBefore = null;
    let validationBefore = null;
    let validationAfter = null;
    try {
        if (process.env.DEBUG_MCP === 'true') {
            logger_1.logger.debug('Workflow diff request received', {
                argsType: typeof args,
                hasWorkflowId: args && typeof args === 'object' && 'workflowId' in args,
                operationCount: args && typeof args === 'object' && 'operations' in args ?
                    args.operations?.length : 0
            });
        }
        const input = workflowDiffSchema.parse(args);
        const client = (0, handlers_n8n_manager_1.getN8nApiClient)(context);
        if (!client) {
            return {
                success: false,
                error: 'n8n API not configured. Please set N8N_API_URL and N8N_API_KEY environment variables.'
            };
        }
        let workflow;
        try {
            workflow = await client.getWorkflow(input.id);
            workflowBefore = JSON.parse(JSON.stringify(workflow));
            try {
                const validator = getValidator(repository);
                validationBefore = await validator.validateWorkflow(workflowBefore, {
                    validateNodes: true,
                    validateConnections: true,
                    validateExpressions: true,
                    profile: 'runtime'
                });
            }
            catch (validationError) {
                logger_1.logger.debug('Pre-mutation validation failed (non-blocking):', validationError);
                validationBefore = {
                    valid: false,
                    errors: [{ type: 'validation_error', message: 'Validation failed' }]
                };
            }
        }
        catch (error) {
            if (error instanceof n8n_errors_1.N8nApiError) {
                return {
                    success: false,
                    error: (0, n8n_errors_1.getUserFriendlyErrorMessage)(error),
                    code: error.code
                };
            }
            throw error;
        }
        if (input.createBackup !== false && !input.validateOnly) {
            try {
                const versioningService = new workflow_versioning_service_1.WorkflowVersioningService(repository, client);
                const backupResult = await versioningService.createBackup(input.id, workflow, {
                    trigger: 'partial_update',
                    operations: input.operations
                });
                logger_1.logger.info('Workflow backup created', {
                    workflowId: input.id,
                    versionId: backupResult.versionId,
                    versionNumber: backupResult.versionNumber,
                    pruned: backupResult.pruned
                });
            }
            catch (error) {
                logger_1.logger.warn('Failed to create workflow backup', {
                    workflowId: input.id,
                    error: error.message
                });
            }
        }
        const diffEngine = new workflow_diff_engine_1.WorkflowDiffEngine();
        const diffRequest = input;
        const diffResult = await diffEngine.applyDiff(workflow, diffRequest);
        if (!diffResult.success) {
            if (diffRequest.continueOnError && diffResult.workflow && diffResult.operationsApplied && diffResult.operationsApplied > 0) {
                logger_1.logger.info(`continueOnError mode: Applying ${diffResult.operationsApplied} successful operations despite ${diffResult.failed?.length || 0} failures`);
            }
            else {
                return {
                    success: false,
                    error: 'Failed to apply diff operations',
                    details: {
                        errors: diffResult.errors,
                        warnings: diffResult.warnings,
                        operationsApplied: diffResult.operationsApplied,
                        applied: diffResult.applied,
                        failed: diffResult.failed
                    }
                };
            }
        }
        if (input.validateOnly) {
            return {
                success: true,
                message: diffResult.message,
                data: {
                    valid: true,
                    operationsToApply: input.operations.length
                },
                details: {
                    warnings: diffResult.warnings
                }
            };
        }
        if (diffResult.workflow) {
            const structureErrors = (0, n8n_validation_1.validateWorkflowStructure)(diffResult.workflow);
            if (structureErrors.length > 0) {
                const skipValidation = process.env.SKIP_WORKFLOW_VALIDATION === 'true';
                logger_1.logger.warn('Workflow structure validation failed after applying diff operations', {
                    workflowId: input.id,
                    errors: structureErrors,
                    blocking: !skipValidation
                });
                const errorTypes = new Set();
                structureErrors.forEach(err => {
                    if (err.includes('operator') || err.includes('singleValue'))
                        errorTypes.add('operator_issues');
                    if (err.includes('connection') || err.includes('referenced'))
                        errorTypes.add('connection_issues');
                    if (err.includes('Missing') || err.includes('missing'))
                        errorTypes.add('missing_metadata');
                    if (err.includes('branch') || err.includes('output'))
                        errorTypes.add('branch_mismatch');
                });
                const recoverySteps = [];
                if (errorTypes.has('operator_issues')) {
                    recoverySteps.push('Operator structure issue detected. Use validate_node_operation to check specific nodes.');
                    recoverySteps.push('Binary operators (equals, contains, greaterThan, etc.) must NOT have singleValue:true');
                    recoverySteps.push('Unary operators (isEmpty, isNotEmpty, true, false) REQUIRE singleValue:true');
                }
                if (errorTypes.has('connection_issues')) {
                    recoverySteps.push('Connection validation failed. Check all node connections reference existing nodes.');
                    recoverySteps.push('Use cleanStaleConnections operation to remove connections to non-existent nodes.');
                }
                if (errorTypes.has('missing_metadata')) {
                    recoverySteps.push('Missing metadata detected. Ensure filter-based nodes (IF v2.2+, Switch v3.2+) have complete conditions.options.');
                    recoverySteps.push('Required options: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}');
                }
                if (errorTypes.has('branch_mismatch')) {
                    recoverySteps.push('Branch count mismatch. Ensure Switch nodes have outputs for all rules (e.g., 3 rules = 3 output branches).');
                }
                if (recoverySteps.length === 0) {
                    recoverySteps.push('Review the validation errors listed above');
                    recoverySteps.push('Fix issues using updateNode or cleanStaleConnections operations');
                    recoverySteps.push('Run validate_workflow again to verify fixes');
                }
                const errorMessage = structureErrors.length === 1
                    ? `Workflow validation failed: ${structureErrors[0]}`
                    : `Workflow validation failed with ${structureErrors.length} structural issues`;
                if (!skipValidation) {
                    return {
                        success: false,
                        error: errorMessage,
                        details: {
                            errors: structureErrors,
                            errorCount: structureErrors.length,
                            operationsApplied: diffResult.operationsApplied,
                            applied: diffResult.applied,
                            recoveryGuidance: recoverySteps,
                            note: 'Operations were applied but created an invalid workflow structure. The workflow was NOT saved to n8n to prevent UI rendering errors.',
                            autoSanitizationNote: 'Auto-sanitization runs on all nodes during updates to fix operator structures and add missing metadata. However, it cannot fix all issues (e.g., broken connections, branch mismatches). Use the recovery guidance above to resolve remaining issues.'
                        }
                    };
                }
                logger_1.logger.info('Workflow validation skipped (SKIP_WORKFLOW_VALIDATION=true): Allowing workflow with validation warnings to proceed', {
                    workflowId: input.id,
                    warningCount: structureErrors.length
                });
            }
        }
        try {
            const updatedWorkflow = await client.updateWorkflow(input.id, diffResult.workflow);
            let finalWorkflow = updatedWorkflow;
            let activationMessage = '';
            try {
                const validator = getValidator(repository);
                validationAfter = await validator.validateWorkflow(finalWorkflow, {
                    validateNodes: true,
                    validateConnections: true,
                    validateExpressions: true,
                    profile: 'runtime'
                });
            }
            catch (validationError) {
                logger_1.logger.debug('Post-mutation validation failed (non-blocking):', validationError);
                validationAfter = {
                    valid: false,
                    errors: [{ type: 'validation_error', message: 'Validation failed' }]
                };
            }
            if (diffResult.shouldActivate) {
                try {
                    finalWorkflow = await client.activateWorkflow(input.id);
                    activationMessage = ' Workflow activated.';
                }
                catch (activationError) {
                    logger_1.logger.error('Failed to activate workflow after update', activationError);
                    return {
                        success: false,
                        error: 'Workflow updated successfully but activation failed',
                        details: {
                            workflowUpdated: true,
                            activationError: activationError instanceof Error ? activationError.message : 'Unknown error'
                        }
                    };
                }
            }
            else if (diffResult.shouldDeactivate) {
                try {
                    finalWorkflow = await client.deactivateWorkflow(input.id);
                    activationMessage = ' Workflow deactivated.';
                }
                catch (deactivationError) {
                    logger_1.logger.error('Failed to deactivate workflow after update', deactivationError);
                    return {
                        success: false,
                        error: 'Workflow updated successfully but deactivation failed',
                        details: {
                            workflowUpdated: true,
                            deactivationError: deactivationError instanceof Error ? deactivationError.message : 'Unknown error'
                        }
                    };
                }
            }
            if (workflowBefore && !input.validateOnly) {
                trackWorkflowMutation({
                    sessionId,
                    toolName: 'n8n_update_partial_workflow',
                    userIntent: input.intent || 'Partial workflow update',
                    operations: input.operations,
                    workflowBefore,
                    workflowAfter: finalWorkflow,
                    validationBefore,
                    validationAfter,
mutationSuccess: true,
|
||||
durationMs: Date.now() - startTime,
|
||||
}).catch(err => {
|
||||
logger_1.logger.debug('Failed to track mutation telemetry:', err);
|
||||
});
|
||||
}
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
id: finalWorkflow.id,
|
||||
name: finalWorkflow.name,
|
||||
active: finalWorkflow.active,
|
||||
nodeCount: finalWorkflow.nodes?.length || 0,
|
||||
operationsApplied: diffResult.operationsApplied
|
||||
},
|
||||
message: `Workflow "${finalWorkflow.name}" updated successfully. Applied ${diffResult.operationsApplied} operations.${activationMessage} Use n8n_get_workflow with mode 'structure' to verify current state.`,
|
||||
details: {
|
||||
applied: diffResult.applied,
|
||||
failed: diffResult.failed,
|
||||
errors: diffResult.errors,
|
||||
warnings: diffResult.warnings
|
||||
}
|
||||
};
|
||||
}
|
||||
catch (error) {
|
||||
if (workflowBefore && !input.validateOnly) {
|
||||
trackWorkflowMutation({
|
||||
sessionId,
|
||||
toolName: 'n8n_update_partial_workflow',
|
||||
userIntent: input.intent || 'Partial workflow update',
|
||||
operations: input.operations,
|
||||
workflowBefore,
|
||||
workflowAfter: workflowBefore,
|
||||
validationBefore,
|
||||
validationAfter: validationBefore,
|
||||
mutationSuccess: false,
|
||||
mutationError: error instanceof Error ? error.message : 'Unknown error',
|
||||
durationMs: Date.now() - startTime,
|
||||
}).catch(err => {
|
||||
logger_1.logger.warn('Failed to track mutation telemetry for failed operation:', err);
|
||||
});
|
||||
}
|
||||
if (error instanceof n8n_errors_1.N8nApiError) {
|
||||
return {
|
||||
success: false,
|
||||
error: (0, n8n_errors_1.getUserFriendlyErrorMessage)(error),
|
||||
code: error.code,
|
||||
details: error.details
|
||||
};
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
catch (error) {
|
||||
if (error instanceof zod_1.z.ZodError) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Invalid input',
|
||||
details: { errors: error.errors }
|
||||
};
|
||||
}
|
||||
logger_1.logger.error('Failed to update partial workflow', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Unknown error occurred'
|
||||
};
|
||||
}
|
||||
}
|
||||
function inferIntentFromOperations(operations) {
|
||||
if (!operations || operations.length === 0) {
|
||||
return 'Partial workflow update';
|
||||
}
|
||||
const opTypes = operations.map((op) => op.type);
|
||||
const opCount = operations.length;
|
||||
if (opCount === 1) {
|
||||
const op = operations[0];
|
||||
switch (op.type) {
|
||||
case 'addNode':
|
||||
return `Add ${op.node?.type || 'node'}`;
|
||||
case 'removeNode':
|
||||
return `Remove node ${op.nodeName || op.nodeId || ''}`.trim();
|
||||
case 'updateNode':
|
||||
return `Update node ${op.nodeName || op.nodeId || ''}`.trim();
|
||||
case 'addConnection':
|
||||
return `Connect ${op.source || 'node'} to ${op.target || 'node'}`;
|
||||
case 'removeConnection':
|
||||
return `Disconnect ${op.source || 'node'} from ${op.target || 'node'}`;
|
||||
case 'rewireConnection':
|
||||
return `Rewire ${op.source || 'node'} from ${op.from || ''} to ${op.to || ''}`.trim();
|
||||
case 'updateName':
|
||||
return `Rename workflow to "${op.name || ''}"`;
|
||||
case 'activateWorkflow':
|
||||
return 'Activate workflow';
|
||||
case 'deactivateWorkflow':
|
||||
return 'Deactivate workflow';
|
||||
default:
|
||||
return `Workflow ${op.type}`;
|
||||
}
|
||||
}
|
||||
const typeSet = new Set(opTypes);
|
||||
const summary = [];
|
||||
if (typeSet.has('addNode')) {
|
||||
const count = opTypes.filter((t) => t === 'addNode').length;
|
||||
summary.push(`add ${count} node${count > 1 ? 's' : ''}`);
|
||||
}
|
||||
if (typeSet.has('removeNode')) {
|
||||
const count = opTypes.filter((t) => t === 'removeNode').length;
|
||||
summary.push(`remove ${count} node${count > 1 ? 's' : ''}`);
|
||||
}
|
||||
if (typeSet.has('updateNode')) {
|
||||
const count = opTypes.filter((t) => t === 'updateNode').length;
|
||||
summary.push(`update ${count} node${count > 1 ? 's' : ''}`);
|
||||
}
|
||||
if (typeSet.has('addConnection') || typeSet.has('rewireConnection')) {
|
||||
summary.push('modify connections');
|
||||
}
|
||||
if (typeSet.has('updateName') || typeSet.has('updateSettings')) {
|
||||
summary.push('update metadata');
|
||||
}
|
||||
return summary.length > 0
|
||||
? `Workflow update: ${summary.join(', ')}`
|
||||
: `Workflow update: ${opCount} operations`;
|
||||
}
|
||||
async function trackWorkflowMutation(data) {
|
||||
try {
|
||||
if (!data.userIntent ||
|
||||
data.userIntent === 'Partial workflow update' ||
|
||||
data.userIntent.length < 10) {
|
||||
data.userIntent = inferIntentFromOperations(data.operations);
|
||||
}
|
||||
const { telemetry } = await Promise.resolve().then(() => __importStar(require('../telemetry/telemetry-manager.js')));
|
||||
await telemetry.trackWorkflowMutation(data);
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.debug('Telemetry tracking failed:', error);
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=handlers-workflow-diff.js.map
|
||||
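For orientation, here is a minimal usage sketch of inferIntentFromOperations, illustrating the summaries the function above produces. This snippet is an editorial illustration, not part of the committed file; the operation objects are hypothetical inputs shaped after the switch cases shown above.

// Hypothetical illustration, not part of this diff.
// A single operation gets a targeted summary from the switch:
inferIntentFromOperations([
    { type: 'addNode', node: { type: 'n8n-nodes-base.slack' } }
]);
// => 'Add n8n-nodes-base.slack'

// Multiple operations are aggregated per operation type:
inferIntentFromOperations([
    { type: 'addNode' },
    { type: 'addNode' },
    { type: 'updateName', name: 'My Flow' }
]);
// => 'Workflow update: add 2 nodes, update metadata'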
1 dist/mcp/handlers-workflow-diff.js.map vendored Normal file
File diff suppressed because one or more lines are too long
3 dist/mcp/index.d.ts vendored Normal file
@@ -0,0 +1,3 @@
#!/usr/bin/env node
export {};
//# sourceMappingURL=index.d.ts.map
1 dist/mcp/index.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/mcp/index.ts"],"names":[],"mappings":""}
219 dist/mcp/index.js vendored Normal file
@@ -0,0 +1,219 @@
#!/usr/bin/env node
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
const server_1 = require("./server");
const logger_1 = require("../utils/logger");
const config_manager_1 = require("../telemetry/config-manager");
const early_error_logger_1 = require("../telemetry/early-error-logger");
const startup_checkpoints_1 = require("../telemetry/startup-checkpoints");
const fs_1 = require("fs");
process.on('uncaughtException', (error) => {
    if (process.env.MCP_MODE !== 'stdio') {
        console.error('Uncaught Exception:', error);
    }
    logger_1.logger.error('Uncaught Exception:', error);
    process.exit(1);
});
process.on('unhandledRejection', (reason, promise) => {
    if (process.env.MCP_MODE !== 'stdio') {
        console.error('Unhandled Rejection at:', promise, 'reason:', reason);
    }
    logger_1.logger.error('Unhandled Rejection:', reason);
    process.exit(1);
});
function isContainerEnvironment() {
    const dockerEnv = (process.env.IS_DOCKER || '').toLowerCase();
    const containerEnv = (process.env.IS_CONTAINER || '').toLowerCase();
    if (['true', '1', 'yes'].includes(dockerEnv)) {
        return true;
    }
    if (['true', '1', 'yes'].includes(containerEnv)) {
        return true;
    }
    try {
        return (0, fs_1.existsSync)('/.dockerenv') || (0, fs_1.existsSync)('/run/.containerenv');
    }
    catch (error) {
        logger_1.logger.debug('Container detection filesystem check failed:', error);
        return false;
    }
}
async function main() {
    const startTime = Date.now();
    const earlyLogger = early_error_logger_1.EarlyErrorLogger.getInstance();
    const checkpoints = [];
    try {
        earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.PROCESS_STARTED);
        checkpoints.push(startup_checkpoints_1.STARTUP_CHECKPOINTS.PROCESS_STARTED);
        const args = process.argv.slice(2);
        if (args.length > 0 && args[0] === 'telemetry') {
            const telemetryConfig = config_manager_1.TelemetryConfigManager.getInstance();
            const action = args[1];
            switch (action) {
                case 'enable':
                    telemetryConfig.enable();
                    process.exit(0);
                    break;
                case 'disable':
                    telemetryConfig.disable();
                    process.exit(0);
                    break;
                case 'status':
                    console.log(telemetryConfig.getStatus());
                    process.exit(0);
                    break;
                default:
                    console.log(`
Usage: n8n-mcp telemetry [command]

Commands:
  enable    Enable anonymous telemetry
  disable   Disable anonymous telemetry
  status    Show current telemetry status

Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
`);
                    process.exit(args[1] ? 1 : 0);
            }
        }
        const mode = process.env.MCP_MODE || 'stdio';
        earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);
        checkpoints.push(startup_checkpoints_1.STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);
        earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.TELEMETRY_READY);
        checkpoints.push(startup_checkpoints_1.STARTUP_CHECKPOINTS.TELEMETRY_READY);
        try {
            if (mode === 'http') {
                console.error(`Starting n8n Documentation MCP Server in ${mode} mode...`);
                console.error('Current directory:', process.cwd());
                console.error('Node version:', process.version);
            }
            earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);
            checkpoints.push(startup_checkpoints_1.STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);
            if (mode === 'http') {
                if (process.env.USE_FIXED_HTTP === 'true') {
                    const { startFixedHTTPServer } = await Promise.resolve().then(() => __importStar(require('../http-server')));
                    await startFixedHTTPServer();
                }
                else {
                    const { SingleSessionHTTPServer } = await Promise.resolve().then(() => __importStar(require('../http-server-single-session')));
                    const server = new SingleSessionHTTPServer();
                    const shutdown = async () => {
                        await server.shutdown();
                        process.exit(0);
                    };
                    process.on('SIGTERM', shutdown);
                    process.on('SIGINT', shutdown);
                    await server.start();
                }
            }
            else {
                const server = new server_1.N8NDocumentationMCPServer(undefined, earlyLogger);
                let isShuttingDown = false;
                const shutdown = async (signal = 'UNKNOWN') => {
                    if (isShuttingDown)
                        return;
                    isShuttingDown = true;
                    try {
                        logger_1.logger.info(`Shutdown initiated by: ${signal}`);
                        await server.shutdown();
                        if (process.stdin && !process.stdin.destroyed) {
                            process.stdin.pause();
                            process.stdin.destroy();
                        }
                        setTimeout(() => {
                            logger_1.logger.warn('Shutdown timeout exceeded, forcing exit');
                            process.exit(0);
                        }, 1000).unref();
                    }
                    catch (error) {
                        logger_1.logger.error('Error during shutdown:', error);
                        process.exit(1);
                    }
                };
                process.on('SIGTERM', () => shutdown('SIGTERM'));
                process.on('SIGINT', () => shutdown('SIGINT'));
                process.on('SIGHUP', () => shutdown('SIGHUP'));
                const isContainer = isContainerEnvironment();
                if (!isContainer && process.stdin.readable && !process.stdin.destroyed) {
                    try {
                        process.stdin.on('end', () => shutdown('STDIN_END'));
                        process.stdin.on('close', () => shutdown('STDIN_CLOSE'));
                    }
                    catch (error) {
                        logger_1.logger.error('Failed to register stdin handlers, using signal handlers only:', error);
                    }
                }
                await server.run();
            }
            earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);
            checkpoints.push(startup_checkpoints_1.STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);
            earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.SERVER_READY);
            checkpoints.push(startup_checkpoints_1.STARTUP_CHECKPOINTS.SERVER_READY);
            const startupDuration = Date.now() - startTime;
            earlyLogger.logStartupSuccess(checkpoints, startupDuration);
            logger_1.logger.info(`Server startup completed in ${startupDuration}ms (${checkpoints.length} checkpoints passed)`);
        }
        catch (error) {
            const failedCheckpoint = (0, startup_checkpoints_1.findFailedCheckpoint)(checkpoints);
            earlyLogger.logStartupError(failedCheckpoint, error);
            if (mode !== 'stdio') {
                console.error('Failed to start MCP server:', error);
                logger_1.logger.error('Failed to start MCP server', error);
                if (error instanceof Error && error.message.includes('nodes.db not found')) {
                    console.error('\nTo fix this issue:');
                    console.error('1. cd to the n8n-mcp directory');
                    console.error('2. Run: npm run build');
                    console.error('3. Run: npm run rebuild');
                }
                else if (error instanceof Error && error.message.includes('NODE_MODULE_VERSION')) {
                    console.error('\nTo fix this Node.js version mismatch:');
                    console.error('1. cd to the n8n-mcp directory');
                    console.error('2. Run: npm rebuild better-sqlite3');
                    console.error('3. If that doesn\'t work, try: rm -rf node_modules && npm install');
                }
            }
            process.exit(1);
        }
    }
    catch (outerError) {
        logger_1.logger.error('Critical startup error:', outerError);
        process.exit(1);
    }
}
if (require.main === module) {
    main().catch(console.error);
}
//# sourceMappingURL=index.js.map
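A short sketch of how the container-detection flags above are interpreted. This is an editorial illustration of the isContainerEnvironment logic shown in the file, not part of the committed code; the environment values are made up.

// Hypothetical illustration, mirroring the checks in isContainerEnvironment.
process.env.IS_DOCKER = 'TRUE';      // case-insensitive: toLowerCase() yields 'true'
// isContainerEnvironment() would return true here without touching the filesystem.

process.env.IS_DOCKER = '0';         // not in ['true', '1', 'yes'], so this flag is ignored
process.env.IS_CONTAINER = 'yes';    // the second flag still matches
// isContainerEnvironment() would again return true.

delete process.env.IS_DOCKER;
delete process.env.IS_CONTAINER;
// With neither flag set, the function falls back to checking for
// /.dockerenv (Docker) or /run/.containerenv (Podman) on disk.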
1 dist/mcp/index.js.map vendored Normal file
File diff suppressed because one or more lines are too long
81 dist/mcp/server.d.ts vendored Normal file
@@ -0,0 +1,81 @@
import { InstanceContext } from '../types/instance-context';
import { EarlyErrorLogger } from '../telemetry/early-error-logger';
export declare class N8NDocumentationMCPServer {
    private server;
    private db;
    private repository;
    private templateService;
    private initialized;
    private cache;
    private clientInfo;
    private instanceContext?;
    private previousTool;
    private previousToolTimestamp;
    private earlyLogger;
    private disabledToolsCache;
    constructor(instanceContext?: InstanceContext, earlyLogger?: EarlyErrorLogger);
    close(): Promise<void>;
    private initializeDatabase;
    private initializeInMemorySchema;
    private parseSQLStatements;
    private ensureInitialized;
    private dbHealthChecked;
    private validateDatabaseHealth;
    private getDisabledTools;
    private setupHandlers;
    private sanitizeValidationResult;
    private validateToolParams;
    private validateToolParamsBasic;
    private validateExtractedArgs;
    private listNodes;
    private getNodeInfo;
    private searchNodes;
    private searchNodesFTS;
    private searchNodesFuzzy;
    private calculateFuzzyScore;
    private getEditDistance;
    private searchNodesLIKE;
    private calculateRelevance;
    private calculateRelevanceScore;
    private rankSearchResults;
    private listAITools;
    private getNodeDocumentation;
    private getDatabaseStatistics;
    private getNodeEssentials;
    private getNode;
    private handleInfoMode;
    private handleVersionMode;
    private getVersionSummary;
    private getVersionHistory;
    private compareVersions;
    private getBreakingChanges;
    private getMigrations;
    private enrichPropertyWithTypeInfo;
    private enrichPropertiesWithTypeInfo;
    private searchNodeProperties;
    private getPropertyValue;
    private listTasks;
    private validateNodeConfig;
    private getPropertyDependencies;
    private getNodeAsToolInfo;
    private getOutputDescriptions;
    private getCommonAIToolUseCases;
    private buildToolVariantGuidance;
    private getAIToolExamples;
    private validateNodeMinimal;
    private getToolsDocumentation;
    connect(transport: any): Promise<void>;
    private listTemplates;
    private listNodeTemplates;
    private getTemplate;
    private searchTemplates;
    private getTemplatesForTask;
    private searchTemplatesByMetadata;
    private getTaskDescription;
    private validateWorkflow;
    private validateWorkflowConnections;
    private validateWorkflowExpressions;
    run(): Promise<void>;
    shutdown(): Promise<void>;
}
//# sourceMappingURL=server.d.ts.map
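The declaration file above fixes the class's public surface: the constructor, connect, run, close, and shutdown; everything else is private. A minimal consumer sketch based only on that surface follows; it is an editorial illustration, and runOnce and the transport object are hypothetical.

// Hypothetical consumer sketch, using only the public API declared above.
const { N8NDocumentationMCPServer } = require('./server');

async function runOnce(transport) {
    const server = new N8NDocumentationMCPServer(); // both constructor args are optional
    await server.connect(transport);                // transport is typed `any` in the declaration
    // ... serve requests ...
    await server.shutdown();
}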
1 dist/mcp/server.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"server.d.ts","sourceRoot":"","sources":["../../src/mcp/server.ts"],"names":[],"mappings":"AAsCA,OAAO,EAAE,eAAe,EAAE,MAAM,2BAA2B,CAAC;AAE5D,OAAO,EAAE,gBAAgB,EAAE,MAAM,iCAAiC,CAAC;AAgGnE,qBAAa,yBAAyB;IACpC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,EAAE,CAAgC;IAC1C,OAAO,CAAC,UAAU,CAA+B;IACjD,OAAO,CAAC,eAAe,CAAgC;IACvD,OAAO,CAAC,WAAW,CAAgB;IACnC,OAAO,CAAC,KAAK,CAAqB;IAClC,OAAO,CAAC,UAAU,CAAa;IAC/B,OAAO,CAAC,eAAe,CAAC,CAAkB;IAC1C,OAAO,CAAC,YAAY,CAAuB;IAC3C,OAAO,CAAC,qBAAqB,CAAsB;IACnD,OAAO,CAAC,WAAW,CAAiC;IACpD,OAAO,CAAC,kBAAkB,CAA4B;gBAE1C,eAAe,CAAC,EAAE,eAAe,EAAE,WAAW,CAAC,EAAE,gBAAgB;IAiGvE,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;YA6Bd,kBAAkB;YAwClB,wBAAwB;IA0BtC,OAAO,CAAC,kBAAkB;YA6CZ,iBAAiB;IAa/B,OAAO,CAAC,eAAe,CAAkB;YAE3B,sBAAsB;IAgDpC,OAAO,CAAC,gBAAgB;IAqCxB,OAAO,CAAC,aAAa;IAoTrB,OAAO,CAAC,wBAAwB;IAoFhC,OAAO,CAAC,kBAAkB;IAqE1B,OAAO,CAAC,uBAAuB;IAwB/B,OAAO,CAAC,qBAAqB;YAgTf,SAAS;YA2DT,WAAW;YAkFX,WAAW;YAyCX,cAAc;YAyKd,gBAAgB;IAqD9B,OAAO,CAAC,mBAAmB;IAwE3B,OAAO,CAAC,eAAe;YAsBT,eAAe;IAqI7B,OAAO,CAAC,kBAAkB;IAQ1B,OAAO,CAAC,uBAAuB;IA0D/B,OAAO,CAAC,iBAAiB;YAqFX,WAAW;YAgCX,oBAAoB;YA2EpB,qBAAqB;YAwDrB,iBAAiB;YAiKjB,OAAO;YAgDP,cAAc;YAwFd,iBAAiB;IAqC/B,OAAO,CAAC,iBAAiB;IA0BzB,OAAO,CAAC,iBAAiB;IA0BzB,OAAO,CAAC,eAAe;IAwCvB,OAAO,CAAC,kBAAkB;IAiC1B,OAAO,CAAC,aAAa;IAoCrB,OAAO,CAAC,0BAA0B;IAgClC,OAAO,CAAC,4BAA4B;YAKtB,oBAAoB;IAsDlC,OAAO,CAAC,gBAAgB;YAiBV,SAAS;YA6CT,kBAAkB;YAqElB,uBAAuB;YAsDvB,iBAAiB;IAqE/B,OAAO,CAAC,qBAAqB;IA8C7B,OAAO,CAAC,uBAAuB;IA4D/B,OAAO,CAAC,wBAAwB;IAkChC,OAAO,CAAC,iBAAiB;YAoDX,mBAAmB;YAoEnB,qBAAqB;IAS7B,OAAO,CAAC,SAAS,EAAE,GAAG,GAAG,OAAO,CAAC,IAAI,CAAC;YAS9B,aAAa;YAcb,iBAAiB;YAoBjB,WAAW;YAwBX,eAAe;YAqBf,mBAAmB;YAwBnB,yBAAyB;IA4CvC,OAAO,CAAC,kBAAkB;YAiBZ,gBAAgB;YA6HhB,2BAA2B;YAiE3B,2BAA2B;IAyEnC,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IA0BpB,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;CAuBhC"}
2824 dist/mcp/server.js vendored Normal file
File diff suppressed because it is too large
1 dist/mcp/server.js.map vendored Normal file
File diff suppressed because one or more lines are too long
3 dist/mcp/stdio-wrapper.d.ts vendored Normal file
@@ -0,0 +1,3 @@
#!/usr/bin/env node
export {};
//# sourceMappingURL=stdio-wrapper.d.ts.map
1 dist/mcp/stdio-wrapper.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"stdio-wrapper.d.ts","sourceRoot":"","sources":["../../src/mcp/stdio-wrapper.ts"],"names":[],"mappings":""}
81 dist/mcp/stdio-wrapper.js vendored Normal file
@@ -0,0 +1,81 @@
#!/usr/bin/env node
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
process.env.MCP_MODE = 'stdio';
process.env.DISABLE_CONSOLE_OUTPUT = 'true';
process.env.LOG_LEVEL = 'error';
const originalConsoleLog = console.log;
const originalConsoleError = console.error;
const originalConsoleWarn = console.warn;
const originalConsoleInfo = console.info;
const originalConsoleDebug = console.debug;
const originalConsoleTrace = console.trace;
const originalConsoleDir = console.dir;
const originalConsoleTime = console.time;
const originalConsoleTimeEnd = console.timeEnd;
console.log = () => { };
console.error = () => { };
console.warn = () => { };
console.info = () => { };
console.debug = () => { };
console.trace = () => { };
console.dir = () => { };
console.time = () => { };
console.timeEnd = () => { };
console.timeLog = () => { };
console.group = () => { };
console.groupEnd = () => { };
console.table = () => { };
console.clear = () => { };
console.count = () => { };
console.countReset = () => { };
const server_1 = require("./server");
let server = null;
async function main() {
    try {
        server = new server_1.N8NDocumentationMCPServer();
        await server.run();
    }
    catch (error) {
        originalConsoleError('Fatal error:', error);
        process.exit(1);
    }
}
process.on('uncaughtException', (error) => {
    originalConsoleError('Uncaught exception:', error);
    process.exit(1);
});
process.on('unhandledRejection', (reason) => {
    originalConsoleError('Unhandled rejection:', reason);
    process.exit(1);
});
let isShuttingDown = false;
async function shutdown(signal) {
    if (isShuttingDown)
        return;
    isShuttingDown = true;
    originalConsoleError(`Received ${signal}, shutting down gracefully...`);
    try {
        if (server) {
            await server.shutdown();
        }
    }
    catch (error) {
        originalConsoleError('Error during shutdown:', error);
    }
    process.stdin.pause();
    process.stdin.destroy();
    setTimeout(() => {
        process.exit(0);
    }, 500).unref();
    process.exit(0);
}
process.on('SIGTERM', () => void shutdown('SIGTERM'));
process.on('SIGINT', () => void shutdown('SIGINT'));
process.on('SIGHUP', () => void shutdown('SIGHUP'));
process.stdin.on('end', () => {
    originalConsoleError('stdin closed, shutting down...');
    void shutdown('STDIN_CLOSE');
});
main();
//# sourceMappingURL=stdio-wrapper.js.map
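The reason for the blanket console silencing above: in stdio mode, stdout carries the JSON-RPC protocol stream, so any stray print would corrupt framing, while the captured originals still allow diagnostics on stderr. A short sketch of that discipline follows; it is an editorial illustration, and the frame content is made up.

// Hypothetical illustration of the stdio discipline the wrapper enforces.
// stdout is reserved for protocol frames:
process.stdout.write(JSON.stringify({ jsonrpc: '2.0', id: 1, result: {} }) + '\n');
// Diagnostics must bypass the silenced console and go to stderr,
// which is exactly what the captured originalConsoleError still does:
originalConsoleError('debug note: this goes to stderr, not the protocol stream');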
1 dist/mcp/stdio-wrapper.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"stdio-wrapper.js","sourceRoot":"","sources":["../../src/mcp/stdio-wrapper.ts"],"names":[],"mappings":";;;AAQA,OAAO,CAAC,GAAG,CAAC,QAAQ,GAAG,OAAO,CAAC;AAC/B,OAAO,CAAC,GAAG,CAAC,sBAAsB,GAAG,MAAM,CAAC;AAC5C,OAAO,CAAC,GAAG,CAAC,SAAS,GAAG,OAAO,CAAC;AAGhC,MAAM,kBAAkB,GAAG,OAAO,CAAC,GAAG,CAAC;AACvC,MAAM,oBAAoB,GAAG,OAAO,CAAC,KAAK,CAAC;AAC3C,MAAM,mBAAmB,GAAG,OAAO,CAAC,IAAI,CAAC;AACzC,MAAM,mBAAmB,GAAG,OAAO,CAAC,IAAI,CAAC;AACzC,MAAM,oBAAoB,GAAG,OAAO,CAAC,KAAK,CAAC;AAC3C,MAAM,oBAAoB,GAAG,OAAO,CAAC,KAAK,CAAC;AAC3C,MAAM,kBAAkB,GAAG,OAAO,CAAC,GAAG,CAAC;AACvC,MAAM,mBAAmB,GAAG,OAAO,CAAC,IAAI,CAAC;AACzC,MAAM,sBAAsB,GAAG,OAAO,CAAC,OAAO,CAAC;AAG/C,OAAO,CAAC,GAAG,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACvB,OAAO,CAAC,KAAK,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACzB,OAAO,CAAC,IAAI,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACxB,OAAO,CAAC,IAAI,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACxB,OAAO,CAAC,KAAK,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACzB,OAAO,CAAC,KAAK,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACzB,OAAO,CAAC,GAAG,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACvB,OAAO,CAAC,IAAI,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACxB,OAAO,CAAC,OAAO,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AAC3B,OAAO,CAAC,OAAO,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AAC3B,OAAO,CAAC,KAAK,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACzB,OAAO,CAAC,QAAQ,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AAC5B,OAAO,CAAC,KAAK,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACzB,OAAO,CAAC,KAAK,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACzB,OAAO,CAAC,KAAK,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AACzB,OAAO,CAAC,UAAU,GAAG,GAAG,EAAE,GAAE,CAAC,CAAC;AAG9B,qCAAqD;AAErD,IAAI,MAAM,GAAqC,IAAI,CAAC;AAEpD,KAAK,UAAU,IAAI;IACjB,IAAI,CAAC;QACH,MAAM,GAAG,IAAI,kCAAyB,EAAE,CAAC;QACzC,MAAM,MAAM,CAAC,GAAG,EAAE,CAAC;IACrB,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QAEf,oBAAoB,CAAC,cAAc,EAAE,KAAK,CAAC,CAAC;QAC5C,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IAClB,CAAC;AACH,CAAC;AAGD,OAAO,CAAC,EAAE,CAAC,mBAAmB,EAAE,CAAC,KAAK,EAAE,EAAE;IACxC,oBAAoB,CAAC,qBAAqB,EAAE,KAAK,CAAC,CAAC;IACnD,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AAClB,CAAC,CAAC,CAAC;AAEH,OAAO,CAAC,EAAE,CAAC,oBAAoB,EAAE,CAAC,MAAM,EAAE,EAAE;IAC1C,oBAAoB,CAAC,sBAAsB,EAAE,MAAM,CAAC,CAAC;IACrD,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AAClB,CAAC,CAAC,CAAC;AAGH,IAAI,cAAc,GAAG,KAAK,CAAC;AAE3B,KAAK,UAAU,QAAQ,CAAC,MAAc;IACpC,IAAI,cAAc;QAAE,OAAO;IAC3B,cAAc,GAAG,IAAI,CAAC;IAGtB,oBAAoB,CAAC,YAAY,MAAM,+BAA+B,CAAC,CAAC;IAExE,IAAI,CAAC;QAEH,IAAI,MAAM,EAAE,CAAC;YACX,MAAM,MAAM,CAAC,QAAQ,EAAE,CAAC;QAC1B,CAAC;IACH,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,oBAAoB,CAAC,wBAAwB,EAAE,KAAK,CAAC,CAAC;IACxD,CAAC;IAGD,OAAO,CAAC,KAAK,CAAC,KAAK,EAAE,CAAC;IACtB,OAAO,CAAC,KAAK,CAAC,OAAO,EAAE,CAAC;IAGxB,UAAU,CAAC,GAAG,EAAE;QACd,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IAClB,CAAC,EAAE,GAAG,CAAC,CAAC,KAAK,EAAE,CAAC;IAGhB,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AAClB,CAAC;AAGD,OAAO,CAAC,EAAE,CAAC,SAAS,EAAE,GAAG,EAAE,CAAC,KAAK,QAAQ,CAAC,SAAS,CAAC,CAAC,CAAC;AACtD,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,KAAK,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AACpD,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,KAAK,QAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC;AAGpD,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,GAAG,EAAE;IAC3B,oBAAoB,CAAC,gCAAgC,CAAC,CAAC;IACvD,KAAK,QAAQ,CAAC,aAAa,CAAC,CAAC;AAC/B,CAAC,CAAC,CAAC;AAEH,IAAI,EAAE,CAAC"}
3 dist/mcp/tool-docs/configuration/get-node.d.ts vendored Normal file
@@ -0,0 +1,3 @@
import { ToolDocumentation } from '../types';
export declare const getNodeDoc: ToolDocumentation;
//# sourceMappingURL=get-node.d.ts.map
1 dist/mcp/tool-docs/configuration/get-node.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"get-node.d.ts","sourceRoot":"","sources":["../../../../src/mcp/tool-docs/configuration/get-node.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAE7C,eAAO,MAAM,UAAU,EAAE,iBAqFxB,CAAC"}
90 dist/mcp/tool-docs/configuration/get-node.js vendored Normal file
@@ -0,0 +1,90 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getNodeDoc = void 0;
exports.getNodeDoc = {
    name: 'get_node',
    category: 'configuration',
    essentials: {
        description: 'Unified node information tool with progressive detail levels and multiple modes. Get node schema, docs, search properties, or version info.',
        keyParameters: ['nodeType', 'detail', 'mode', 'includeTypeInfo', 'includeExamples'],
        example: 'get_node({nodeType: "nodes-base.httpRequest", detail: "standard"})',
        performance: 'Instant (<10ms) for minimal/standard, moderate for full',
        tips: [
            'Use detail="standard" (default) for most tasks - shows required fields',
            'Use mode="docs" for readable markdown documentation',
            'Use mode="search_properties" with propertyQuery to find specific fields',
            'Use mode="versions" to check version history and breaking changes',
            'Add includeExamples=true to get real-world configuration examples'
        ]
    },
    full: {
        description: `**Detail Levels (mode="info", default):**
- minimal (~200 tokens): Basic metadata only - nodeType, displayName, description, category
- standard (~1-2K tokens): Essential properties + operations - recommended for most tasks
- full (~3-8K tokens): Complete node schema - use only when standard insufficient

**Operation Modes:**
- info (default): Node schema with configurable detail level
- docs: Readable markdown documentation with examples and patterns
- search_properties: Find specific properties within a node
- versions: List all available versions with breaking changes summary
- compare: Compare two versions with property-level changes
- breaking: Show only breaking changes between versions
- migrations: Show auto-migratable changes between versions`,
        parameters: {
            nodeType: { type: 'string', required: true, description: 'Full node type with prefix: "nodes-base.httpRequest" or "nodes-langchain.agent"' },
            detail: { type: 'string', required: false, description: 'Detail level for mode=info: "minimal", "standard" (default), "full"' },
            mode: { type: 'string', required: false, description: 'Operation mode: "info" (default), "docs", "search_properties", "versions", "compare", "breaking", "migrations"' },
            includeTypeInfo: { type: 'boolean', required: false, description: 'Include type structure metadata (validation rules, JS types). Adds ~80-120 tokens per property' },
            includeExamples: { type: 'boolean', required: false, description: 'Include real-world configuration examples from templates. Adds ~200-400 tokens per example' },
            propertyQuery: { type: 'string', required: false, description: 'For mode=search_properties: search term to find properties (e.g., "auth", "header", "body")' },
            maxPropertyResults: { type: 'number', required: false, description: 'For mode=search_properties: max results (default 20)' },
            fromVersion: { type: 'string', required: false, description: 'For compare/breaking/migrations modes: source version (e.g., "1.0")' },
            toVersion: { type: 'string', required: false, description: 'For compare mode: target version (e.g., "2.0"). Defaults to latest' }
        },
        returns: `Depends on mode:
- info: Node schema with properties based on detail level
- docs: Markdown documentation string
- search_properties: Array of matching property paths with descriptions
- versions: Version history with breaking changes flags
- compare/breaking/migrations: Version comparison details`,
        examples: [
            '// Standard detail (recommended for AI agents)\nget_node({nodeType: "nodes-base.httpRequest"})',
            '// Minimal for quick metadata check\nget_node({nodeType: "nodes-base.slack", detail: "minimal"})',
            '// Full detail with examples\nget_node({nodeType: "nodes-base.googleSheets", detail: "full", includeExamples: true})',
            '// Get readable documentation\nget_node({nodeType: "nodes-base.webhook", mode: "docs"})',
            '// Search for authentication properties\nget_node({nodeType: "nodes-base.httpRequest", mode: "search_properties", propertyQuery: "auth"})',
            '// Check version history\nget_node({nodeType: "nodes-base.executeWorkflow", mode: "versions"})',
            '// Compare specific versions\nget_node({nodeType: "nodes-base.httpRequest", mode: "compare", fromVersion: "3.0", toVersion: "4.1"})'
        ],
        useCases: [
            'Configure nodes for workflow building (use detail=standard)',
            'Find specific configuration options (use mode=search_properties)',
            'Get human-readable node documentation (use mode=docs)',
            'Check for breaking changes before version upgrades (use mode=breaking)',
            'Understand complex types with includeTypeInfo=true'
        ],
        performance: `Token costs by detail level:
- minimal: ~200 tokens
- standard: ~1000-2000 tokens (default)
- full: ~3000-8000 tokens
- includeTypeInfo: +80-120 tokens per property
- includeExamples: +200-400 tokens per example
- Version modes: ~400-1200 tokens`,
        bestPractices: [
            'Start with detail="standard" - it covers 95% of use cases',
            'Only use detail="full" if standard is missing required properties',
            'Use mode="docs" when explaining nodes to users',
            'Combine includeTypeInfo=true for complex nodes (filter, resourceMapper)',
            'Check version history before configuring versioned nodes'
        ],
        pitfalls: [
            'detail="full" returns large responses (~100KB) - use sparingly',
            'Node type must include prefix (nodes-base. or nodes-langchain.)',
            'includeExamples only works with mode=info and detail=standard',
            'Version modes require nodes with multiple versions in database'
        ],
        relatedTools: ['search_nodes', 'validate_node', 'validate_workflow']
    }
};
//# sourceMappingURL=get-node.js.map
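Since the doc entry above quantifies token costs per detail level, a client could budget its get_node calls accordingly. A rough sketch follows; it is an editorial illustration, pickDetailLevel is hypothetical, and the thresholds are invented from the documented cost ranges.

// Hypothetical helper; thresholds derived from the token costs documented above.
function pickDetailLevel(tokenBudget) {
    if (tokenBudget >= 8000) return 'full';     // full: ~3000-8000 tokens
    if (tokenBudget >= 2000) return 'standard'; // standard: ~1000-2000 tokens
    return 'minimal';                           // minimal: ~200 tokens
}

// e.g. get_node({ nodeType: 'nodes-base.httpRequest', detail: pickDetailLevel(2500) })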
1 dist/mcp/tool-docs/configuration/get-node.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"get-node.js","sourceRoot":"","sources":["../../../../src/mcp/tool-docs/configuration/get-node.ts"],"names":[],"mappings":";;;AAEa,QAAA,UAAU,GAAsB;IAC3C,IAAI,EAAE,UAAU;IAChB,QAAQ,EAAE,eAAe;IACzB,UAAU,EAAE;QACV,WAAW,EAAE,6IAA6I;QAC1J,aAAa,EAAE,CAAC,UAAU,EAAE,QAAQ,EAAE,MAAM,EAAE,iBAAiB,EAAE,iBAAiB,CAAC;QACnF,OAAO,EAAE,oEAAoE;QAC7E,WAAW,EAAE,yDAAyD;QACtE,IAAI,EAAE;YACJ,wEAAwE;YACxE,qDAAqD;YACrD,yEAAyE;YACzE,mEAAmE;YACnE,mEAAmE;SACpE;KACF;IACD,IAAI,EAAE;QACJ,WAAW,EAAE;;;;;;;;;;;;4DAY2C;QACxD,UAAU,EAAE;YACV,QAAQ,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,EAAE,IAAI,EAAE,WAAW,EAAE,iFAAiF,EAAE;YAC5I,MAAM,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,qEAAqE,EAAE;YAC/H,IAAI,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,gHAAgH,EAAE;YACxK,eAAe,EAAE,EAAE,IAAI,EAAE,SAAS,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,gGAAgG,EAAE;YACpK,eAAe,EAAE,EAAE,IAAI,EAAE,SAAS,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,4FAA4F,EAAE;YAChK,aAAa,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,6FAA6F,EAAE;YAC9J,kBAAkB,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,sDAAsD,EAAE;YAC5H,WAAW,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,qEAAqE,EAAE;YACpI,SAAS,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE,QAAQ,EAAE,KAAK,EAAE,WAAW,EAAE,oEAAoE,EAAE;SAClI;QACD,OAAO,EAAE;;;;;0DAK6C;QACtD,QAAQ,EAAE;YACR,gGAAgG;YAChG,kGAAkG;YAClG,sHAAsH;YACtH,yFAAyF;YACzF,2IAA2I;YAC3I,gGAAgG;YAChG,qIAAqI;SACtI;QACD,QAAQ,EAAE;YACR,6DAA6D;YAC7D,kEAAkE;YAClE,uDAAuD;YACvD,wEAAwE;YACxE,oDAAoD;SACrD;QACD,WAAW,EAAE;;;;;;kCAMiB;QAC9B,aAAa,EAAE;YACb,2DAA2D;YAC3D,mEAAmE;YACnE,gDAAgD;YAChD,yEAAyE;YACzE,0DAA0D;SAC3D;QACD,QAAQ,EAAE;YACR,gEAAgE;YAChE,iEAAiE;YACjE,+DAA+D;YAC/D,gEAAgE;SACjE;QACD,YAAY,EAAE,CAAC,cAAc,EAAE,eAAe,EAAE,mBAAmB,CAAC;KACrE;CACF,CAAC"}
2 dist/mcp/tool-docs/configuration/index.d.ts vendored Normal file
@@ -0,0 +1,2 @@
export { getNodeDoc } from './get-node';
//# sourceMappingURL=index.d.ts.map
1 dist/mcp/tool-docs/configuration/index.d.ts.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../../src/mcp/tool-docs/configuration/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,UAAU,EAAE,MAAM,YAAY,CAAC"}
6 dist/mcp/tool-docs/configuration/index.js vendored Normal file
@@ -0,0 +1,6 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getNodeDoc = void 0;
var get_node_1 = require("./get-node");
Object.defineProperty(exports, "getNodeDoc", { enumerable: true, get: function () { return get_node_1.getNodeDoc; } });
//# sourceMappingURL=index.js.map
1 dist/mcp/tool-docs/configuration/index.js.map vendored Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../../src/mcp/tool-docs/configuration/index.ts"],"names":[],"mappings":";;;AAAA,uCAAwC;AAA/B,sGAAA,UAAU,OAAA"}
2 dist/mcp/tool-docs/discovery/index.d.ts vendored Normal file
@@ -0,0 +1,2 @@
export { searchNodesDoc } from './search-nodes';
//# sourceMappingURL=index.d.ts.map
Some files were not shown because too many files have changed in this diff