mirror of
https://github.com/czlonkowski/n8n-mcp.git
synced 2026-04-01 23:23:12 +00:00
Compare commits
366 Commits
v2.10.4
...
fix/sessio
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
156dd329a0 | ||
|
|
dd62040155 | ||
|
|
112b40119c | ||
|
|
318986f546 | ||
|
|
aa8a6a7069 | ||
|
|
e11a885b0d | ||
|
|
ee99cb7ba1 | ||
|
|
66cb66b31b | ||
|
|
b67d6ba353 | ||
|
|
3ba5584df9 | ||
|
|
be0211d826 | ||
|
|
0d71a16f83 | ||
|
|
085f6db7a2 | ||
|
|
b6bc3b732e | ||
|
|
c16c9a2398 | ||
|
|
1d34ad81d5 | ||
|
|
4566253bdc | ||
|
|
54c598717c | ||
|
|
8b5b01de98 | ||
|
|
275e573d8d | ||
|
|
6256105053 | ||
|
|
1f43784315 | ||
|
|
80e3391773 | ||
|
|
c580a3dde4 | ||
|
|
fc8fb66900 | ||
|
|
4625ebf64d | ||
|
|
43dea68f0b | ||
|
|
dc62fd66cb | ||
|
|
a94ff0586c | ||
|
|
29b2b1d4c1 | ||
|
|
fa6ff89516 | ||
|
|
34811eaf69 | ||
|
|
52c9902efd | ||
|
|
fba8b2a490 | ||
|
|
275e4f8cef | ||
|
|
4016ac42ef | ||
|
|
b8227ff775 | ||
|
|
f61fd9b429 | ||
|
|
4b36ed6a95 | ||
|
|
f072b2e003 | ||
|
|
cfd2325ca4 | ||
|
|
978347e8d0 | ||
|
|
1b7dd3b517 | ||
|
|
c52bbcbb83 | ||
|
|
5fb63cd725 | ||
|
|
36eb8e3864 | ||
|
|
51278f52e9 | ||
|
|
6479ac2bf5 | ||
|
|
08d43bd7fb | ||
|
|
914805f5ea | ||
|
|
08a1d42f09 | ||
|
|
ae11738ac7 | ||
|
|
6e365714e2 | ||
|
|
a2cc37bdf7 | ||
|
|
cf3c66c0ea | ||
|
|
f33b626179 | ||
|
|
2113714ec2 | ||
|
|
49757e3c22 | ||
|
|
dd521d0d87 | ||
|
|
331883f944 | ||
|
|
f3164e202f | ||
|
|
8e2e1dce62 | ||
|
|
b986beef2c | ||
|
|
943f5862a3 | ||
|
|
2c536a25fd | ||
|
|
e95ac7c335 | ||
|
|
e2c8fd0125 | ||
|
|
3332eb09fc | ||
|
|
bd03412fc8 | ||
|
|
73fa494735 | ||
|
|
67d8f5d4d4 | ||
|
|
d2a250e23d | ||
|
|
710f054b93 | ||
|
|
fd65727632 | ||
|
|
5d9936a909 | ||
|
|
de95fb21ba | ||
|
|
2bcd7c757b | ||
|
|
50439e2aa1 | ||
|
|
96cb9eca0f | ||
|
|
36dc8b489c | ||
|
|
cffd5e8b2e | ||
|
|
1ad2c6f6d2 | ||
|
|
28cff8c77b | ||
|
|
0818b4d56c | ||
|
|
5e2a6bdb9c | ||
|
|
ec9d8fdb7e | ||
|
|
ddc4de8c3e | ||
|
|
c67659a7c3 | ||
|
|
4cf8bb5c98 | ||
|
|
53b5dc312d | ||
|
|
1eedb43e9f | ||
|
|
81dfbbbd77 | ||
|
|
3ba3f101b3 | ||
|
|
92eb4ef34f | ||
|
|
ccbe04f007 | ||
|
|
91ad08493c | ||
|
|
7bb021163f | ||
|
|
59ae78f03a | ||
|
|
cb224de01f | ||
|
|
fd9ea985f2 | ||
|
|
225bb06cd5 | ||
|
|
2627028be3 | ||
|
|
cc9fe69449 | ||
|
|
0144484f96 | ||
|
|
2b7bc48699 | ||
|
|
0ec02fa0da | ||
|
|
d207cc3723 | ||
|
|
eeb4b6ac3e | ||
|
|
06cbb40213 | ||
|
|
9a00a99011 | ||
|
|
36aedd5050 | ||
|
|
59f49c47ab | ||
|
|
b106550520 | ||
|
|
e1be4473a3 | ||
|
|
b12a927a10 | ||
|
|
08abdb7937 | ||
|
|
95bb002577 | ||
|
|
36e02c68d3 | ||
|
|
3078273d93 | ||
|
|
aeb74102e5 | ||
|
|
af949b09a5 | ||
|
|
44568a6edd | ||
|
|
59e4cb85ac | ||
|
|
f78f53e731 | ||
|
|
c6e0e528d1 | ||
|
|
34bafe240d | ||
|
|
f139d38c81 | ||
|
|
aeaba3b9ca | ||
|
|
a7bfa73479 | ||
|
|
ee125c52f8 | ||
|
|
f9194ee74c | ||
|
|
2a85000411 | ||
|
|
653f395666 | ||
|
|
cfe3c5e584 | ||
|
|
67c3c9c9c8 | ||
|
|
6d50cf93f0 | ||
|
|
de9f222cfe | ||
|
|
da593400d2 | ||
|
|
126d09c66b | ||
|
|
4f81962953 | ||
|
|
9e7a0e0487 | ||
|
|
a7dc07abab | ||
|
|
1c56eb0daa | ||
|
|
fcf778c79d | ||
|
|
c519cd5060 | ||
|
|
69f3a31d41 | ||
|
|
bd8a7f68ac | ||
|
|
abc6a31302 | ||
|
|
57459c27e3 | ||
|
|
9380602439 | ||
|
|
a696af8cfa | ||
|
|
b467bec93e | ||
|
|
6e042467b2 | ||
|
|
287b9aa819 | ||
|
|
3331b72df4 | ||
|
|
c0d7145a5a | ||
|
|
08e906739f | ||
|
|
ae329c3bb6 | ||
|
|
1cfbdc3bdf | ||
|
|
b3d42b3390 | ||
|
|
4feb905bd0 | ||
|
|
ad1f611d2a | ||
|
|
02574e5555 | ||
|
|
b27d245dab | ||
|
|
ecf0d50a63 | ||
|
|
1db9ecf33f | ||
|
|
fc973d83db | ||
|
|
2e19eaa309 | ||
|
|
73db3dfdfe | ||
|
|
7fcfa8f696 | ||
|
|
c8cdd3c0b5 | ||
|
|
62d01ab237 | ||
|
|
00289e90d7 | ||
|
|
5c01624c3a | ||
|
|
dad3a442d9 | ||
|
|
7a402bc7ad | ||
|
|
88e288f8f6 | ||
|
|
12a7f1e8bf | ||
|
|
2f18a2bb9a | ||
|
|
9b94e3be9c | ||
|
|
9e1a4129c0 | ||
|
|
4b764c6110 | ||
|
|
c3b691cedf | ||
|
|
4bf8f7006d | ||
|
|
2a9a3b9410 | ||
|
|
cd27d78bfd | ||
|
|
8d1ae278ee | ||
|
|
a84dbd6a15 | ||
|
|
1728495146 | ||
|
|
2305aaab9e | ||
|
|
f74427bdb5 | ||
|
|
fe59688e03 | ||
|
|
675989971c | ||
|
|
d875ac1e0c | ||
|
|
5bf1bc46e9 | ||
|
|
3bab53a3be | ||
|
|
8ffda534be | ||
|
|
0bf0e1cd74 | ||
|
|
9fb847a16f | ||
|
|
bf999232a3 | ||
|
|
59e476fdf0 | ||
|
|
711cecb90d | ||
|
|
582c9aac53 | ||
|
|
997cc93a0a | ||
|
|
2f234780dd | ||
|
|
99518f71cf | ||
|
|
fe1e3640af | ||
|
|
aef9d983e2 | ||
|
|
e252a36e3f | ||
|
|
39e13c451f | ||
|
|
a8e0b1ed34 | ||
|
|
ed7de10fd2 | ||
|
|
b7fa12667b | ||
|
|
4854a50854 | ||
|
|
cb5691f17d | ||
|
|
6d45ff8bcb | ||
|
|
64b9cf47a7 | ||
|
|
f4dff6b8e1 | ||
|
|
ec0d2e8a6e | ||
|
|
a1db133a50 | ||
|
|
d8bab6e667 | ||
|
|
3728a9cc67 | ||
|
|
47e6a7846c | ||
|
|
cabda2a0f8 | ||
|
|
34cb8f8c44 | ||
|
|
48df87f76c | ||
|
|
540c5270c6 | ||
|
|
6210378687 | ||
|
|
8c2b1cfbbe | ||
|
|
d862f4961d | ||
|
|
2057f98e76 | ||
|
|
fff47f9f9d | ||
|
|
87cc84f593 | ||
|
|
8405497263 | ||
|
|
7a66f71c23 | ||
|
|
9cbbc6bb67 | ||
|
|
fbce712714 | ||
|
|
f13685fcd7 | ||
|
|
89b1ef2354 | ||
|
|
951d5b7e1b | ||
|
|
263753254a | ||
|
|
2896e393d3 | ||
|
|
9fa1c44149 | ||
|
|
e217d022d6 | ||
|
|
ca150287c9 | ||
|
|
5825a85ccc | ||
|
|
fecc584145 | ||
|
|
09bbcd7001 | ||
|
|
c2195d7da6 | ||
|
|
d8c5c7d4df | ||
|
|
2716207d72 | ||
|
|
a5cf4193e4 | ||
|
|
a1a9ff63d2 | ||
|
|
676c693885 | ||
|
|
e14c647b7d | ||
|
|
481d74c249 | ||
|
|
6f21a717cd | ||
|
|
75b55776f2 | ||
|
|
fa04ece8ea | ||
|
|
acfffbb0f2 | ||
|
|
3b2be46119 | ||
|
|
671c175d71 | ||
|
|
09e69df5a7 | ||
|
|
f150802bed | ||
|
|
5960d2826e | ||
|
|
78abda601a | ||
|
|
2491caecdc | ||
|
|
5e45fe299a | ||
|
|
f6ee6349a0 | ||
|
|
370b063fe4 | ||
|
|
3506497412 | ||
|
|
247c8d74af | ||
|
|
f6160d43a0 | ||
|
|
c23442249a | ||
|
|
3981b9108a | ||
|
|
60f78d5783 | ||
|
|
ceb082efca | ||
|
|
27339ec78d | ||
|
|
eb28bf0f2a | ||
|
|
4390b72d2a | ||
|
|
3b469d0afe | ||
|
|
0c31f12372 | ||
|
|
77b454d8ca | ||
|
|
627c0144a4 | ||
|
|
11df329e0f | ||
|
|
9a13b977dc | ||
|
|
dd36735a1a | ||
|
|
c1fb3db568 | ||
|
|
149976323c | ||
|
|
14bd0f55d3 | ||
|
|
3f8acb7e4a | ||
|
|
1a926630b8 | ||
|
|
c5aebc1450 | ||
|
|
60305cde74 | ||
|
|
3f719ac174 | ||
|
|
594d4975cb | ||
|
|
f237fad1e8 | ||
|
|
bc1cc109b5 | ||
|
|
424f8ae1ff | ||
|
|
f0338ea5ce | ||
|
|
8ed66208e6 | ||
|
|
f6a1b62590 | ||
|
|
34c7f756e1 | ||
|
|
b366d40d67 | ||
|
|
05eec1cc81 | ||
|
|
7e76369d2a | ||
|
|
a5ac4297bc | ||
|
|
4823bd53bc | ||
|
|
32e434fb76 | ||
|
|
bc7bd8e2c0 | ||
|
|
34fbdc30fe | ||
|
|
27b89f4c92 | ||
|
|
70653b16bd | ||
|
|
e6f1d6bcf0 | ||
|
|
44f92063c3 | ||
|
|
17530c0f72 | ||
|
|
0ef69fbf75 | ||
|
|
f39c9a5389 | ||
|
|
92d7577f22 | ||
|
|
874aea6920 | ||
|
|
19caa7bbb4 | ||
|
|
dff0387ae2 | ||
|
|
469cc1720d | ||
|
|
99cdae7655 | ||
|
|
abc226f111 | ||
|
|
16e6a1fc44 | ||
|
|
a7a6d64931 | ||
|
|
03c4e3b9a5 | ||
|
|
297acb039e | ||
|
|
aaf7c83301 | ||
|
|
7147f5ef05 | ||
|
|
2ae0d559bf | ||
|
|
55be451f11 | ||
|
|
28a369deb4 | ||
|
|
0199bcd44d | ||
|
|
6b886acaca | ||
|
|
5f30643406 | ||
|
|
a7846c4ee9 | ||
|
|
0c4a2199f5 | ||
|
|
c18c4e7584 | ||
|
|
1e586c0b23 | ||
|
|
6e24da722b | ||
|
|
d49416fc58 | ||
|
|
b4021acd14 | ||
|
|
61b54266b3 | ||
|
|
319f22f26e | ||
|
|
ea650bc767 | ||
|
|
3b767c798c | ||
|
|
e7895d2e01 | ||
|
|
f35097ed46 | ||
|
|
10c29dd585 | ||
|
|
696f461cab | ||
|
|
1441508c00 | ||
|
|
6b4bb7ff66 | ||
|
|
9e79b53465 | ||
|
|
8ce7c62299 | ||
|
|
15e6e97fd9 | ||
|
|
984af0a72f | ||
|
|
2df1f1b32b | ||
|
|
45fac6fe5e | ||
|
|
b65a2f8f3d | ||
|
|
f3658a4cab | ||
|
|
182016d932 | ||
|
|
36839a1c30 | ||
|
|
cac43ed384 | ||
|
|
8fd8c082ee |
114
.env.example
114
.env.example
@@ -69,6 +69,55 @@ AUTH_TOKEN=your-secure-token-here
|
|||||||
# Default: 0 (disabled)
|
# Default: 0 (disabled)
|
||||||
# TRUST_PROXY=0
|
# TRUST_PROXY=0
|
||||||
|
|
||||||
|
# =========================
|
||||||
|
# SECURITY CONFIGURATION
|
||||||
|
# =========================
|
||||||
|
|
||||||
|
# Rate Limiting Configuration
|
||||||
|
# Protects authentication endpoint from brute force attacks
|
||||||
|
# Window: Time period in milliseconds (default: 900000 = 15 minutes)
|
||||||
|
# Max: Maximum authentication attempts per IP within window (default: 20)
|
||||||
|
# AUTH_RATE_LIMIT_WINDOW=900000
|
||||||
|
# AUTH_RATE_LIMIT_MAX=20
|
||||||
|
|
||||||
|
# SSRF Protection Mode
|
||||||
|
# Prevents webhooks from accessing internal networks and cloud metadata
|
||||||
|
#
|
||||||
|
# Modes:
|
||||||
|
# - strict (default): Block localhost + private IPs + cloud metadata
|
||||||
|
# Use for: Production deployments, cloud environments
|
||||||
|
# Security: Maximum
|
||||||
|
#
|
||||||
|
# - moderate: Allow localhost, block private IPs + cloud metadata
|
||||||
|
# Use for: Local development with local n8n instance
|
||||||
|
# Security: Good balance
|
||||||
|
# Example: n8n running on http://localhost:5678 or http://host.docker.internal:5678
|
||||||
|
#
|
||||||
|
# - permissive: Allow localhost + private IPs, block cloud metadata
|
||||||
|
# Use for: Internal network testing, private cloud (NOT for production)
|
||||||
|
# Security: Minimal - use with caution
|
||||||
|
#
|
||||||
|
# Default: strict
|
||||||
|
# WEBHOOK_SECURITY_MODE=strict
|
||||||
|
#
|
||||||
|
# For local development with local n8n:
|
||||||
|
# WEBHOOK_SECURITY_MODE=moderate
|
||||||
|
|
||||||
|
# =========================
|
||||||
|
# MULTI-TENANT CONFIGURATION
|
||||||
|
# =========================
|
||||||
|
# Enable multi-tenant mode for dynamic instance support
|
||||||
|
# When enabled, n8n API tools will be available for all sessions,
|
||||||
|
# and instance configuration will be determined from HTTP headers
|
||||||
|
# Default: false (single-tenant mode using environment variables)
|
||||||
|
ENABLE_MULTI_TENANT=false
|
||||||
|
|
||||||
|
# Session isolation strategy for multi-tenant mode
|
||||||
|
# - "instance": Create separate sessions per instance ID (recommended)
|
||||||
|
# - "shared": Share sessions but switch contexts (advanced)
|
||||||
|
# Default: instance
|
||||||
|
# MULTI_TENANT_SESSION_STRATEGY=instance
|
||||||
|
|
||||||
# =========================
|
# =========================
|
||||||
# N8N API CONFIGURATION
|
# N8N API CONFIGURATION
|
||||||
# =========================
|
# =========================
|
||||||
@@ -86,4 +135,67 @@ AUTH_TOKEN=your-secure-token-here
|
|||||||
# N8N_API_TIMEOUT=30000
|
# N8N_API_TIMEOUT=30000
|
||||||
|
|
||||||
# Maximum number of API request retries (default: 3)
|
# Maximum number of API request retries (default: 3)
|
||||||
# N8N_API_MAX_RETRIES=3
|
# N8N_API_MAX_RETRIES=3
|
||||||
|
|
||||||
|
# =========================
|
||||||
|
# CACHE CONFIGURATION
|
||||||
|
# =========================
|
||||||
|
# Optional: Configure instance cache settings for flexible instance support
|
||||||
|
|
||||||
|
# Maximum number of cached instances (default: 100, min: 1, max: 10000)
|
||||||
|
# INSTANCE_CACHE_MAX=100
|
||||||
|
|
||||||
|
# Cache TTL in minutes (default: 30, min: 1, max: 1440/24 hours)
|
||||||
|
# INSTANCE_CACHE_TTL_MINUTES=30
|
||||||
|
|
||||||
|
# =========================
|
||||||
|
# OPENAI API CONFIGURATION
|
||||||
|
# =========================
|
||||||
|
# Optional: Enable AI-powered template metadata generation
|
||||||
|
# Provides structured metadata for improved template discovery
|
||||||
|
|
||||||
|
# OpenAI API Key (get from https://platform.openai.com/api-keys)
|
||||||
|
# OPENAI_API_KEY=
|
||||||
|
|
||||||
|
# OpenAI Model for metadata generation (default: gpt-4o-mini)
|
||||||
|
# OPENAI_MODEL=gpt-4o-mini
|
||||||
|
|
||||||
|
# Batch size for metadata generation (default: 100)
|
||||||
|
# Templates are processed in batches using OpenAI's Batch API for 50% cost savings
|
||||||
|
# OPENAI_BATCH_SIZE=100
|
||||||
|
|
||||||
|
# Enable metadata generation during template fetch (default: false)
|
||||||
|
# Set to true to automatically generate metadata when running fetch:templates
|
||||||
|
# METADATA_GENERATION_ENABLED=false
|
||||||
|
|
||||||
|
# ========================================
|
||||||
|
# INTEGRATION TESTING CONFIGURATION
|
||||||
|
# ========================================
|
||||||
|
# Configuration for integration tests that call real n8n instance API
|
||||||
|
|
||||||
|
# n8n API Configuration for Integration Tests
|
||||||
|
# For local development: Use your local n8n instance
|
||||||
|
# For CI: These will be provided by GitHub secrets
|
||||||
|
# N8N_API_URL=http://localhost:5678
|
||||||
|
# N8N_API_KEY=
|
||||||
|
|
||||||
|
# Pre-activated Webhook Workflows for Testing
|
||||||
|
# These workflows must be created manually in n8n and activated
|
||||||
|
# because n8n API doesn't support workflow activation.
|
||||||
|
#
|
||||||
|
# Setup Instructions:
|
||||||
|
# 1. Create 4 workflows in n8n UI (one for each HTTP method)
|
||||||
|
# 2. Each workflow should have a single Webhook node
|
||||||
|
# 3. Configure webhook paths: mcp-test-get, mcp-test-post, mcp-test-put, mcp-test-delete
|
||||||
|
# 4. ACTIVATE each workflow in n8n UI
|
||||||
|
# 5. Copy the workflow IDs here
|
||||||
|
#
|
||||||
|
# N8N_TEST_WEBHOOK_GET_ID= # Workflow ID for GET method webhook
|
||||||
|
# N8N_TEST_WEBHOOK_POST_ID= # Workflow ID for POST method webhook
|
||||||
|
# N8N_TEST_WEBHOOK_PUT_ID= # Workflow ID for PUT method webhook
|
||||||
|
# N8N_TEST_WEBHOOK_DELETE_ID= # Workflow ID for DELETE method webhook
|
||||||
|
|
||||||
|
# Test Configuration
|
||||||
|
N8N_TEST_CLEANUP_ENABLED=true # Enable automatic cleanup of test workflows
|
||||||
|
N8N_TEST_TAG=mcp-integration-test # Tag applied to all test workflows
|
||||||
|
N8N_TEST_NAME_PREFIX=[MCP-TEST] # Name prefix for test workflows
|
||||||
87
.github/workflows/release.yml
vendored
87
.github/workflows/release.yml
vendored
@@ -79,6 +79,38 @@ jobs:
|
|||||||
echo "ℹ️ No version change detected"
|
echo "ℹ️ No version change detected"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
- name: Validate version against npm registry
|
||||||
|
if: steps.check.outputs.changed == 'true'
|
||||||
|
run: |
|
||||||
|
CURRENT_VERSION="${{ steps.check.outputs.version }}"
|
||||||
|
|
||||||
|
# Get latest version from npm (handle package not found)
|
||||||
|
NPM_VERSION=$(npm view n8n-mcp version 2>/dev/null || echo "0.0.0")
|
||||||
|
|
||||||
|
echo "Current version: $CURRENT_VERSION"
|
||||||
|
echo "NPM registry version: $NPM_VERSION"
|
||||||
|
|
||||||
|
# Check if version already exists in npm
|
||||||
|
if [ "$CURRENT_VERSION" = "$NPM_VERSION" ]; then
|
||||||
|
echo "❌ Error: Version $CURRENT_VERSION already published to npm"
|
||||||
|
echo "Please bump the version in package.json before releasing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Simple semver comparison (assumes format: major.minor.patch)
|
||||||
|
# Compare if current version is greater than npm version
|
||||||
|
if [ "$NPM_VERSION" != "0.0.0" ]; then
|
||||||
|
# Sort versions and check if current is not the highest
|
||||||
|
HIGHEST=$(printf '%s\n%s' "$NPM_VERSION" "$CURRENT_VERSION" | sort -V | tail -n1)
|
||||||
|
if [ "$HIGHEST" != "$CURRENT_VERSION" ]; then
|
||||||
|
echo "❌ Error: Version $CURRENT_VERSION is not greater than npm version $NPM_VERSION"
|
||||||
|
echo "Please use a higher version number"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "✅ Version $CURRENT_VERSION is valid (higher than npm version $NPM_VERSION)"
|
||||||
|
|
||||||
extract-changelog:
|
extract-changelog:
|
||||||
name: Extract Changelog
|
name: Extract Changelog
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -206,8 +238,8 @@ jobs:
|
|||||||
echo "id=$RELEASE_ID" >> $GITHUB_OUTPUT
|
echo "id=$RELEASE_ID" >> $GITHUB_OUTPUT
|
||||||
echo "upload_url=https://uploads.github.com/repos/${{ github.repository }}/releases/$RELEASE_ID/assets{?name,label}" >> $GITHUB_OUTPUT
|
echo "upload_url=https://uploads.github.com/repos/${{ github.repository }}/releases/$RELEASE_ID/assets{?name,label}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
build-and-test:
|
build-and-verify:
|
||||||
name: Build and Test
|
name: Build and Verify
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: detect-version-change
|
needs: detect-version-change
|
||||||
if: needs.detect-version-change.outputs.version-changed == 'true'
|
if: needs.detect-version-change.outputs.version-changed == 'true'
|
||||||
@@ -226,22 +258,28 @@ jobs:
|
|||||||
|
|
||||||
- name: Build project
|
- name: Build project
|
||||||
run: npm run build
|
run: npm run build
|
||||||
|
|
||||||
- name: Rebuild database
|
# Database is already built and committed during development
|
||||||
run: npm run rebuild
|
# Rebuilding here causes segfault due to memory pressure (exit code 139)
|
||||||
|
- name: Verify database exists
|
||||||
- name: Run tests
|
run: |
|
||||||
run: npm test
|
if [ ! -f "data/nodes.db" ]; then
|
||||||
env:
|
echo "❌ Error: data/nodes.db not found"
|
||||||
CI: true
|
echo "Please run 'npm run rebuild' locally and commit the database"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"
|
||||||
|
|
||||||
|
# Skip tests - they already passed in PR before merge
|
||||||
|
# Running them again on the same commit adds no safety, only time (~6-7 min)
|
||||||
|
|
||||||
- name: Run type checking
|
- name: Run type checking
|
||||||
run: npm run typecheck
|
run: npm run typecheck
|
||||||
|
|
||||||
publish-npm:
|
publish-npm:
|
||||||
name: Publish to NPM
|
name: Publish to NPM
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: [detect-version-change, build-and-test, create-release]
|
needs: [detect-version-change, build-and-verify, create-release]
|
||||||
if: needs.detect-version-change.outputs.version-changed == 'true'
|
if: needs.detect-version-change.outputs.version-changed == 'true'
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
@@ -259,10 +297,16 @@ jobs:
|
|||||||
|
|
||||||
- name: Build project
|
- name: Build project
|
||||||
run: npm run build
|
run: npm run build
|
||||||
|
|
||||||
- name: Rebuild database
|
# Database is already built and committed during development
|
||||||
run: npm run rebuild
|
- name: Verify database exists
|
||||||
|
run: |
|
||||||
|
if [ ! -f "data/nodes.db" ]; then
|
||||||
|
echo "❌ Error: data/nodes.db not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"
|
||||||
|
|
||||||
- name: Sync runtime version
|
- name: Sync runtime version
|
||||||
run: npm run sync:runtime-version
|
run: npm run sync:runtime-version
|
||||||
|
|
||||||
@@ -290,6 +334,15 @@ jobs:
|
|||||||
const pkg = require('./package.json');
|
const pkg = require('./package.json');
|
||||||
pkg.name = 'n8n-mcp';
|
pkg.name = 'n8n-mcp';
|
||||||
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
|
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
|
||||||
|
pkg.main = 'dist/index.js';
|
||||||
|
pkg.types = 'dist/index.d.ts';
|
||||||
|
pkg.exports = {
|
||||||
|
'.': {
|
||||||
|
types: './dist/index.d.ts',
|
||||||
|
require: './dist/index.js',
|
||||||
|
import: './dist/index.js'
|
||||||
|
}
|
||||||
|
};
|
||||||
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
|
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
|
||||||
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
|
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
|
||||||
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
|
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
|
||||||
@@ -324,7 +377,7 @@ jobs:
|
|||||||
build-docker:
|
build-docker:
|
||||||
name: Build and Push Docker Images
|
name: Build and Push Docker Images
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: [detect-version-change, build-and-test]
|
needs: [detect-version-change, build-and-verify]
|
||||||
if: needs.detect-version-change.outputs.version-changed == 'true'
|
if: needs.detect-version-change.outputs.version-changed == 'true'
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
|||||||
6
.github/workflows/test.yml
vendored
6
.github/workflows/test.yml
vendored
@@ -72,6 +72,12 @@ jobs:
|
|||||||
run: npm run test:integration -- --reporter=default --reporter=junit
|
run: npm run test:integration -- --reporter=default --reporter=junit
|
||||||
env:
|
env:
|
||||||
CI: true
|
CI: true
|
||||||
|
N8N_API_URL: ${{ secrets.N8N_API_URL }}
|
||||||
|
N8N_API_KEY: ${{ secrets.N8N_API_KEY }}
|
||||||
|
N8N_TEST_WEBHOOK_GET_URL: ${{ secrets.N8N_TEST_WEBHOOK_GET_URL }}
|
||||||
|
N8N_TEST_WEBHOOK_POST_URL: ${{ secrets.N8N_TEST_WEBHOOK_POST_URL }}
|
||||||
|
N8N_TEST_WEBHOOK_PUT_URL: ${{ secrets.N8N_TEST_WEBHOOK_PUT_URL }}
|
||||||
|
N8N_TEST_WEBHOOK_DELETE_URL: ${{ secrets.N8N_TEST_WEBHOOK_DELETE_URL }}
|
||||||
|
|
||||||
# Generate test summary
|
# Generate test summary
|
||||||
- name: Generate test summary
|
- name: Generate test summary
|
||||||
|
|||||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -89,11 +89,19 @@ docker-compose.override.yml
|
|||||||
temp/
|
temp/
|
||||||
tmp/
|
tmp/
|
||||||
|
|
||||||
|
# Batch processing error files (may contain API tokens from templates)
|
||||||
|
docs/batch_*.jsonl
|
||||||
|
**/batch_*_error.jsonl
|
||||||
|
|
||||||
|
# Local documentation and analysis files
|
||||||
|
docs/local/
|
||||||
|
|
||||||
# Database files
|
# Database files
|
||||||
# Database files - nodes.db is now tracked directly
|
# Database files - nodes.db is now tracked directly
|
||||||
# data/*.db
|
# data/*.db
|
||||||
data/*.db-journal
|
data/*.db-journal
|
||||||
data/*.db.bak
|
data/*.db.bak
|
||||||
|
data/*.db.backup
|
||||||
!data/.gitkeep
|
!data/.gitkeep
|
||||||
!data/nodes.db
|
!data/nodes.db
|
||||||
|
|
||||||
@@ -126,3 +134,9 @@ n8n-mcp-wrapper.sh
|
|||||||
|
|
||||||
# Package tarballs
|
# Package tarballs
|
||||||
*.tgz
|
*.tgz
|
||||||
|
|
||||||
|
# MCP configuration files
|
||||||
|
.mcp.json
|
||||||
|
|
||||||
|
# Telemetry configuration (user-specific)
|
||||||
|
~/.n8n-mcp/
|
||||||
|
|||||||
3072
CHANGELOG.md
Normal file
3072
CHANGELOG.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -191,4 +191,5 @@ NEVER proactively create documentation files (*.md) or README files. Only create
|
|||||||
- When you make changes to MCP server, you need to ask the user to reload it before you test
|
- When you make changes to MCP server, you need to ask the user to reload it before you test
|
||||||
- When the user asks to review issues, you should use GH CLI to get the issue and all the comments
|
- When the user asks to review issues, you should use GH CLI to get the issue and all the comments
|
||||||
- When the task can be divided into separate subtasks, you should spawn separate sub-agents to handle them in parallel
|
- When the task can be divided into separate subtasks, you should spawn separate sub-agents to handle them in parallel
|
||||||
- Use the best sub-agent for the task as per their descriptions
|
- Use the best sub-agent for the task as per their descriptions
|
||||||
|
- Do not use hyperbolic or dramatic language in comments and documentation
|
||||||
@@ -9,11 +9,13 @@ WORKDIR /app
|
|||||||
COPY tsconfig*.json ./
|
COPY tsconfig*.json ./
|
||||||
|
|
||||||
# Create minimal package.json and install ONLY build dependencies
|
# Create minimal package.json and install ONLY build dependencies
|
||||||
|
# Note: openai and zod are needed for TypeScript compilation of template metadata modules
|
||||||
RUN --mount=type=cache,target=/root/.npm \
|
RUN --mount=type=cache,target=/root/.npm \
|
||||||
echo '{}' > package.json && \
|
echo '{}' > package.json && \
|
||||||
npm install --no-save typescript@^5.8.3 @types/node@^22.15.30 @types/express@^5.0.3 \
|
npm install --no-save typescript@^5.8.3 @types/node@^22.15.30 @types/express@^5.0.3 \
|
||||||
@modelcontextprotocol/sdk@^1.12.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
|
@modelcontextprotocol/sdk@^1.12.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
|
||||||
n8n-workflow@^1.96.0 uuid@^11.0.5 @types/uuid@^10.0.0
|
n8n-workflow@^1.96.0 uuid@^11.0.5 @types/uuid@^10.0.0 \
|
||||||
|
openai@^4.77.0 zod@^3.24.1 lru-cache@^11.2.1 @supabase/supabase-js@^2.57.4
|
||||||
|
|
||||||
# Copy source and build
|
# Copy source and build
|
||||||
COPY src ./src
|
COPY src ./src
|
||||||
@@ -72,6 +74,10 @@ USER nodejs
|
|||||||
# Set Docker environment flag
|
# Set Docker environment flag
|
||||||
ENV IS_DOCKER=true
|
ENV IS_DOCKER=true
|
||||||
|
|
||||||
|
# Telemetry: Anonymous usage statistics are ENABLED by default
|
||||||
|
# To opt-out, uncomment the following line:
|
||||||
|
# ENV N8N_MCP_TELEMETRY_DISABLED=true
|
||||||
|
|
||||||
# Expose HTTP port
|
# Expose HTTP port
|
||||||
EXPOSE 3000
|
EXPOSE 3000
|
||||||
|
|
||||||
|
|||||||
3491
IMPLEMENTATION_GUIDE.md
Normal file
3491
IMPLEMENTATION_GUIDE.md
Normal file
File diff suppressed because it is too large
Load Diff
336
MEMORY_TEMPLATE_UPDATE.md
Normal file
336
MEMORY_TEMPLATE_UPDATE.md
Normal file
@@ -0,0 +1,336 @@
|
|||||||
|
# Template Update Process - Quick Reference
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The n8n-mcp project maintains a database of workflow templates from n8n.io. This guide explains how to update the template database incrementally without rebuilding from scratch.
|
||||||
|
|
||||||
|
## Current Database State
|
||||||
|
|
||||||
|
As of the last update:
|
||||||
|
- **2,598 templates** in database
|
||||||
|
- Templates from the last 12 months
|
||||||
|
- Latest template: September 12, 2025
|
||||||
|
|
||||||
|
## Quick Commands
|
||||||
|
|
||||||
|
### Incremental Update (Recommended)
|
||||||
|
```bash
|
||||||
|
# Build if needed
|
||||||
|
npm run build
|
||||||
|
|
||||||
|
# Fetch only NEW templates (5-10 minutes)
|
||||||
|
npm run fetch:templates:update
|
||||||
|
```
|
||||||
|
|
||||||
|
### Full Rebuild (Rare)
|
||||||
|
```bash
|
||||||
|
# Rebuild entire database from scratch (30-40 minutes)
|
||||||
|
npm run fetch:templates
|
||||||
|
```
|
||||||
|
|
||||||
|
## How It Works
|
||||||
|
|
||||||
|
### Incremental Update Mode (`--update`)
|
||||||
|
|
||||||
|
The incremental update is **smart and efficient**:
|
||||||
|
|
||||||
|
1. **Loads existing template IDs** from database (~2,598 templates)
|
||||||
|
2. **Fetches template list** from n8n.io API (all templates from last 12 months)
|
||||||
|
3. **Filters** to find only NEW templates not in database
|
||||||
|
4. **Fetches details** for new templates only (saves time and API calls)
|
||||||
|
5. **Saves** new templates to database (existing ones untouched)
|
||||||
|
6. **Rebuilds FTS5** search index for new templates
|
||||||
|
|
||||||
|
### Key Benefits
|
||||||
|
|
||||||
|
✅ **Non-destructive**: All existing templates preserved
|
||||||
|
✅ **Fast**: Only fetches new templates (5-10 min vs 30-40 min)
|
||||||
|
✅ **API friendly**: Reduces load on n8n.io API
|
||||||
|
✅ **Safe**: Preserves AI-generated metadata
|
||||||
|
✅ **Smart**: Automatically skips duplicates
|
||||||
|
|
||||||
|
## Performance Comparison
|
||||||
|
|
||||||
|
| Mode | Templates Fetched | Time | Use Case |
|
||||||
|
|------|------------------|------|----------|
|
||||||
|
| **Update** | Only new (~50-200) | 5-10 min | Regular updates |
|
||||||
|
| **Rebuild** | All (~8000+) | 30-40 min | Initial setup or corruption |
|
||||||
|
|
||||||
|
## Command Options
|
||||||
|
|
||||||
|
### Basic Update
|
||||||
|
```bash
|
||||||
|
npm run fetch:templates:update
|
||||||
|
```
|
||||||
|
|
||||||
|
### Full Rebuild
|
||||||
|
```bash
|
||||||
|
npm run fetch:templates
|
||||||
|
```
|
||||||
|
|
||||||
|
### With Metadata Generation
|
||||||
|
```bash
|
||||||
|
# Update templates and generate AI metadata
|
||||||
|
npm run fetch:templates -- --update --generate-metadata
|
||||||
|
|
||||||
|
# Or just generate metadata for existing templates
|
||||||
|
npm run fetch:templates -- --metadata-only
|
||||||
|
```
|
||||||
|
|
||||||
|
### Help
|
||||||
|
```bash
|
||||||
|
npm run fetch:templates -- --help
|
||||||
|
```
|
||||||
|
|
||||||
|
## Update Frequency
|
||||||
|
|
||||||
|
Recommended update schedule:
|
||||||
|
- **Weekly**: Run incremental update to get latest templates
|
||||||
|
- **Monthly**: Review database statistics
|
||||||
|
- **As needed**: Rebuild only if database corruption suspected
|
||||||
|
|
||||||
|
## Template Filtering
|
||||||
|
|
||||||
|
The fetcher automatically filters templates:
|
||||||
|
- ✅ **Includes**: Templates from last 12 months
|
||||||
|
- ✅ **Includes**: Templates with >10 views
|
||||||
|
- ❌ **Excludes**: Templates with ≤10 views (too niche)
|
||||||
|
- ❌ **Excludes**: Templates older than 12 months
|
||||||
|
|
||||||
|
## Workflow
|
||||||
|
|
||||||
|
### Regular Update Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Check current state
|
||||||
|
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"
|
||||||
|
|
||||||
|
# 2. Build project (if code changed)
|
||||||
|
npm run build
|
||||||
|
|
||||||
|
# 3. Run incremental update
|
||||||
|
npm run fetch:templates:update
|
||||||
|
|
||||||
|
# 4. Verify new templates added
|
||||||
|
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"
|
||||||
|
```
|
||||||
|
|
||||||
|
### After n8n Dependency Update
|
||||||
|
|
||||||
|
When you update n8n dependencies, templates remain compatible:
|
||||||
|
```bash
|
||||||
|
# 1. Update n8n (from MEMORY_N8N_UPDATE.md)
|
||||||
|
npm run update:all
|
||||||
|
|
||||||
|
# 2. Fetch new templates incrementally
|
||||||
|
npm run fetch:templates:update
|
||||||
|
|
||||||
|
# 3. Check how many templates were added
|
||||||
|
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"
|
||||||
|
|
||||||
|
# 4. Generate AI metadata for new templates (optional, requires OPENAI_API_KEY)
|
||||||
|
npm run fetch:templates -- --metadata-only
|
||||||
|
|
||||||
|
# 5. IMPORTANT: Sanitize templates before pushing database
|
||||||
|
npm run build
|
||||||
|
npm run sanitize:templates
|
||||||
|
```
|
||||||
|
|
||||||
|
Templates are independent of n8n version - they're just workflow JSON data.
|
||||||
|
|
||||||
|
**CRITICAL**: Always run `npm run sanitize:templates` before pushing the database to remove API tokens from template workflows.
|
||||||
|
|
||||||
|
**Note**: New templates fetched via `--update` mode will NOT have AI-generated metadata by default. You need to run `--metadata-only` separately to generate metadata for templates that don't have it yet.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### No New Templates Found
|
||||||
|
|
||||||
|
This is normal! It means:
|
||||||
|
- All recent templates are already in your database
|
||||||
|
- n8n.io hasn't published many new templates recently
|
||||||
|
- Your database is up to date
|
||||||
|
|
||||||
|
```bash
|
||||||
|
📊 Update mode: 0 new templates to fetch (skipping 2598 existing)
|
||||||
|
✅ All templates already have metadata
|
||||||
|
```
|
||||||
|
|
||||||
|
### API Rate Limiting
|
||||||
|
|
||||||
|
If you hit rate limits:
|
||||||
|
- The fetcher includes built-in delays (150ms between requests)
|
||||||
|
- Wait a few minutes and try again
|
||||||
|
- Use `--update` mode instead of full rebuild
|
||||||
|
|
||||||
|
### Database Corruption
|
||||||
|
|
||||||
|
If you suspect corruption:
|
||||||
|
```bash
|
||||||
|
# Full rebuild from scratch
|
||||||
|
npm run fetch:templates
|
||||||
|
|
||||||
|
# This will:
|
||||||
|
# - Drop and recreate templates table
|
||||||
|
# - Fetch all templates fresh
|
||||||
|
# - Rebuild search indexes
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Schema
|
||||||
|
|
||||||
|
Templates are stored with:
|
||||||
|
- Basic info (id, name, description, author, views, created_at)
|
||||||
|
- Node types used (JSON array)
|
||||||
|
- Complete workflow (gzip compressed, base64 encoded)
|
||||||
|
- AI-generated metadata (optional, requires OpenAI API key)
|
||||||
|
- FTS5 search index for fast text search
|
||||||
|
|
||||||
|
## Metadata Generation
|
||||||
|
|
||||||
|
Generate AI metadata for templates:
|
||||||
|
```bash
|
||||||
|
# Requires OPENAI_API_KEY in .env
|
||||||
|
export OPENAI_API_KEY="sk-..."
|
||||||
|
|
||||||
|
# Generate for templates without metadata (recommended after incremental update)
|
||||||
|
npm run fetch:templates -- --metadata-only
|
||||||
|
|
||||||
|
# Generate during template fetch (slower, but automatic)
|
||||||
|
npm run fetch:templates:update -- --generate-metadata
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important**: Incremental updates (`--update`) do NOT generate metadata by default. After running `npm run fetch:templates:update`, you'll have new templates without metadata. Run `--metadata-only` separately to generate metadata for them.
|
||||||
|
|
||||||
|
### Check Metadata Coverage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# See how many templates have metadata
|
||||||
|
sqlite3 data/nodes.db "SELECT
|
||||||
|
COUNT(*) as total,
|
||||||
|
SUM(CASE WHEN metadata_json IS NOT NULL THEN 1 ELSE 0 END) as with_metadata,
|
||||||
|
SUM(CASE WHEN metadata_json IS NULL THEN 1 ELSE 0 END) as without_metadata
|
||||||
|
FROM templates"
|
||||||
|
|
||||||
|
# See recent templates without metadata
|
||||||
|
sqlite3 data/nodes.db "SELECT id, name, created_at
|
||||||
|
FROM templates
|
||||||
|
WHERE metadata_json IS NULL
|
||||||
|
ORDER BY created_at DESC
|
||||||
|
LIMIT 10"
|
||||||
|
```
|
||||||
|
|
||||||
|
Metadata includes:
|
||||||
|
- Categories
|
||||||
|
- Complexity level (simple/medium/complex)
|
||||||
|
- Use cases
|
||||||
|
- Estimated setup time
|
||||||
|
- Required services
|
||||||
|
- Key features
|
||||||
|
- Target audience
|
||||||
|
|
||||||
|
### Metadata Generation Troubleshooting
|
||||||
|
|
||||||
|
If metadata generation fails:
|
||||||
|
|
||||||
|
1. **Check error file**: Errors are saved to `temp/batch/batch_*_error.jsonl`
|
||||||
|
2. **Common issues**:
|
||||||
|
- `"Unsupported value: 'temperature'"` - Model doesn't support custom temperature
|
||||||
|
- `"Invalid request"` - Check OPENAI_API_KEY is valid
|
||||||
|
- Model availability issues
|
||||||
|
3. **Model**: Uses `gpt-5-mini-2025-08-07` by default
|
||||||
|
4. **Token limit**: 3000 tokens per request for detailed metadata
|
||||||
|
|
||||||
|
The system will automatically:
|
||||||
|
- Process error files and assign default metadata to failed templates
|
||||||
|
- Save error details for debugging
|
||||||
|
- Continue processing even if some templates fail
|
||||||
|
|
||||||
|
**Example error handling**:
|
||||||
|
```bash
|
||||||
|
# If you see: "No output file available for batch job"
|
||||||
|
# Check: temp/batch/batch_*_error.jsonl for error details
|
||||||
|
# The system now automatically processes errors and generates default metadata
|
||||||
|
```
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
Optional configuration:
|
||||||
|
```bash
|
||||||
|
# OpenAI for metadata generation
|
||||||
|
OPENAI_API_KEY=sk-...
|
||||||
|
OPENAI_MODEL=gpt-4o-mini # Default model
|
||||||
|
OPENAI_BATCH_SIZE=50 # Batch size for metadata generation
|
||||||
|
|
||||||
|
# Metadata generation limits
|
||||||
|
METADATA_LIMIT=100 # Max templates to process (0 = all)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Statistics
|
||||||
|
|
||||||
|
After update, check stats:
|
||||||
|
```bash
|
||||||
|
# Template count
|
||||||
|
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"
|
||||||
|
|
||||||
|
# Most recent template
|
||||||
|
sqlite3 data/nodes.db "SELECT MAX(created_at) FROM templates"
|
||||||
|
|
||||||
|
# Templates by view count
|
||||||
|
sqlite3 data/nodes.db "SELECT COUNT(*),
|
||||||
|
CASE
|
||||||
|
WHEN views < 50 THEN '<50'
|
||||||
|
WHEN views < 100 THEN '50-100'
|
||||||
|
WHEN views < 500 THEN '100-500'
|
||||||
|
ELSE '500+'
|
||||||
|
END as view_range
|
||||||
|
FROM templates GROUP BY view_range"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integration with n8n-mcp
|
||||||
|
|
||||||
|
Templates are available through MCP tools:
|
||||||
|
- `list_templates`: List all templates
|
||||||
|
- `get_template`: Get specific template with workflow
|
||||||
|
- `search_templates`: Search by keyword
|
||||||
|
- `list_node_templates`: Templates using specific nodes
|
||||||
|
- `get_templates_for_task`: Templates for common tasks
|
||||||
|
- `search_templates_by_metadata`: Advanced filtering
|
||||||
|
|
||||||
|
See `npm run test:templates` for usage examples.
|
||||||
|
|
||||||
|
## Time Estimates
|
||||||
|
|
||||||
|
Typical incremental update:
|
||||||
|
- Loading existing IDs: 1-2 seconds
|
||||||
|
- Fetching template list: 2-3 minutes
|
||||||
|
- Filtering new templates: instant
|
||||||
|
- Fetching details for 100 new templates: ~15 seconds (0.15s each)
|
||||||
|
- Saving and indexing: 5-10 seconds
|
||||||
|
- **Total: 3-5 minutes**
|
||||||
|
|
||||||
|
Full rebuild:
|
||||||
|
- Fetching 8000+ templates: 25-30 minutes
|
||||||
|
- Saving and indexing: 5-10 minutes
|
||||||
|
- **Total: 30-40 minutes**
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
1. **Use incremental updates** for regular maintenance
|
||||||
|
2. **Rebuild only when necessary** (corruption, major changes)
|
||||||
|
3. **Generate metadata incrementally** to avoid OpenAI costs
|
||||||
|
4. **Monitor template count** to verify updates working
|
||||||
|
5. **Keep database backed up** before major operations
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
After updating templates:
|
||||||
|
1. Test template search: `npm run test:templates`
|
||||||
|
2. Verify MCP tools work: Test in Claude Desktop
|
||||||
|
3. Check statistics in database
|
||||||
|
4. Commit changes if desired (database changes)
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- `MEMORY_N8N_UPDATE.md` - Updating n8n dependencies
|
||||||
|
- `CLAUDE.md` - Project overview and architecture
|
||||||
|
- `README.md` - User documentation
|
||||||
1464
MVP_DEPLOYMENT_PLAN.md
Normal file
1464
MVP_DEPLOYMENT_PLAN.md
Normal file
File diff suppressed because it is too large
Load Diff
484
P0-R3-TEST-PLAN.md
Normal file
484
P0-R3-TEST-PLAN.md
Normal file
@@ -0,0 +1,484 @@
|
|||||||
|
# P0-R3 Feature Test Coverage Plan
|
||||||
|
|
||||||
|
## Executive Summary
|
||||||
|
|
||||||
|
This document outlines comprehensive test coverage for the P0-R3 feature (Template-based Configuration Examples). The feature adds real-world configuration examples from popular templates to node search and essentials tools.
|
||||||
|
|
||||||
|
**Feature Overview:**
|
||||||
|
- New database table: `template_node_configs` (197 pre-extracted configurations)
|
||||||
|
- Enhanced tools: `search_nodes({includeExamples: true})` and `get_node_essentials({includeExamples: true})`
|
||||||
|
- Breaking changes: Removed `get_node_for_task` tool
|
||||||
|
|
||||||
|
## Test Files Created
|
||||||
|
|
||||||
|
### Unit Tests
|
||||||
|
|
||||||
|
#### 1. `/tests/unit/scripts/fetch-templates-extraction.test.ts` ✅
|
||||||
|
**Purpose:** Test template extraction logic from `fetch-templates.ts`
|
||||||
|
|
||||||
|
**Coverage:**
|
||||||
|
- `extractNodeConfigs()` - 90%+ coverage
|
||||||
|
- Valid workflows with multiple nodes
|
||||||
|
- Empty workflows
|
||||||
|
- Malformed compressed data
|
||||||
|
- Invalid JSON
|
||||||
|
- Nodes without parameters
|
||||||
|
- Sticky note filtering
|
||||||
|
- Credential handling
|
||||||
|
- Expression detection
|
||||||
|
- Special characters
|
||||||
|
- Large workflows (100 nodes)
|
||||||
|
|
||||||
|
- `detectExpressions()` - 100% coverage
|
||||||
|
- `={{...}}` syntax detection
|
||||||
|
- `$json` references
|
||||||
|
- `$node` references
|
||||||
|
- Nested objects
|
||||||
|
- Arrays
|
||||||
|
- Null/undefined handling
|
||||||
|
- Multiple expression types
|
||||||
|
|
||||||
|
**Test Count:** 27 tests
|
||||||
|
**Expected Coverage:** 92%+
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### 2. `/tests/unit/mcp/search-nodes-examples.test.ts` ✅
|
||||||
|
**Purpose:** Test `search_nodes` tool with includeExamples parameter
|
||||||
|
|
||||||
|
**Coverage:**
|
||||||
|
- includeExamples parameter behavior
|
||||||
|
- false: no examples returned
|
||||||
|
- undefined: no examples returned (default)
|
||||||
|
- true: examples returned
|
||||||
|
- Example data structure validation
|
||||||
|
- Top 2 limit enforcement
|
||||||
|
- Backward compatibility
|
||||||
|
- Performance (<100ms)
|
||||||
|
- Error handling (malformed JSON, database errors)
|
||||||
|
- searchNodesLIKE integration
|
||||||
|
- searchNodesFTS integration
|
||||||
|
|
||||||
|
**Test Count:** 12 tests
|
||||||
|
**Expected Coverage:** 85%+
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### 3. `/tests/unit/mcp/get-node-essentials-examples.test.ts` ✅
|
||||||
|
**Purpose:** Test `get_node_essentials` tool with includeExamples parameter
|
||||||
|
|
||||||
|
**Coverage:**
|
||||||
|
- includeExamples parameter behavior
|
||||||
|
- Full metadata structure
|
||||||
|
- configuration object
|
||||||
|
- source (template, views, complexity)
|
||||||
|
- useCases (limited to 2)
|
||||||
|
- metadata (hasCredentials, hasExpressions)
|
||||||
|
- Cache key differentiation
|
||||||
|
- Backward compatibility
|
||||||
|
- Performance (<100ms)
|
||||||
|
- Error handling
|
||||||
|
- Top 3 limit enforcement
|
||||||
|
|
||||||
|
**Test Count:** 13 tests
|
||||||
|
**Expected Coverage:** 88%+
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Integration Tests
|
||||||
|
|
||||||
|
#### 4. `/tests/integration/database/template-node-configs.test.ts` ✅
|
||||||
|
**Purpose:** Test database schema, migrations, and operations
|
||||||
|
|
||||||
|
**Coverage:**
|
||||||
|
- Schema validation
|
||||||
|
- Table creation
|
||||||
|
- All columns present
|
||||||
|
- Correct types and constraints
|
||||||
|
- CHECK constraint on complexity
|
||||||
|
- Indexes
|
||||||
|
- idx_config_node_type_rank
|
||||||
|
- idx_config_complexity
|
||||||
|
- idx_config_auth
|
||||||
|
- View: ranked_node_configs
|
||||||
|
- Top 5 per node_type
|
||||||
|
- Correct ordering
|
||||||
|
- Foreign key constraints
|
||||||
|
- CASCADE delete
|
||||||
|
- Referential integrity
|
||||||
|
- Data operations
|
||||||
|
- INSERT with all fields
|
||||||
|
- Nullable fields
|
||||||
|
- Rank updates
|
||||||
|
- Delete rank > 10
|
||||||
|
- Performance
|
||||||
|
- 1000 records < 10ms queries
|
||||||
|
- Migration idempotency
|
||||||
|
|
||||||
|
**Test Count:** 19 tests
|
||||||
|
**Expected Coverage:** 95%+
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### 5. `/tests/integration/mcp/template-examples-e2e.test.ts` ✅
|
||||||
|
**Purpose:** End-to-end integration testing
|
||||||
|
|
||||||
|
**Coverage:**
|
||||||
|
- Direct SQL queries
|
||||||
|
- Top 2 examples for search_nodes
|
||||||
|
- Top 3 examples with metadata for get_node_essentials
|
||||||
|
- Data structure validation
|
||||||
|
- Valid JSON in all fields
|
||||||
|
- Credentials when has_credentials=1
|
||||||
|
- Ranked view functionality
|
||||||
|
- Performance with 100+ configs
|
||||||
|
- Query performance < 5ms
|
||||||
|
- Complexity filtering
|
||||||
|
- Edge cases
|
||||||
|
- Non-existent node types
|
||||||
|
- Long parameters_json (100 params)
|
||||||
|
- Special characters (Unicode, emojis, symbols)
|
||||||
|
- Data integrity
|
||||||
|
- Foreign key constraints
|
||||||
|
- Cascade deletes
|
||||||
|
|
||||||
|
**Test Count:** 14 tests
|
||||||
|
**Expected Coverage:** 90%+
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Test Fixtures
|
||||||
|
|
||||||
|
#### 6. `/tests/fixtures/template-configs.ts` ✅
|
||||||
|
**Purpose:** Reusable test data
|
||||||
|
|
||||||
|
**Provides:**
|
||||||
|
- `sampleConfigs`: 7 realistic node configurations
|
||||||
|
- simpleWebhook
|
||||||
|
- webhookWithAuth
|
||||||
|
- httpRequestBasic
|
||||||
|
- httpRequestWithExpressions
|
||||||
|
- slackMessage
|
||||||
|
- codeNodeTransform
|
||||||
|
- codeNodeWithExpressions
|
||||||
|
|
||||||
|
- `sampleWorkflows`: 3 complete workflows
|
||||||
|
- webhookToSlack
|
||||||
|
- apiWorkflow
|
||||||
|
- complexWorkflow
|
||||||
|
|
||||||
|
- **Helper Functions:**
|
||||||
|
- `compressWorkflow()` - Compress to base64
|
||||||
|
- `createTemplateMetadata()` - Generate metadata
|
||||||
|
- `createConfigBatch()` - Batch create configs
|
||||||
|
- `getConfigByComplexity()` - Filter by complexity
|
||||||
|
- `getConfigsWithExpressions()` - Filter with expressions
|
||||||
|
- `getConfigsWithCredentials()` - Filter with credentials
|
||||||
|
- `createInsertStatement()` - SQL insert helper
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Existing Tests Requiring Updates
|
||||||
|
|
||||||
|
### High Priority
|
||||||
|
|
||||||
|
#### 1. `tests/unit/mcp/parameter-validation.test.ts`
|
||||||
|
**Line 480:** Remove `get_node_for_task` from legacyValidationTools array
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// REMOVE THIS:
|
||||||
|
{ name: 'get_node_for_task', args: {}, expected: 'Missing required parameters for get_node_for_task: task' },
|
||||||
|
```
|
||||||
|
|
||||||
|
**Status:** ⚠️ BREAKING CHANGE - Tool removed
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### 2. `tests/unit/mcp/tools.test.ts`
|
||||||
|
**Update:** Remove `get_node_for_task` from templates category
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// BEFORE:
|
||||||
|
templates: ['list_tasks', 'get_node_for_task', 'search_templates', ...]
|
||||||
|
|
||||||
|
// AFTER:
|
||||||
|
templates: ['list_tasks', 'search_templates', ...]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Add:** Tests for new includeExamples parameter in tool definitions
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
it('should have includeExamples parameter in search_nodes', () => {
|
||||||
|
const searchNodesTool = tools.find(t => t.name === 'search_nodes');
|
||||||
|
expect(searchNodesTool.inputSchema.properties.includeExamples).toBeDefined();
|
||||||
|
expect(searchNodesTool.inputSchema.properties.includeExamples.type).toBe('boolean');
|
||||||
|
expect(searchNodesTool.inputSchema.properties.includeExamples.default).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should have includeExamples parameter in get_node_essentials', () => {
|
||||||
|
const essentialsTool = tools.find(t => t.name === 'get_node_essentials');
|
||||||
|
expect(essentialsTool.inputSchema.properties.includeExamples).toBeDefined();
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Status:** ⚠️ REQUIRED UPDATE
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### 3. `tests/integration/mcp-protocol/session-management.test.ts`
|
||||||
|
**Remove:** Test case calling `get_node_for_task` with invalid task
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// REMOVE THIS TEST:
|
||||||
|
client.callTool({ name: 'get_node_for_task', arguments: { task: 'invalid_task' } }).catch(e => e)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Status:** ⚠️ BREAKING CHANGE
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### 4. `tests/integration/mcp-protocol/tool-invocation.test.ts`
|
||||||
|
**Remove:** Entire `get_node_for_task` describe block
|
||||||
|
|
||||||
|
**Add:** Tests for new includeExamples functionality
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
describe('search_nodes with includeExamples', () => {
|
||||||
|
it('should return examples when includeExamples is true', async () => {
|
||||||
|
const response = await client.callTool({
|
||||||
|
name: 'search_nodes',
|
||||||
|
arguments: { query: 'webhook', includeExamples: true }
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(response.results).toBeDefined();
|
||||||
|
// Examples may or may not be present depending on database
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not return examples when includeExamples is false', async () => {
|
||||||
|
const response = await client.callTool({
|
||||||
|
name: 'search_nodes',
|
||||||
|
arguments: { query: 'webhook', includeExamples: false }
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(response.results).toBeDefined();
|
||||||
|
response.results.forEach(node => {
|
||||||
|
expect(node.examples).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('get_node_essentials with includeExamples', () => {
|
||||||
|
it('should return examples with metadata when includeExamples is true', async () => {
|
||||||
|
const response = await client.callTool({
|
||||||
|
name: 'get_node_essentials',
|
||||||
|
arguments: { nodeType: 'nodes-base.webhook', includeExamples: true }
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(response.nodeType).toBeDefined();
|
||||||
|
// Examples may or may not be present depending on database
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Status:** ⚠️ REQUIRED UPDATE
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Medium Priority
|
||||||
|
|
||||||
|
#### 5. `tests/unit/services/task-templates.test.ts`
|
||||||
|
**Status:** ✅ No changes needed (TaskTemplates marked as deprecated but not removed)
|
||||||
|
|
||||||
|
**Note:** TaskTemplates remains for backward compatibility. Tests should continue to pass.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Test Execution Plan
|
||||||
|
|
||||||
|
### Phase 1: Unit Tests
|
||||||
|
```bash
|
||||||
|
# Run new unit tests
|
||||||
|
npm test tests/unit/scripts/fetch-templates-extraction.test.ts
|
||||||
|
npm test tests/unit/mcp/search-nodes-examples.test.ts
|
||||||
|
npm test tests/unit/mcp/get-node-essentials-examples.test.ts
|
||||||
|
|
||||||
|
# Expected: All pass, 52 tests
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Integration Tests
|
||||||
|
```bash
|
||||||
|
# Run new integration tests
|
||||||
|
npm test tests/integration/database/template-node-configs.test.ts
|
||||||
|
npm test tests/integration/mcp/template-examples-e2e.test.ts
|
||||||
|
|
||||||
|
# Expected: All pass, 33 tests
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 3: Update Existing Tests
|
||||||
|
```bash
|
||||||
|
# Update files as outlined above, then run:
|
||||||
|
npm test tests/unit/mcp/parameter-validation.test.ts
|
||||||
|
npm test tests/unit/mcp/tools.test.ts
|
||||||
|
npm test tests/integration/mcp-protocol/session-management.test.ts
|
||||||
|
npm test tests/integration/mcp-protocol/tool-invocation.test.ts
|
||||||
|
|
||||||
|
# Expected: All pass after updates
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: Full Test Suite
|
||||||
|
```bash
|
||||||
|
# Run all tests
|
||||||
|
npm test
|
||||||
|
|
||||||
|
# Run with coverage
|
||||||
|
npm run test:coverage
|
||||||
|
|
||||||
|
# Expected coverage improvements:
|
||||||
|
# - src/scripts/fetch-templates.ts: +20% (60% → 80%)
|
||||||
|
# - src/mcp/server.ts: +5% (75% → 80%)
|
||||||
|
# - Overall project: +2% (current → current+2%)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Coverage Expectations
|
||||||
|
|
||||||
|
### New Code Coverage
|
||||||
|
|
||||||
|
| File | Function | Target | Tests |
|
||||||
|
|------|----------|--------|-------|
|
||||||
|
| fetch-templates.ts | extractNodeConfigs | 95% | 15 tests |
|
||||||
|
| fetch-templates.ts | detectExpressions | 100% | 12 tests |
|
||||||
|
| server.ts | searchNodes (with examples) | 90% | 8 tests |
|
||||||
|
| server.ts | getNodeEssentials (with examples) | 90% | 10 tests |
|
||||||
|
| Database migration | template_node_configs | 100% | 19 tests |
|
||||||
|
|
||||||
|
### Overall Coverage Goals
|
||||||
|
|
||||||
|
- **Unit Tests:** 90%+ coverage for new code
|
||||||
|
- **Integration Tests:** All happy paths + critical error paths
|
||||||
|
- **E2E Tests:** Complete feature workflows
|
||||||
|
- **Performance:** All queries <10ms (database), <100ms (MCP)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Test Infrastructure
|
||||||
|
|
||||||
|
### Dependencies Required
|
||||||
|
All dependencies already present in `package.json`:
|
||||||
|
- vitest (test runner)
|
||||||
|
- better-sqlite3 (database)
|
||||||
|
- @vitest/coverage-v8 (coverage)
|
||||||
|
|
||||||
|
### Test Utilities Used
|
||||||
|
- TestDatabase helper (from existing test utils)
|
||||||
|
- createTestDatabaseAdapter (from existing test utils)
|
||||||
|
- Standard vitest matchers
|
||||||
|
|
||||||
|
### No New Dependencies Required ✅
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Regression Prevention
|
||||||
|
|
||||||
|
### Critical Paths Protected
|
||||||
|
|
||||||
|
1. **Backward Compatibility**
|
||||||
|
- Tools work without includeExamples parameter
|
||||||
|
- Existing workflows unchanged
|
||||||
|
- Cache keys differentiated
|
||||||
|
|
||||||
|
2. **Performance**
|
||||||
|
- No degradation when includeExamples=false
|
||||||
|
- Indexed queries <10ms
|
||||||
|
- Example fetch errors don't break responses
|
||||||
|
|
||||||
|
3. **Data Integrity**
|
||||||
|
- Foreign key constraints enforced
|
||||||
|
- JSON validation in all fields
|
||||||
|
- Rank calculations correct
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## CI/CD Integration
|
||||||
|
|
||||||
|
### GitHub Actions Updates
|
||||||
|
No changes required. Existing test commands will run new tests:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- run: npm test
|
||||||
|
- run: npm run test:coverage
|
||||||
|
```
|
||||||
|
|
||||||
|
### Coverage Thresholds
|
||||||
|
Current thresholds maintained. Expected improvements:
|
||||||
|
- Lines: +2%
|
||||||
|
- Functions: +3%
|
||||||
|
- Branches: +2%
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Manual Testing Checklist
|
||||||
|
|
||||||
|
### Pre-Deployment Verification
|
||||||
|
|
||||||
|
- [ ] Run `npm run rebuild` - Verify migration applies cleanly
|
||||||
|
- [ ] Run `npm run fetch:templates -- --extract-only` - Verify extraction works
|
||||||
|
- [ ] Check database: `SELECT COUNT(*) FROM template_node_configs` - Should be ~197
|
||||||
|
- [ ] Test MCP tool: `search_nodes({query: "webhook", includeExamples: true})`
|
||||||
|
- [ ] Test MCP tool: `get_node_essentials({nodeType: "nodes-base.webhook", includeExamples: true})`
|
||||||
|
- [ ] Verify backward compatibility: Tools work without includeExamples parameter
|
||||||
|
- [ ] Performance test: Query 100 nodes with examples < 200ms
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Rollback Plan
|
||||||
|
|
||||||
|
If issues are detected:
|
||||||
|
|
||||||
|
1. **Database Rollback:**
|
||||||
|
```sql
|
||||||
|
DROP TABLE IF EXISTS template_node_configs;
|
||||||
|
DROP VIEW IF EXISTS ranked_node_configs;
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Code Rollback:**
|
||||||
|
- Revert server.ts changes
|
||||||
|
- Revert tools.ts changes
|
||||||
|
- Restore get_node_for_task tool (if critical)
|
||||||
|
|
||||||
|
3. **Test Rollback:**
|
||||||
|
- Revert parameter-validation.test.ts
|
||||||
|
- Revert tools.test.ts
|
||||||
|
- Revert tool-invocation.test.ts
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Success Metrics
|
||||||
|
|
||||||
|
### Test Metrics
|
||||||
|
- ✅ 85+ new tests added
|
||||||
|
- ✅ 0 tests failing after updates
|
||||||
|
- ✅ Coverage increase 2%+
|
||||||
|
- ✅ All performance tests pass
|
||||||
|
|
||||||
|
### Feature Metrics
|
||||||
|
- ✅ 197 template configs extracted
|
||||||
|
- ✅ Top 2/3 examples returned correctly
|
||||||
|
- ✅ Query performance <10ms
|
||||||
|
- ✅ No backward compatibility breaks
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Conclusion
|
||||||
|
|
||||||
|
This test plan provides **comprehensive coverage** for the P0-R3 feature with:
|
||||||
|
- **85+ new tests** across unit, integration, and E2E levels
|
||||||
|
- **Complete coverage** of extraction, storage, and retrieval
|
||||||
|
- **Backward compatibility** protection
|
||||||
|
- **Performance validation** (<10ms queries)
|
||||||
|
- **Clear migration path** for existing tests
|
||||||
|
|
||||||
|
**All test files are ready for execution.** Update the 4 existing test files as outlined, then run the full test suite.
|
||||||
|
|
||||||
|
**Estimated Total Implementation Time:** 2-3 hours for updating existing tests + validation
|
||||||
69
PRIVACY.md
Normal file
69
PRIVACY.md
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
# Privacy Policy for n8n-mcp Telemetry
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
n8n-mcp collects anonymous usage statistics to help improve the tool. This data collection is designed to respect user privacy while providing valuable insights into how the tool is used.
|
||||||
|
|
||||||
|
## What We Collect
|
||||||
|
- **Anonymous User ID**: A hashed identifier derived from your machine characteristics (no personal information)
|
||||||
|
- **Tool Usage**: Which MCP tools are used and their performance metrics
|
||||||
|
- **Workflow Patterns**: Sanitized workflow structures (all sensitive data removed)
|
||||||
|
- **Error Types**: Categories of errors encountered (no error messages with user data)
|
||||||
|
- **System Information**: Platform, architecture, Node.js version, and n8n-mcp version
|
||||||
|
|
||||||
|
## What We DON'T Collect
|
||||||
|
- Personal information or usernames
|
||||||
|
- API keys, tokens, or credentials
|
||||||
|
- URLs, endpoints, or hostnames
|
||||||
|
- Email addresses or contact information
|
||||||
|
- File paths or directory structures
|
||||||
|
- Actual workflow data or parameters
|
||||||
|
- Database connection strings
|
||||||
|
- Any authentication information
|
||||||
|
|
||||||
|
## Data Sanitization
|
||||||
|
All collected data undergoes automatic sanitization:
|
||||||
|
- URLs are replaced with `[URL]` or `[REDACTED]`
|
||||||
|
- Long alphanumeric strings (potential keys) are replaced with `[KEY]`
|
||||||
|
- Email addresses are replaced with `[EMAIL]`
|
||||||
|
- Authentication-related fields are completely removed
|
||||||
|
|
||||||
|
## Data Storage
|
||||||
|
- Data is stored securely using Supabase
|
||||||
|
- Anonymous users have write-only access (cannot read data back)
|
||||||
|
- Row Level Security (RLS) policies prevent data access by anonymous users
|
||||||
|
|
||||||
|
## Opt-Out
|
||||||
|
You can disable telemetry at any time:
|
||||||
|
```bash
|
||||||
|
npx n8n-mcp telemetry disable
|
||||||
|
```
|
||||||
|
|
||||||
|
To re-enable:
|
||||||
|
```bash
|
||||||
|
npx n8n-mcp telemetry enable
|
||||||
|
```
|
||||||
|
|
||||||
|
To check status:
|
||||||
|
```bash
|
||||||
|
npx n8n-mcp telemetry status
|
||||||
|
```
|
||||||
|
|
||||||
|
## Data Usage
|
||||||
|
Collected data is used solely to:
|
||||||
|
- Understand which features are most used
|
||||||
|
- Identify common error patterns
|
||||||
|
- Improve tool performance and reliability
|
||||||
|
- Guide development priorities
|
||||||
|
|
||||||
|
## Data Retention
|
||||||
|
- Data is retained for analysis purposes
|
||||||
|
- No personal identification is possible from the collected data
|
||||||
|
|
||||||
|
## Changes to This Policy
|
||||||
|
We may update this privacy policy from time to time. Updates will be reflected in this document.
|
||||||
|
|
||||||
|
## Contact
|
||||||
|
For questions about telemetry or privacy, please open an issue on GitHub:
|
||||||
|
https://github.com/czlonkowski/n8n-mcp/issues
|
||||||
|
|
||||||
|
Last updated: 2025-09-25
|
||||||
528
README.md
528
README.md
@@ -2,11 +2,10 @@
|
|||||||
|
|
||||||
[](https://opensource.org/licenses/MIT)
|
[](https://opensource.org/licenses/MIT)
|
||||||
[](https://github.com/czlonkowski/n8n-mcp)
|
[](https://github.com/czlonkowski/n8n-mcp)
|
||||||
[](https://github.com/czlonkowski/n8n-mcp)
|
|
||||||
[](https://www.npmjs.com/package/n8n-mcp)
|
[](https://www.npmjs.com/package/n8n-mcp)
|
||||||
[](https://codecov.io/gh/czlonkowski/n8n-mcp)
|
[](https://codecov.io/gh/czlonkowski/n8n-mcp)
|
||||||
[](https://github.com/czlonkowski/n8n-mcp/actions)
|
[](https://github.com/czlonkowski/n8n-mcp/actions)
|
||||||
[](https://github.com/n8n-io/n8n)
|
[](https://github.com/n8n-io/n8n)
|
||||||
[](https://github.com/czlonkowski/n8n-mcp/pkgs/container/n8n-mcp)
|
[](https://github.com/czlonkowski/n8n-mcp/pkgs/container/n8n-mcp)
|
||||||
[](https://railway.com/deploy/n8n-mcp?referralCode=n8n-mcp)
|
[](https://railway.com/deploy/n8n-mcp?referralCode=n8n-mcp)
|
||||||
|
|
||||||
@@ -16,11 +15,13 @@ A Model Context Protocol (MCP) server that provides AI assistants with comprehen
|
|||||||
|
|
||||||
n8n-MCP serves as a bridge between n8n's workflow automation platform and AI models, enabling them to understand and work with n8n nodes effectively. It provides structured access to:
|
n8n-MCP serves as a bridge between n8n's workflow automation platform and AI models, enabling them to understand and work with n8n nodes effectively. It provides structured access to:
|
||||||
|
|
||||||
- 📚 **535 n8n nodes** from both n8n-nodes-base and @n8n/n8n-nodes-langchain
|
- 📚 **536 n8n nodes** from both n8n-nodes-base and @n8n/n8n-nodes-langchain
|
||||||
- 🔧 **Node properties** - 99% coverage with detailed schemas
|
- 🔧 **Node properties** - 99% coverage with detailed schemas
|
||||||
- ⚡ **Node operations** - 63.6% coverage of available actions
|
- ⚡ **Node operations** - 63.6% coverage of available actions
|
||||||
- 📄 **Documentation** - 90% coverage from official n8n docs (including AI nodes)
|
- 📄 **Documentation** - 90% coverage from official n8n docs (including AI nodes)
|
||||||
- 🤖 **AI tools** - 263 AI-capable nodes detected with full documentation
|
- 🤖 **AI tools** - 263 AI-capable nodes detected with full documentation
|
||||||
|
- 💡 **Real-world examples** - 2,646 pre-extracted configurations from popular templates
|
||||||
|
- 🎯 **Template library** - 2,500+ workflow templates with smart filtering
|
||||||
|
|
||||||
|
|
||||||
## ⚠️ Important Safety Warning
|
## ⚠️ Important Safety Warning
|
||||||
@@ -197,10 +198,36 @@ Add to Claude Desktop config:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
>💡 Tip: If you’re running n8n locally on the same machine (e.g., via Docker), use http://host.docker.internal:5678 as the N8N_API_URL.
|
>💡 Tip: If you're running n8n locally on the same machine (e.g., via Docker), use http://host.docker.internal:5678 as the N8N_API_URL.
|
||||||
|
|
||||||
> **Note**: The n8n API credentials are optional. Without them, you'll have access to all documentation and validation tools. With them, you'll additionally get workflow management capabilities (create, update, execute workflows).
|
> **Note**: The n8n API credentials are optional. Without them, you'll have access to all documentation and validation tools. With them, you'll additionally get workflow management capabilities (create, update, execute workflows).
|
||||||
|
|
||||||
|
### 🏠 Local n8n Instance Configuration
|
||||||
|
|
||||||
|
If you're running n8n locally (e.g., `http://localhost:5678` or Docker), you need to allow localhost webhooks:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"n8n-mcp": {
|
||||||
|
"command": "docker",
|
||||||
|
"args": [
|
||||||
|
"run", "-i", "--rm", "--init",
|
||||||
|
"-e", "MCP_MODE=stdio",
|
||||||
|
"-e", "LOG_LEVEL=error",
|
||||||
|
"-e", "DISABLE_CONSOLE_OUTPUT=true",
|
||||||
|
"-e", "N8N_API_URL=http://host.docker.internal:5678",
|
||||||
|
"-e", "N8N_API_KEY=your-api-key",
|
||||||
|
"-e", "WEBHOOK_SECURITY_MODE=moderate",
|
||||||
|
"ghcr.io/czlonkowski/n8n-mcp:latest"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
> ⚠️ **Important:** Set `WEBHOOK_SECURITY_MODE=moderate` to allow webhooks to your local n8n instance. This is safe for local development while still blocking private networks and cloud metadata.
|
||||||
|
|
||||||
**Important:** The `-i` flag is required for MCP stdio communication.
|
**Important:** The `-i` flag is required for MCP stdio communication.
|
||||||
|
|
||||||
> 🔧 If you encounter any issues with Docker, check our [Docker Troubleshooting Guide](./docs/DOCKER_TROUBLESHOOTING.md).
|
> 🔧 If you encounter any issues with Docker, check our [Docker Troubleshooting Guide](./docs/DOCKER_TROUBLESHOOTING.md).
|
||||||
@@ -212,6 +239,51 @@ Add to Claude Desktop config:
|
|||||||
|
|
||||||
**Restart Claude Desktop after updating configuration** - That's it! 🎉
|
**Restart Claude Desktop after updating configuration** - That's it! 🎉
|
||||||
|
|
||||||
|
## 🔐 Privacy & Telemetry
|
||||||
|
|
||||||
|
n8n-mcp collects anonymous usage statistics to improve the tool. [View our privacy policy](./PRIVACY.md).
|
||||||
|
|
||||||
|
### Opting Out
|
||||||
|
|
||||||
|
**For npx users:**
|
||||||
|
```bash
|
||||||
|
npx n8n-mcp telemetry disable
|
||||||
|
```
|
||||||
|
|
||||||
|
**For Docker users:**
|
||||||
|
Add the following environment variable to your Docker configuration:
|
||||||
|
```json
|
||||||
|
"-e", "N8N_MCP_TELEMETRY_DISABLED=true"
|
||||||
|
```
|
||||||
|
|
||||||
|
Example in Claude Desktop config:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"n8n-mcp": {
|
||||||
|
"command": "docker",
|
||||||
|
"args": [
|
||||||
|
"run",
|
||||||
|
"-i",
|
||||||
|
"--rm",
|
||||||
|
"--init",
|
||||||
|
"-e", "MCP_MODE=stdio",
|
||||||
|
"-e", "LOG_LEVEL=error",
|
||||||
|
"-e", "N8N_MCP_TELEMETRY_DISABLED=true",
|
||||||
|
"ghcr.io/czlonkowski/n8n-mcp:latest"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**For docker-compose users:**
|
||||||
|
Set in your environment file or docker-compose.yml:
|
||||||
|
```yaml
|
||||||
|
environment:
|
||||||
|
N8N_MCP_TELEMETRY_DISABLED: "true"
|
||||||
|
```
|
||||||
|
|
||||||
## 💖 Support This Project
|
## 💖 Support This Project
|
||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
@@ -346,133 +418,295 @@ Step-by-step tutorial for connecting n8n-MCP to Cursor IDE with custom rules.
|
|||||||
### [Windsurf](./docs/WINDSURF_SETUP.md)
|
### [Windsurf](./docs/WINDSURF_SETUP.md)
|
||||||
Complete guide for integrating n8n-MCP with Windsurf using project rules.
|
Complete guide for integrating n8n-MCP with Windsurf using project rules.
|
||||||
|
|
||||||
|
### [Codex](./docs/CODEX_SETUP.md)
|
||||||
|
Complete guide for integrating n8n-MCP with Codex.
|
||||||
|
|
||||||
## 🤖 Claude Project Setup
|
## 🤖 Claude Project Setup
|
||||||
|
|
||||||
For the best results when using n8n-MCP with Claude Projects, use these enhanced system instructions:
|
For the best results when using n8n-MCP with Claude Projects, use these enhanced system instructions:
|
||||||
|
|
||||||
```markdown
|
````markdown
|
||||||
You are an expert in n8n automation software using n8n-MCP tools. Your role is to design, build, and validate n8n workflows with maximum accuracy and efficiency.
|
You are an expert in n8n automation software using n8n-MCP tools. Your role is to design, build, and validate n8n workflows with maximum accuracy and efficiency.
|
||||||
|
|
||||||
## Core Workflow Process
|
## Core Principles
|
||||||
|
|
||||||
1. **ALWAYS start new conversation with**: `tools_documentation()` to understand best practices and available tools.
|
### 1. Silent Execution
|
||||||
|
CRITICAL: Execute tools without commentary. Only respond AFTER all tools complete.
|
||||||
|
|
||||||
2. **Discovery Phase** - Find the right nodes:
|
❌ BAD: "Let me search for Slack nodes... Great! Now let me get details..."
|
||||||
- Think deeply about user request and the logic you are going to build to fulfill it. Ask follow-up questions to clarify the user's intent, if something is unclear. Then, proceed with the rest of your instructions.
|
✅ GOOD: [Execute search_nodes and get_node_essentials in parallel, then respond]
|
||||||
- `search_nodes({query: 'keyword'})` - Search by functionality
|
|
||||||
|
### 2. Parallel Execution
|
||||||
|
When operations are independent, execute them in parallel for maximum performance.
|
||||||
|
|
||||||
|
✅ GOOD: Call search_nodes, list_nodes, and search_templates simultaneously
|
||||||
|
❌ BAD: Sequential tool calls (await each one before the next)
|
||||||
|
|
||||||
|
### 3. Templates First
|
||||||
|
ALWAYS check templates before building from scratch (2,500+ available).
|
||||||
|
|
||||||
|
### 4. Multi-Level Validation
|
||||||
|
Use validate_node_minimal → validate_node_operation → validate_workflow pattern.
|
||||||
|
|
||||||
|
### 5. Never Trust Defaults
|
||||||
|
⚠️ CRITICAL: Default parameter values are the #1 source of runtime failures.
|
||||||
|
ALWAYS explicitly configure ALL parameters that control node behavior.
|
||||||
|
|
||||||
|
## Workflow Process
|
||||||
|
|
||||||
|
1. **Start**: Call `tools_documentation()` for best practices
|
||||||
|
|
||||||
|
2. **Template Discovery Phase** (FIRST - parallel when searching multiple)
|
||||||
|
- `search_templates_by_metadata({complexity: "simple"})` - Smart filtering
|
||||||
|
- `get_templates_for_task('webhook_processing')` - Curated by task
|
||||||
|
- `search_templates('slack notification')` - Text search
|
||||||
|
- `list_node_templates(['n8n-nodes-base.slack'])` - By node type
|
||||||
|
|
||||||
|
**Filtering strategies**:
|
||||||
|
- Beginners: `complexity: "simple"` + `maxSetupMinutes: 30`
|
||||||
|
- By role: `targetAudience: "marketers"` | `"developers"` | `"analysts"`
|
||||||
|
- By time: `maxSetupMinutes: 15` for quick wins
|
||||||
|
- By service: `requiredService: "openai"` for compatibility
|
||||||
|
|
||||||
|
3. **Node Discovery** (if no suitable template - parallel execution)
|
||||||
|
- Think deeply about requirements. Ask clarifying questions if unclear.
|
||||||
|
- `search_nodes({query: 'keyword', includeExamples: true})` - Parallel for multiple nodes
|
||||||
- `list_nodes({category: 'trigger'})` - Browse by category
|
- `list_nodes({category: 'trigger'})` - Browse by category
|
||||||
- `list_ai_tools()` - See AI-capable nodes (remember: ANY node can be an AI tool!)
|
- `list_ai_tools()` - AI-capable nodes
|
||||||
|
|
||||||
3. **Configuration Phase** - Get node details efficiently:
|
4. **Configuration Phase** (parallel for multiple nodes)
|
||||||
- `get_node_essentials(nodeType)` - Start here! Only 10-20 essential properties
|
- `get_node_essentials(nodeType, {includeExamples: true})` - 10-20 key properties
|
||||||
- `search_node_properties(nodeType, 'auth')` - Find specific properties
|
- `search_node_properties(nodeType, 'auth')` - Find specific properties
|
||||||
- `get_node_for_task('send_email')` - Get pre-configured templates
|
- `get_node_documentation(nodeType)` - Human-readable docs
|
||||||
- `get_node_documentation(nodeType)` - Human-readable docs when needed
|
- Show workflow architecture to user for approval before proceeding
|
||||||
- It is good common practice to show a visual representation of the workflow architecture to the user and asking for opinion, before moving forward.
|
|
||||||
|
|
||||||
4. **Pre-Validation Phase** - Validate BEFORE building:
|
5. **Validation Phase** (parallel for multiple nodes)
|
||||||
- `validate_node_minimal(nodeType, config)` - Quick required fields check
|
- `validate_node_minimal(nodeType, config)` - Quick required fields check
|
||||||
- `validate_node_operation(nodeType, config, profile)` - Full operation-aware validation
|
- `validate_node_operation(nodeType, config, 'runtime')` - Full validation with fixes
|
||||||
- Fix any validation errors before proceeding
|
- Fix ALL errors before proceeding
|
||||||
|
|
||||||
5. **Building Phase** - Create the workflow:
|
6. **Building Phase**
|
||||||
- Use validated configurations from step 4
|
- If using template: `get_template(templateId, {mode: "full"})`
|
||||||
|
- **MANDATORY ATTRIBUTION**: "Based on template by **[author.name]** (@[username]). View at: [url]"
|
||||||
|
- Build from validated configurations
|
||||||
|
- ⚠️ EXPLICITLY set ALL parameters - never rely on defaults
|
||||||
- Connect nodes with proper structure
|
- Connect nodes with proper structure
|
||||||
- Add error handling where appropriate
|
- Add error handling
|
||||||
- Use expressions like $json, $node["NodeName"].json
|
- Use n8n expressions: $json, $node["NodeName"].json
|
||||||
- Build the workflow in an artifact for easy editing downstream (unless the user asked to create in n8n instance)
|
- Build in artifact (unless deploying to n8n instance)
|
||||||
|
|
||||||
6. **Workflow Validation Phase** - Validate complete workflow:
|
7. **Workflow Validation** (before deployment)
|
||||||
- `validate_workflow(workflow)` - Complete validation including connections
|
- `validate_workflow(workflow)` - Complete validation
|
||||||
- `validate_workflow_connections(workflow)` - Check structure and AI tool connections
|
- `validate_workflow_connections(workflow)` - Structure check
|
||||||
- `validate_workflow_expressions(workflow)` - Validate all n8n expressions
|
- `validate_workflow_expressions(workflow)` - Expression validation
|
||||||
- Fix any issues found before deployment
|
- Fix ALL issues before deployment
|
||||||
|
|
||||||
7. **Deployment Phase** (if n8n API configured):
|
8. **Deployment** (if n8n API configured)
|
||||||
- `n8n_create_workflow(workflow)` - Deploy validated workflow
|
- `n8n_create_workflow(workflow)` - Deploy
|
||||||
- `n8n_validate_workflow({id: 'workflow-id'})` - Post-deployment validation
|
- `n8n_validate_workflow({id})` - Post-deployment check
|
||||||
- `n8n_update_partial_workflow()` - Make incremental updates using diffs
|
- `n8n_update_partial_workflow({id, operations: [...]})` - Batch updates
|
||||||
- `n8n_trigger_webhook_workflow()` - Test webhook workflows
|
- `n8n_trigger_webhook_workflow()` - Test webhooks
|
||||||
|
|
||||||
## Key Insights
|
## Critical Warnings
|
||||||
|
|
||||||
- **USE CODE NODE ONLY WHEN IT IS NECESSARY** - always prefer to use standard nodes over code node. Use code node only when you are sure you need it.
|
### ⚠️ Never Trust Defaults
|
||||||
- **VALIDATE EARLY AND OFTEN** - Catch errors before they reach deployment
|
Default values cause runtime failures. Example:
|
||||||
- **USE DIFF UPDATES** - Use n8n_update_partial_workflow for 80-90% token savings
|
```js
|
||||||
- **ANY node can be an AI tool** - not just those with usableAsTool=true
|
// ❌ FAILS at runtime
|
||||||
- **Pre-validate configurations** - Use validate_node_minimal before building
|
{resource: "message", operation: "post", text: "Hello"}
|
||||||
- **Post-validate workflows** - Always validate complete workflows before deployment
|
|
||||||
- **Incremental updates** - Use diff operations for existing workflows
|
// ✅ WORKS - all parameters explicit
|
||||||
- **Test thoroughly** - Validate both locally and after deployment to n8n
|
{resource: "message", operation: "post", select: "channel", channelId: "C123", text: "Hello"}
|
||||||
|
```
|
||||||
|
|
||||||
|
### ⚠️ Example Availability
|
||||||
|
`includeExamples: true` returns real configurations from workflow templates.
|
||||||
|
- Coverage varies by node popularity
|
||||||
|
- When no examples available, use `get_node_essentials` + `validate_node_minimal`
|
||||||
|
|
||||||
## Validation Strategy
|
## Validation Strategy
|
||||||
|
|
||||||
### Before Building:
|
### Level 1 - Quick Check (before building)
|
||||||
1. validate_node_minimal() - Check required fields
|
`validate_node_minimal(nodeType, config)` - Required fields only (<100ms)
|
||||||
2. validate_node_operation() - Full configuration validation
|
|
||||||
3. Fix all errors before proceeding
|
|
||||||
|
|
||||||
### After Building:
|
### Level 2 - Comprehensive (before building)
|
||||||
1. validate_workflow() - Complete workflow validation
|
`validate_node_operation(nodeType, config, 'runtime')` - Full validation with fixes
|
||||||
2. validate_workflow_connections() - Structure validation
|
|
||||||
3. validate_workflow_expressions() - Expression syntax check
|
|
||||||
|
|
||||||
### After Deployment:
|
### Level 3 - Complete (after building)
|
||||||
1. n8n_validate_workflow({id}) - Validate deployed workflow
|
`validate_workflow(workflow)` - Connections, expressions, AI tools
|
||||||
2. n8n_list_executions() - Monitor execution status
|
|
||||||
3. n8n_update_partial_workflow() - Fix issues using diffs
|
|
||||||
|
|
||||||
## Response Structure
|
### Level 4 - Post-Deployment
|
||||||
|
1. `n8n_validate_workflow({id})` - Validate deployed workflow
|
||||||
|
2. `n8n_autofix_workflow({id})` - Auto-fix common errors
|
||||||
|
3. `n8n_list_executions()` - Monitor execution status
|
||||||
|
|
||||||
1. **Discovery**: Show available nodes and options
|
## Response Format
|
||||||
2. **Pre-Validation**: Validate node configurations first
|
|
||||||
3. **Configuration**: Show only validated, working configs
|
### Initial Creation
|
||||||
4. **Building**: Construct workflow with validated components
|
```
|
||||||
5. **Workflow Validation**: Full workflow validation results
|
[Silent tool execution in parallel]
|
||||||
6. **Deployment**: Deploy only after all validations pass
|
|
||||||
7. **Post-Validation**: Verify deployment succeeded
|
Created workflow:
|
||||||
|
- Webhook trigger → Slack notification
|
||||||
|
- Configured: POST /webhook → #general channel
|
||||||
|
|
||||||
|
Validation: ✅ All checks passed
|
||||||
|
```
|
||||||
|
|
||||||
|
### Modifications
|
||||||
|
```
|
||||||
|
[Silent tool execution]
|
||||||
|
|
||||||
|
Updated workflow:
|
||||||
|
- Added error handling to HTTP node
|
||||||
|
- Fixed required Slack parameters
|
||||||
|
|
||||||
|
Changes validated successfully.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Batch Operations
|
||||||
|
|
||||||
|
Use `n8n_update_partial_workflow` with multiple operations in a single call:
|
||||||
|
|
||||||
|
✅ GOOD - Batch multiple operations:
|
||||||
|
```json
|
||||||
|
n8n_update_partial_workflow({
|
||||||
|
id: "wf-123",
|
||||||
|
operations: [
|
||||||
|
{type: "updateNode", nodeId: "slack-1", changes: {...}},
|
||||||
|
{type: "updateNode", nodeId: "http-1", changes: {...}},
|
||||||
|
{type: "cleanStaleConnections"}
|
||||||
|
]
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
❌ BAD - Separate calls:
|
||||||
|
```js
|
||||||
|
n8n_update_partial_workflow({id: "wf-123", operations: [{...}]})
|
||||||
|
n8n_update_partial_workflow({id: "wf-123", operations: [{...}]})
|
||||||
|
```
|
||||||
|
|
||||||
## Example Workflow
|
## Example Workflow
|
||||||
|
|
||||||
### 1. Discovery & Configuration
|
### Template-First Approach
|
||||||
search_nodes({query: 'slack'})
|
|
||||||
get_node_essentials('n8n-nodes-base.slack')
|
|
||||||
|
|
||||||
### 2. Pre-Validation
|
```
|
||||||
validate_node_minimal('n8n-nodes-base.slack', {resource:'message', operation:'send'})
|
// STEP 1: Template Discovery (parallel execution)
|
||||||
|
[Silent execution]
|
||||||
|
search_templates_by_metadata({
|
||||||
|
requiredService: 'slack',
|
||||||
|
complexity: 'simple',
|
||||||
|
targetAudience: 'marketers'
|
||||||
|
})
|
||||||
|
get_templates_for_task('slack_integration')
|
||||||
|
|
||||||
|
// STEP 2: Use template
|
||||||
|
get_template(templateId, {mode: 'full'})
|
||||||
|
validate_workflow(workflow)
|
||||||
|
|
||||||
|
// Response after all tools complete:
|
||||||
|
"Found template by **David Ashby** (@cfomodz).
|
||||||
|
View at: https://n8n.io/workflows/2414
|
||||||
|
|
||||||
|
Validation: ✅ All checks passed"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Building from Scratch (if no template)
|
||||||
|
|
||||||
|
```
|
||||||
|
// STEP 1: Discovery (parallel execution)
|
||||||
|
[Silent execution]
|
||||||
|
search_nodes({query: 'slack', includeExamples: true})
|
||||||
|
list_nodes({category: 'communication'})
|
||||||
|
|
||||||
|
// STEP 2: Configuration (parallel execution)
|
||||||
|
[Silent execution]
|
||||||
|
get_node_essentials('n8n-nodes-base.slack', {includeExamples: true})
|
||||||
|
get_node_essentials('n8n-nodes-base.webhook', {includeExamples: true})
|
||||||
|
|
||||||
|
// STEP 3: Validation (parallel execution)
|
||||||
|
[Silent execution]
|
||||||
|
validate_node_minimal('n8n-nodes-base.slack', config)
|
||||||
validate_node_operation('n8n-nodes-base.slack', fullConfig, 'runtime')
|
validate_node_operation('n8n-nodes-base.slack', fullConfig, 'runtime')
|
||||||
|
|
||||||
### 3. Build Workflow
|
// STEP 4: Build
|
||||||
// Create workflow JSON with validated configs
|
// Construct workflow with validated configs
|
||||||
|
// ⚠️ Set ALL parameters explicitly
|
||||||
|
|
||||||
### 4. Workflow Validation
|
// STEP 5: Validate
|
||||||
|
[Silent execution]
|
||||||
validate_workflow(workflowJson)
|
validate_workflow(workflowJson)
|
||||||
validate_workflow_connections(workflowJson)
|
|
||||||
validate_workflow_expressions(workflowJson)
|
|
||||||
|
|
||||||
### 5. Deploy (if configured)
|
// Response after all tools complete:
|
||||||
n8n_create_workflow(validatedWorkflow)
|
"Created workflow: Webhook → Slack
|
||||||
n8n_validate_workflow({id: createdWorkflowId})
|
Validation: ✅ Passed"
|
||||||
|
```
|
||||||
|
|
||||||
### 6. Update Using Diffs
|
### Batch Updates
|
||||||
|
|
||||||
|
```js
|
||||||
|
// ONE call with multiple operations
|
||||||
n8n_update_partial_workflow({
|
n8n_update_partial_workflow({
|
||||||
workflowId: id,
|
id: "wf-123",
|
||||||
operations: [
|
operations: [
|
||||||
{type: 'updateNode', nodeId: 'slack1', changes: {position: [100, 200]}}
|
{type: "updateNode", nodeId: "slack-1", changes: {position: [100, 200]}},
|
||||||
|
{type: "updateNode", nodeId: "http-1", changes: {position: [300, 200]}},
|
||||||
|
{type: "cleanStaleConnections"}
|
||||||
]
|
]
|
||||||
})
|
})
|
||||||
|
```
|
||||||
|
|
||||||
## Important Rules
|
## Important Rules
|
||||||
|
|
||||||
- ALWAYS validate before building
|
### Core Behavior
|
||||||
- ALWAYS validate after building
|
1. **Silent execution** - No commentary between tools
|
||||||
- NEVER deploy unvalidated workflows
|
2. **Parallel by default** - Execute independent operations simultaneously
|
||||||
- USE diff operations for updates (80-90% token savings)
|
3. **Templates first** - Always check before building (2,500+ available)
|
||||||
- STATE validation results clearly
|
4. **Multi-level validation** - Quick check → Full validation → Workflow validation
|
||||||
- FIX all errors before proceeding
|
5. **Never trust defaults** - Explicitly configure ALL parameters
|
||||||
```
|
|
||||||
|
|
||||||
Save these instructions in your Claude Project for optimal n8n workflow assistance with comprehensive validation.
|
### Attribution & Credits
|
||||||
|
- **MANDATORY TEMPLATE ATTRIBUTION**: Share author name, username, and n8n.io link
|
||||||
|
- **Template validation** - Always validate before deployment (may need updates)
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
- **Batch operations** - Use diff operations with multiple changes in one call
|
||||||
|
- **Parallel execution** - Search, validate, and configure simultaneously
|
||||||
|
- **Template metadata** - Use smart filtering for faster discovery
|
||||||
|
|
||||||
|
### Code Node Usage
|
||||||
|
- **Avoid when possible** - Prefer standard nodes
|
||||||
|
- **Only when necessary** - Use code node as last resort
|
||||||
|
- **AI tool capability** - ANY node can be an AI tool (not just marked ones)
|
||||||
|
|
||||||
|
### Most Popular n8n Nodes (for get_node_essentials):
|
||||||
|
|
||||||
|
1. **n8n-nodes-base.code** - JavaScript/Python scripting
|
||||||
|
2. **n8n-nodes-base.httpRequest** - HTTP API calls
|
||||||
|
3. **n8n-nodes-base.webhook** - Event-driven triggers
|
||||||
|
4. **n8n-nodes-base.set** - Data transformation
|
||||||
|
5. **n8n-nodes-base.if** - Conditional routing
|
||||||
|
6. **n8n-nodes-base.manualTrigger** - Manual workflow execution
|
||||||
|
7. **n8n-nodes-base.respondToWebhook** - Webhook responses
|
||||||
|
8. **n8n-nodes-base.scheduleTrigger** - Time-based triggers
|
||||||
|
9. **@n8n/n8n-nodes-langchain.agent** - AI agents
|
||||||
|
10. **n8n-nodes-base.googleSheets** - Spreadsheet integration
|
||||||
|
11. **n8n-nodes-base.merge** - Data merging
|
||||||
|
12. **n8n-nodes-base.switch** - Multi-branch routing
|
||||||
|
13. **n8n-nodes-base.telegram** - Telegram bot integration
|
||||||
|
14. **@n8n/n8n-nodes-langchain.lmChatOpenAi** - OpenAI chat models
|
||||||
|
15. **n8n-nodes-base.splitInBatches** - Batch processing
|
||||||
|
16. **n8n-nodes-base.openAi** - OpenAI legacy node
|
||||||
|
17. **n8n-nodes-base.gmail** - Email automation
|
||||||
|
18. **n8n-nodes-base.function** - Custom functions
|
||||||
|
19. **n8n-nodes-base.stickyNote** - Workflow documentation
|
||||||
|
20. **n8n-nodes-base.executeWorkflowTrigger** - Sub-workflow calls
|
||||||
|
|
||||||
|
**Note:** LangChain nodes use the `@n8n/n8n-nodes-langchain.` prefix, while core nodes use the `n8n-nodes-base.` prefix.
|
||||||
|
|
||||||
|
````
|
||||||
|
|
||||||
|
Save these instructions in your Claude Project for optimal n8n workflow assistance with intelligent template discovery.
|
||||||
|
|
||||||
## 🚨 Important: Sharing Guidelines
|
## 🚨 Important: Sharing Guidelines
|
||||||
|
|
||||||
@@ -488,11 +722,16 @@ This tool was created to benefit everyone in the n8n community without friction.
|
|||||||
## Features
|
## Features
|
||||||
|
|
||||||
- **🔍 Smart Node Search**: Find nodes by name, category, or functionality
|
- **🔍 Smart Node Search**: Find nodes by name, category, or functionality
|
||||||
- **📖 Essential Properties**: Get only the 10-20 properties that matter (NEW in v2.4.0)
|
- **📖 Essential Properties**: Get only the 10-20 properties that matter
|
||||||
- **🎯 Task Templates**: Pre-configured settings for common automation tasks
|
- **💡 Real-World Examples**: 2,646 pre-extracted configurations from popular templates
|
||||||
- **✅ Config Validation**: Validate node configurations before deployment
|
- **✅ Config Validation**: Validate node configurations before deployment
|
||||||
|
- **🤖 AI Workflow Validation**: Comprehensive validation for AI Agent workflows (NEW in v2.17.0!)
|
||||||
|
- Missing language model detection
|
||||||
|
- AI tool connection validation
|
||||||
|
- Streaming mode constraints
|
||||||
|
- Memory and output parser checks
|
||||||
- **🔗 Dependency Analysis**: Understand property relationships and conditions
|
- **🔗 Dependency Analysis**: Understand property relationships and conditions
|
||||||
- **💡 Working Examples**: Real-world examples for immediate use
|
- **🎯 Template Discovery**: 2,500+ workflow templates with smart filtering
|
||||||
- **⚡ Fast Response**: Average query time ~12ms with optimized SQLite
|
- **⚡ Fast Response**: Average query time ~12ms with optimized SQLite
|
||||||
- **🌐 Universal Compatibility**: Works with any Node.js version
|
- **🌐 Universal Compatibility**: Works with any Node.js version
|
||||||
|
|
||||||
@@ -518,20 +757,32 @@ Once connected, Claude can use these powerful tools:
|
|||||||
- **`tools_documentation`** - Get documentation for any MCP tool (START HERE!)
|
- **`tools_documentation`** - Get documentation for any MCP tool (START HERE!)
|
||||||
- **`list_nodes`** - List all n8n nodes with filtering options
|
- **`list_nodes`** - List all n8n nodes with filtering options
|
||||||
- **`get_node_info`** - Get comprehensive information about a specific node
|
- **`get_node_info`** - Get comprehensive information about a specific node
|
||||||
- **`get_node_essentials`** - Get only essential properties with examples (10-20 properties instead of 200+)
|
- **`get_node_essentials`** - Get only essential properties (10-20 instead of 200+). Use `includeExamples: true` to get top 3 real-world configurations from popular templates
|
||||||
- **`search_nodes`** - Full-text search across all node documentation
|
- **`search_nodes`** - Full-text search across all node documentation. Use `includeExamples: true` to get top 2 real-world configurations per node from templates
|
||||||
- **`search_node_properties`** - Find specific properties within nodes
|
- **`search_node_properties`** - Find specific properties within nodes
|
||||||
- **`list_ai_tools`** - List all AI-capable nodes (ANY node can be used as AI tool!)
|
- **`list_ai_tools`** - List all AI-capable nodes (ANY node can be used as AI tool!)
|
||||||
- **`get_node_as_tool_info`** - Get guidance on using any node as an AI tool
|
- **`get_node_as_tool_info`** - Get guidance on using any node as an AI tool
|
||||||
|
|
||||||
### Advanced Tools
|
### Template Tools
|
||||||
- **`get_node_for_task`** - Pre-configured node settings for common tasks
|
- **`list_templates`** - Browse all templates with descriptions and optional metadata (2,500+ templates)
|
||||||
- **`list_tasks`** - Discover available task templates
|
- **`search_templates`** - Text search across template names and descriptions
|
||||||
- **`validate_node_operation`** - Validate node configurations (operation-aware, profiles support)
|
- **`search_templates_by_metadata`** - Advanced filtering by complexity, setup time, services, audience
|
||||||
- **`validate_node_minimal`** - Quick validation for just required fields
|
- **`list_node_templates`** - Find templates using specific nodes
|
||||||
- **`validate_workflow`** - Complete workflow validation including AI tool connections
|
- **`get_template`** - Get complete workflow JSON for import
|
||||||
|
- **`get_templates_for_task`** - Curated templates for common automation tasks
|
||||||
|
|
||||||
|
### Validation Tools
|
||||||
|
- **`validate_workflow`** - Complete workflow validation including **AI Agent validation** (NEW in v2.17.0!)
|
||||||
|
- Detects missing language model connections
|
||||||
|
- Validates AI tool connections (no false warnings)
|
||||||
|
- Enforces streaming mode constraints
|
||||||
|
- Checks memory and output parser configurations
|
||||||
- **`validate_workflow_connections`** - Check workflow structure and AI tool connections
|
- **`validate_workflow_connections`** - Check workflow structure and AI tool connections
|
||||||
- **`validate_workflow_expressions`** - Validate n8n expressions including $fromAI()
|
- **`validate_workflow_expressions`** - Validate n8n expressions including $fromAI()
|
||||||
|
- **`validate_node_operation`** - Validate node configurations (operation-aware, profiles support)
|
||||||
|
- **`validate_node_minimal`** - Quick validation for just required fields
|
||||||
|
|
||||||
|
### Advanced Tools
|
||||||
- **`get_property_dependencies`** - Analyze property visibility conditions
|
- **`get_property_dependencies`** - Analyze property visibility conditions
|
||||||
- **`get_node_documentation`** - Get parsed documentation from n8n-docs
|
- **`get_node_documentation`** - Get parsed documentation from n8n-docs
|
||||||
- **`get_database_statistics`** - View database metrics and coverage
|
- **`get_database_statistics`** - View database metrics and coverage
|
||||||
@@ -550,6 +801,7 @@ These powerful tools allow you to manage n8n workflows directly from Claude. The
|
|||||||
- **`n8n_delete_workflow`** - Delete workflows permanently
|
- **`n8n_delete_workflow`** - Delete workflows permanently
|
||||||
- **`n8n_list_workflows`** - List workflows with filtering and pagination
|
- **`n8n_list_workflows`** - List workflows with filtering and pagination
|
||||||
- **`n8n_validate_workflow`** - Validate workflows already in n8n by ID (NEW in v2.6.3)
|
- **`n8n_validate_workflow`** - Validate workflows already in n8n by ID (NEW in v2.6.3)
|
||||||
|
- **`n8n_autofix_workflow`** - Automatically fix common workflow errors (NEW in v2.13.0!)
|
||||||
|
|
||||||
#### Execution Management
|
#### Execution Management
|
||||||
- **`n8n_trigger_webhook_workflow`** - Trigger workflows via webhook URL
|
- **`n8n_trigger_webhook_workflow`** - Trigger workflows via webhook URL
|
||||||
@@ -565,14 +817,17 @@ These powerful tools allow you to manage n8n workflows directly from Claude. The
|
|||||||
### Example Usage
|
### Example Usage
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
// Get essentials for quick configuration
|
// Get essentials with real-world examples from templates
|
||||||
get_node_essentials("nodes-base.httpRequest")
|
get_node_essentials({
|
||||||
|
nodeType: "nodes-base.httpRequest",
|
||||||
|
includeExamples: true // Returns top 3 configs from popular templates
|
||||||
|
})
|
||||||
|
|
||||||
// Find nodes for a specific task
|
// Search nodes with configuration examples
|
||||||
search_nodes({ query: "send email gmail" })
|
search_nodes({
|
||||||
|
query: "send email gmail",
|
||||||
// Get pre-configured settings
|
includeExamples: true // Returns top 2 configs per node
|
||||||
get_node_for_task("send_email")
|
})
|
||||||
|
|
||||||
// Validate before deployment
|
// Validate before deployment
|
||||||
validate_node_operation({
|
validate_node_operation({
|
||||||
@@ -663,12 +918,14 @@ npm run dev:http # HTTP dev mode
|
|||||||
|
|
||||||
## 📊 Metrics & Coverage
|
## 📊 Metrics & Coverage
|
||||||
|
|
||||||
Current database coverage (n8n v1.106.3):
|
Current database coverage (n8n v1.113.3):
|
||||||
|
|
||||||
- ✅ **535/535** nodes loaded (100%)
|
- ✅ **536/536** nodes loaded (100%)
|
||||||
- ✅ **528** nodes with properties (98.7%)
|
- ✅ **528** nodes with properties (98.7%)
|
||||||
- ✅ **470** nodes with documentation (88%)
|
- ✅ **470** nodes with documentation (88%)
|
||||||
- ✅ **267** AI-capable tools detected
|
- ✅ **267** AI-capable tools detected
|
||||||
|
- ✅ **2,646** pre-extracted template configurations
|
||||||
|
- ✅ **2,500+** workflow templates available
|
||||||
- ✅ **AI Agent & LangChain nodes** fully documented
|
- ✅ **AI Agent & LangChain nodes** fully documented
|
||||||
- ⚡ **Average response time**: ~12ms
|
- ⚡ **Average response time**: ~12ms
|
||||||
- 💾 **Database size**: ~15MB (optimized)
|
- 💾 **Database size**: ~15MB (optimized)
|
||||||
@@ -708,7 +965,7 @@ docker run --rm ghcr.io/czlonkowski/n8n-mcp:latest --version
|
|||||||
|
|
||||||
## 🧪 Testing
|
## 🧪 Testing
|
||||||
|
|
||||||
The project includes a comprehensive test suite with **1,356 tests** ensuring code quality and reliability:
|
The project includes a comprehensive test suite with **2,883 tests** ensuring code quality and reliability:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Run all tests
|
# Run all tests
|
||||||
@@ -728,9 +985,9 @@ npm run test:bench # Performance benchmarks
|
|||||||
|
|
||||||
### Test Suite Overview
|
### Test Suite Overview
|
||||||
|
|
||||||
- **Total Tests**: 1,356 (100% passing)
|
- **Total Tests**: 2,883 (100% passing)
|
||||||
- **Unit Tests**: 1,107 tests across 44 files
|
- **Unit Tests**: 2,526 tests across 99 files
|
||||||
- **Integration Tests**: 249 tests across 14 files
|
- **Integration Tests**: 357 tests across 20 files
|
||||||
- **Execution Time**: ~2.5 minutes in CI
|
- **Execution Time**: ~2.5 minutes in CI
|
||||||
- **Test Framework**: Vitest (for speed and TypeScript support)
|
- **Test Framework**: Vitest (for speed and TypeScript support)
|
||||||
- **Mocking**: MSW for API mocking, custom mocks for databases
|
- **Mocking**: MSW for API mocking, custom mocks for databases
|
||||||
@@ -744,22 +1001,24 @@ npm run test:bench # Performance benchmarks
|
|||||||
|
|
||||||
### Testing Architecture
|
### Testing Architecture
|
||||||
|
|
||||||
- **Unit Tests**: Isolated component testing with mocks
|
**Total: 3,336 tests** across unit and integration test suites
|
||||||
- Services layer: ~450 tests
|
|
||||||
- Parsers: ~200 tests
|
|
||||||
- Database repositories: ~100 tests
|
|
||||||
- MCP tools: ~180 tests
|
|
||||||
|
|
||||||
- **Integration Tests**: Full system behavior validation
|
- **Unit Tests** (2,766 tests): Isolated component testing with mocks
|
||||||
- MCP Protocol compliance: 72 tests
|
- Services layer: Enhanced validation, property filtering, workflow validation
|
||||||
- Database operations: 89 tests
|
- Parsers: Node parsing, property extraction, documentation mapping
|
||||||
- Error handling: 44 tests
|
- Database: Repositories, adapters, migrations, FTS5 search
|
||||||
- Performance: 44 tests
|
- MCP tools: Tool definitions, documentation system
|
||||||
|
- HTTP server: Multi-tenant support, security, configuration
|
||||||
|
|
||||||
- **Benchmarks**: Performance testing for critical paths
|
- **Integration Tests** (570 tests): Full system behavior validation
|
||||||
- Database queries
|
- **n8n API Integration** (172 tests): All 18 MCP handler tools tested against real n8n instance
|
||||||
- Node loading
|
- Workflow management: Create, read, update, delete, list, validate, autofix
|
||||||
- Search operations
|
- Execution management: Trigger, retrieve, list, delete
|
||||||
|
- System tools: Health check, tool listing, diagnostics
|
||||||
|
- **MCP Protocol** (119 tests): Protocol compliance, session management, error handling
|
||||||
|
- **Database** (226 tests): Repository operations, transactions, performance, FTS5 search
|
||||||
|
- **Templates** (35 tests): Template fetching, storage, metadata operations
|
||||||
|
- **Docker** (18 tests): Configuration, entrypoint, security validation
|
||||||
|
|
||||||
For detailed testing documentation, see [Testing Architecture](./docs/testing-architecture.md).
|
For detailed testing documentation, see [Testing Architecture](./docs/testing-architecture.md).
|
||||||
|
|
||||||
@@ -807,6 +1066,23 @@ See [Automated Release Guide](./docs/AUTOMATED_RELEASES.md) for complete details
|
|||||||
- [Anthropic](https://anthropic.com) for the Model Context Protocol
|
- [Anthropic](https://anthropic.com) for the Model Context Protocol
|
||||||
- All contributors and users of this project
|
- All contributors and users of this project
|
||||||
|
|
||||||
|
### Template Attribution
|
||||||
|
|
||||||
|
All workflow templates in this project are fetched from n8n's public template gallery at [n8n.io/workflows](https://n8n.io/workflows). Each template includes:
|
||||||
|
- Full attribution to the original creator (name and username)
|
||||||
|
- Direct link to the source template on n8n.io
|
||||||
|
- Original workflow ID for reference
|
||||||
|
|
||||||
|
The AI agent instructions in this project contain mandatory attribution requirements. When using any template, the AI will automatically:
|
||||||
|
- Share the template author's name and username
|
||||||
|
- Provide a direct link to the original template on n8n.io
|
||||||
|
- Display attribution in the format: "This workflow is based on a template by **[author]** (@[username]). View the original at: [url]"
|
||||||
|
|
||||||
|
Template creators retain all rights to their workflows. This project indexes templates to improve discoverability through AI assistants. If you're a template creator and have concerns about your template being indexed, please open an issue.
|
||||||
|
|
||||||
|
Special thanks to the prolific template contributors whose work helps thousands of users automate their workflows, including:
|
||||||
|
**David Ashby** (@cfomodz), **Yaron Been** (@yaron-nofluff), **Jimleuk** (@jimleuk), **Davide** (@n3witalia), **David Olusola** (@dae221), **Ranjan Dailata** (@ranjancse), **Airtop** (@cesar-at-airtop), **Joseph LePage** (@joe), **Don Jayamaha Jr** (@don-the-gem-dealer), **Angel Menendez** (@djangelic), and the entire n8n community of creators!
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|||||||
623
TELEMETRY_PRUNING_GUIDE.md
Normal file
623
TELEMETRY_PRUNING_GUIDE.md
Normal file
@@ -0,0 +1,623 @@
|
|||||||
|
# Telemetry Data Pruning & Aggregation Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This guide provides a complete solution for managing n8n-mcp telemetry data in Supabase to stay within the 500 MB free tier limit while preserving valuable insights for product development.
|
||||||
|
|
||||||
|
## Current Situation
|
||||||
|
|
||||||
|
- **Database Size**: 265 MB / 500 MB (53% of limit)
|
||||||
|
- **Growth Rate**: 7.7 MB/day (54 MB/week)
|
||||||
|
- **Time Until Full**: ~17 days
|
||||||
|
- **Total Events**: 641,487 events + 17,247 workflows
|
||||||
|
|
||||||
|
### Storage Breakdown
|
||||||
|
|
||||||
|
| Event Type | Count | Size | % of Total |
|
||||||
|
|------------|-------|------|------------|
|
||||||
|
| `tool_sequence` | 362,704 | 96 MB | 66% |
|
||||||
|
| `tool_used` | 191,938 | 28 MB | 19% |
|
||||||
|
| `validation_details` | 36,280 | 14 MB | 10% |
|
||||||
|
| `workflow_created` | 23,213 | 4.5 MB | 3% |
|
||||||
|
| Others | ~26,000 | ~3 MB | 2% |
|
||||||
|
|
||||||
|
## Solution Strategy
|
||||||
|
|
||||||
|
**Aggregate → Delete → Retain only recent raw events**
|
||||||
|
|
||||||
|
### Expected Results
|
||||||
|
|
||||||
|
| Metric | Before | After | Improvement |
|
||||||
|
|--------|--------|-------|-------------|
|
||||||
|
| Database Size | 265 MB | ~90-120 MB | **55-65% reduction** |
|
||||||
|
| Growth Rate | 7.7 MB/day | ~2-3 MB/day | **60-70% slower** |
|
||||||
|
| Days Until Full | 17 days | **Sustainable** | Never fills |
|
||||||
|
| Free Tier Usage | 53% | ~20-25% | **75-80% headroom** |
|
||||||
|
|
||||||
|
## Implementation Steps
|
||||||
|
|
||||||
|
### Step 1: Execute the SQL Migration
|
||||||
|
|
||||||
|
Open Supabase SQL Editor and run the entire contents of `supabase-telemetry-aggregation.sql`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Copy and paste the entire supabase-telemetry-aggregation.sql file
|
||||||
|
-- Or run it directly from the file
|
||||||
|
```
|
||||||
|
|
||||||
|
This will create:
|
||||||
|
- 5 aggregation tables
|
||||||
|
- Aggregation functions
|
||||||
|
- Automated cleanup function
|
||||||
|
- Monitoring functions
|
||||||
|
- Scheduled cron job (daily at 2 AM UTC)
|
||||||
|
|
||||||
|
### Step 2: Verify Cron Job Setup
|
||||||
|
|
||||||
|
Check that the cron job was created successfully:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- View scheduled cron jobs
|
||||||
|
SELECT
|
||||||
|
jobid,
|
||||||
|
schedule,
|
||||||
|
command,
|
||||||
|
nodename,
|
||||||
|
nodeport,
|
||||||
|
database,
|
||||||
|
username,
|
||||||
|
active
|
||||||
|
FROM cron.job
|
||||||
|
WHERE jobname = 'telemetry-daily-cleanup';
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected output:
|
||||||
|
- Schedule: `0 2 * * *` (daily at 2 AM UTC)
|
||||||
|
- Active: `true`
|
||||||
|
|
||||||
|
### Step 3: Run Initial Emergency Cleanup
|
||||||
|
|
||||||
|
Get immediate space relief by running the emergency cleanup:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- This will aggregate and delete data older than 7 days
|
||||||
|
SELECT * FROM emergency_cleanup();
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected results:
|
||||||
|
```
|
||||||
|
action | rows_deleted | space_freed_mb
|
||||||
|
------------------------------------+--------------+----------------
|
||||||
|
Deleted non-critical events > 7d | ~284,924 | ~52 MB
|
||||||
|
Deleted error events > 14d | ~2,400 | ~0.5 MB
|
||||||
|
Deleted duplicate workflows | ~8,500 | ~11 MB
|
||||||
|
TOTAL (run VACUUM separately) | 0 | ~63.5 MB
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Reclaim Disk Space
|
||||||
|
|
||||||
|
After deletion, reclaim the actual disk space:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Reclaim space from deleted rows
|
||||||
|
VACUUM FULL telemetry_events;
|
||||||
|
VACUUM FULL telemetry_workflows;
|
||||||
|
|
||||||
|
-- Update statistics for query optimization
|
||||||
|
ANALYZE telemetry_events;
|
||||||
|
ANALYZE telemetry_workflows;
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note**: `VACUUM FULL` may take a few minutes and locks the table. Run during off-peak hours if possible.
|
||||||
|
|
||||||
|
### Step 5: Verify Results
|
||||||
|
|
||||||
|
Check the new database size:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT * FROM check_database_size();
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected output:
|
||||||
|
```
|
||||||
|
total_size_mb | events_size_mb | workflows_size_mb | aggregates_size_mb | percent_of_limit | days_until_full | status
|
||||||
|
--------------+----------------+-------------------+--------------------+------------------+-----------------+---------
|
||||||
|
202.5 | 85.2 | 35.8 | 12.5 | 40.5 | ~95 | HEALTHY
|
||||||
|
```
|
||||||
|
|
||||||
|
## Daily Operations (Automated)
|
||||||
|
|
||||||
|
Once set up, the system runs automatically:
|
||||||
|
|
||||||
|
1. **Daily at 2 AM UTC**: Cron job runs
|
||||||
|
2. **Aggregation**: Data older than 3 days is aggregated into summary tables
|
||||||
|
3. **Deletion**: Raw events are deleted after aggregation
|
||||||
|
4. **Cleanup**: VACUUM runs to reclaim space
|
||||||
|
5. **Retention**:
|
||||||
|
- High-volume events: 3 days
|
||||||
|
- Error events: 30 days
|
||||||
|
- Aggregated insights: Forever
|
||||||
|
|
||||||
|
## Monitoring Commands
|
||||||
|
|
||||||
|
### Check Database Health
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- View current size and status
|
||||||
|
SELECT * FROM check_database_size();
|
||||||
|
```
|
||||||
|
|
||||||
|
### View Aggregated Insights
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Top tools used daily
|
||||||
|
SELECT
|
||||||
|
aggregation_date,
|
||||||
|
tool_name,
|
||||||
|
usage_count,
|
||||||
|
success_count,
|
||||||
|
error_count,
|
||||||
|
ROUND(100.0 * success_count / NULLIF(usage_count, 0), 1) as success_rate_pct
|
||||||
|
FROM telemetry_tool_usage_daily
|
||||||
|
ORDER BY aggregation_date DESC, usage_count DESC
|
||||||
|
LIMIT 50;
|
||||||
|
|
||||||
|
-- Most common tool sequences
|
||||||
|
SELECT
|
||||||
|
aggregation_date,
|
||||||
|
tool_sequence,
|
||||||
|
occurrence_count,
|
||||||
|
ROUND(avg_sequence_duration_ms, 0) as avg_duration_ms,
|
||||||
|
ROUND(100 * success_rate, 1) as success_rate_pct
|
||||||
|
FROM telemetry_tool_patterns
|
||||||
|
ORDER BY occurrence_count DESC
|
||||||
|
LIMIT 20;
|
||||||
|
|
||||||
|
-- Error patterns over time
|
||||||
|
SELECT
|
||||||
|
aggregation_date,
|
||||||
|
error_type,
|
||||||
|
error_context,
|
||||||
|
occurrence_count,
|
||||||
|
affected_users,
|
||||||
|
sample_error_message
|
||||||
|
FROM telemetry_error_patterns
|
||||||
|
ORDER BY aggregation_date DESC, occurrence_count DESC
|
||||||
|
LIMIT 30;
|
||||||
|
|
||||||
|
-- Workflow creation trends
|
||||||
|
SELECT
|
||||||
|
aggregation_date,
|
||||||
|
complexity,
|
||||||
|
node_count_range,
|
||||||
|
has_trigger,
|
||||||
|
has_webhook,
|
||||||
|
workflow_count,
|
||||||
|
ROUND(avg_node_count, 1) as avg_nodes
|
||||||
|
FROM telemetry_workflow_insights
|
||||||
|
ORDER BY aggregation_date DESC, workflow_count DESC
|
||||||
|
LIMIT 30;
|
||||||
|
|
||||||
|
-- Validation success rates
|
||||||
|
SELECT
|
||||||
|
aggregation_date,
|
||||||
|
validation_type,
|
||||||
|
profile,
|
||||||
|
success_count,
|
||||||
|
failure_count,
|
||||||
|
ROUND(100.0 * success_count / NULLIF(success_count + failure_count, 0), 1) as success_rate_pct,
|
||||||
|
common_failure_reasons
|
||||||
|
FROM telemetry_validation_insights
|
||||||
|
ORDER BY aggregation_date DESC, (success_count + failure_count) DESC
|
||||||
|
LIMIT 30;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Cron Job Execution History
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- View recent cron job runs
|
||||||
|
SELECT
|
||||||
|
runid,
|
||||||
|
jobid,
|
||||||
|
database,
|
||||||
|
status,
|
||||||
|
return_message,
|
||||||
|
start_time,
|
||||||
|
end_time
|
||||||
|
FROM cron.job_run_details
|
||||||
|
WHERE jobid = (SELECT jobid FROM cron.job WHERE jobname = 'telemetry-daily-cleanup')
|
||||||
|
ORDER BY start_time DESC
|
||||||
|
LIMIT 10;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Manual Operations
|
||||||
|
|
||||||
|
### Run Cleanup On-Demand
|
||||||
|
|
||||||
|
If you need to run cleanup outside the scheduled time:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Run with default 3-day retention
|
||||||
|
SELECT * FROM run_telemetry_aggregation_and_cleanup(3);
|
||||||
|
VACUUM ANALYZE telemetry_events;
|
||||||
|
|
||||||
|
-- Or with custom retention (e.g., 5 days)
|
||||||
|
SELECT * FROM run_telemetry_aggregation_and_cleanup(5);
|
||||||
|
VACUUM ANALYZE telemetry_events;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Emergency Cleanup (Critical Situations)
|
||||||
|
|
||||||
|
If database is approaching limit and you need immediate relief:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Step 1: Run emergency cleanup (7-day retention)
|
||||||
|
SELECT * FROM emergency_cleanup();
|
||||||
|
|
||||||
|
-- Step 2: Reclaim space aggressively
|
||||||
|
VACUUM FULL telemetry_events;
|
||||||
|
VACUUM FULL telemetry_workflows;
|
||||||
|
ANALYZE telemetry_events;
|
||||||
|
ANALYZE telemetry_workflows;
|
||||||
|
|
||||||
|
-- Step 3: Verify results
|
||||||
|
SELECT * FROM check_database_size();
|
||||||
|
```
|
||||||
|
|
||||||
|
### Adjust Retention Policy
|
||||||
|
|
||||||
|
To change the default 3-day retention period:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Update cron job to use 5-day retention instead
|
||||||
|
SELECT cron.unschedule('telemetry-daily-cleanup');
|
||||||
|
|
||||||
|
SELECT cron.schedule(
|
||||||
|
'telemetry-daily-cleanup',
|
||||||
|
'0 2 * * *', -- Daily at 2 AM UTC
|
||||||
|
$$
|
||||||
|
SELECT run_telemetry_aggregation_and_cleanup(5); -- 5 days instead of 3
|
||||||
|
VACUUM ANALYZE telemetry_events;
|
||||||
|
VACUUM ANALYZE telemetry_workflows;
|
||||||
|
$$
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Data Retention Policies
|
||||||
|
|
||||||
|
### Raw Events Retention
|
||||||
|
|
||||||
|
| Event Type | Retention | Reason |
|
||||||
|
|------------|-----------|--------|
|
||||||
|
| `tool_sequence` | 3 days | High volume, low long-term value |
|
||||||
|
| `tool_used` | 3 days | High volume, aggregated daily |
|
||||||
|
| `validation_details` | 3 days | Aggregated into insights |
|
||||||
|
| `workflow_created` | 3 days | Aggregated into patterns |
|
||||||
|
| `session_start` | 3 days | Operational data only |
|
||||||
|
| `search_query` | 3 days | Operational data only |
|
||||||
|
| `error_occurred` | **30 days** | Extended for debugging |
|
||||||
|
| `workflow_validation_failed` | 3 days | Captured in aggregates |
|
||||||
|
|
||||||
|
### Aggregated Data Retention
|
||||||
|
|
||||||
|
All aggregated data is kept **indefinitely**:
|
||||||
|
- Daily tool usage statistics
|
||||||
|
- Tool sequence patterns
|
||||||
|
- Workflow creation trends
|
||||||
|
- Error patterns and frequencies
|
||||||
|
- Validation success rates
|
||||||
|
|
||||||
|
### Workflow Retention
|
||||||
|
|
||||||
|
- **Unique workflows**: Kept indefinitely (one per unique hash)
|
||||||
|
- **Duplicate workflows**: Deleted after 3 days
|
||||||
|
- **Workflow metadata**: Aggregated into daily insights
|
||||||
|
|
||||||
|
## Intelligence Preserved
|
||||||
|
|
||||||
|
Even after aggressive pruning, you still have access to:
|
||||||
|
|
||||||
|
### Long-term Product Insights
|
||||||
|
- Which tools are most/least used over time
|
||||||
|
- Tool usage trends and adoption curves
|
||||||
|
- Common workflow patterns and complexities
|
||||||
|
- Error frequencies and types across versions
|
||||||
|
- Validation failure patterns
|
||||||
|
|
||||||
|
### Development Intelligence
|
||||||
|
- Feature adoption rates (by day/week/month)
|
||||||
|
- Pain points (high error rates, validation failures)
|
||||||
|
- User behavior patterns (tool sequences, workflow styles)
|
||||||
|
- Version comparison (changes in usage between releases)
|
||||||
|
|
||||||
|
### Recent Debugging Data
|
||||||
|
- Last 3 days of raw events for immediate issues
|
||||||
|
- Last 30 days of error events for bug tracking
|
||||||
|
- Sample error messages for each error type
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Cron Job Not Running
|
||||||
|
|
||||||
|
Check if pg_cron extension is enabled:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Enable pg_cron
|
||||||
|
CREATE EXTENSION IF NOT EXISTS pg_cron;
|
||||||
|
|
||||||
|
-- Verify it's enabled
|
||||||
|
SELECT * FROM pg_extension WHERE extname = 'pg_cron';
|
||||||
|
```
|
||||||
|
|
||||||
|
### Aggregation Functions Failing
|
||||||
|
|
||||||
|
Check for errors in cron job execution:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- View error messages
|
||||||
|
SELECT
|
||||||
|
status,
|
||||||
|
return_message,
|
||||||
|
start_time
|
||||||
|
FROM cron.job_run_details
|
||||||
|
WHERE jobid = (SELECT jobid FROM cron.job WHERE jobname = 'telemetry-daily-cleanup')
|
||||||
|
AND status = 'failed'
|
||||||
|
ORDER BY start_time DESC;
|
||||||
|
```
|
||||||
|
|
||||||
|
### VACUUM Not Reclaiming Space
|
||||||
|
|
||||||
|
If `VACUUM ANALYZE` isn't reclaiming enough space, use `VACUUM FULL`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- More aggressive space reclamation (locks table)
|
||||||
|
VACUUM FULL telemetry_events;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Database Still Growing Too Fast
|
||||||
|
|
||||||
|
Reduce retention period further:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Change to 2-day retention (more aggressive)
|
||||||
|
SELECT * FROM run_telemetry_aggregation_and_cleanup(2);
|
||||||
|
```
|
||||||
|
|
||||||
|
Or delete more event types:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Delete additional low-value events
|
||||||
|
DELETE FROM telemetry_events
|
||||||
|
WHERE created_at < NOW() - INTERVAL '3 days'
|
||||||
|
AND event IN ('session_start', 'search_query', 'diagnostic_completed', 'health_check_completed');
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance Considerations
|
||||||
|
|
||||||
|
### Cron Job Execution Time
|
||||||
|
|
||||||
|
The daily cleanup typically takes:
|
||||||
|
- **Aggregation**: 30-60 seconds
|
||||||
|
- **Deletion**: 15-30 seconds
|
||||||
|
- **VACUUM**: 2-5 minutes
|
||||||
|
- **Total**: ~3-7 minutes
|
||||||
|
|
||||||
|
### Query Performance
|
||||||
|
|
||||||
|
All aggregation tables have indexes on:
|
||||||
|
- Date columns (for time-series queries)
|
||||||
|
- Lookup columns (tool_name, error_type, etc.)
|
||||||
|
- User columns (for user-specific analysis)
|
||||||
|
|
||||||
|
### Lock Considerations
|
||||||
|
|
||||||
|
- `VACUUM ANALYZE`: Minimal locking, safe during operation
|
||||||
|
- `VACUUM FULL`: Locks table, run during off-peak hours
|
||||||
|
- Aggregation functions: Read-only queries, no locking
|
||||||
|
|
||||||
|
## Customization
|
||||||
|
|
||||||
|
### Add Custom Aggregations
|
||||||
|
|
||||||
|
To track additional metrics, create new aggregation tables:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Example: Session duration aggregation
|
||||||
|
CREATE TABLE telemetry_session_duration_daily (
|
||||||
|
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||||
|
aggregation_date DATE NOT NULL,
|
||||||
|
avg_duration_seconds NUMERIC,
|
||||||
|
median_duration_seconds NUMERIC,
|
||||||
|
max_duration_seconds NUMERIC,
|
||||||
|
session_count INTEGER,
|
||||||
|
created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||||
|
UNIQUE(aggregation_date)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Add to cleanup function
|
||||||
|
-- (modify run_telemetry_aggregation_and_cleanup)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Modify Retention Policies
|
||||||
|
|
||||||
|
Edit the `run_telemetry_aggregation_and_cleanup` function to adjust retention by event type:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Keep validation_details for 7 days instead of 3
|
||||||
|
DELETE FROM telemetry_events
|
||||||
|
WHERE created_at < (NOW() - INTERVAL '7 days')
|
||||||
|
AND event = 'validation_details';
|
||||||
|
```
|
||||||
|
|
||||||
|
### Change Cron Schedule
|
||||||
|
|
||||||
|
Adjust the execution time if needed:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Run at different time (e.g., 3 AM UTC)
|
||||||
|
SELECT cron.schedule(
|
||||||
|
'telemetry-daily-cleanup',
|
||||||
|
'0 3 * * *', -- 3 AM instead of 2 AM
|
||||||
|
$$ SELECT run_telemetry_aggregation_and_cleanup(3); VACUUM ANALYZE telemetry_events; $$
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Run twice daily (2 AM and 2 PM)
|
||||||
|
SELECT cron.schedule(
|
||||||
|
'telemetry-cleanup-morning',
|
||||||
|
'0 2 * * *',
|
||||||
|
$$ SELECT run_telemetry_aggregation_and_cleanup(3); $$
|
||||||
|
);
|
||||||
|
|
||||||
|
SELECT cron.schedule(
|
||||||
|
'telemetry-cleanup-afternoon',
|
||||||
|
'0 14 * * *',
|
||||||
|
$$ SELECT run_telemetry_aggregation_and_cleanup(3); $$
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup & Recovery
|
||||||
|
|
||||||
|
### Before Running Emergency Cleanup
|
||||||
|
|
||||||
|
Create a backup of aggregation queries:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Export aggregated data to CSV or backup tables
|
||||||
|
CREATE TABLE telemetry_tool_usage_backup AS
|
||||||
|
SELECT * FROM telemetry_tool_usage_daily;
|
||||||
|
|
||||||
|
CREATE TABLE telemetry_patterns_backup AS
|
||||||
|
SELECT * FROM telemetry_tool_patterns;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Restore Deleted Data
|
||||||
|
|
||||||
|
Raw event data cannot be restored after deletion. However, aggregated insights are preserved indefinitely.
|
||||||
|
|
||||||
|
To prevent accidental data loss:
|
||||||
|
1. Test cleanup functions on staging first
|
||||||
|
2. Review `check_database_size()` before running emergency cleanup
|
||||||
|
3. Start with longer retention periods (7 days) and reduce gradually
|
||||||
|
4. Monitor aggregated data quality for 1-2 weeks
|
||||||
|
|
||||||
|
## Monitoring Dashboard Queries
|
||||||
|
|
||||||
|
### Weekly Growth Report
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Database growth over last 7 days
|
||||||
|
SELECT
|
||||||
|
DATE(created_at) as date,
|
||||||
|
COUNT(*) as events_created,
|
||||||
|
COUNT(DISTINCT event) as event_types,
|
||||||
|
COUNT(DISTINCT user_id) as active_users,
|
||||||
|
ROUND(SUM(pg_column_size(telemetry_events.*))::NUMERIC / 1024 / 1024, 2) as size_mb
|
||||||
|
FROM telemetry_events
|
||||||
|
WHERE created_at >= NOW() - INTERVAL '7 days'
|
||||||
|
GROUP BY DATE(created_at)
|
||||||
|
ORDER BY date DESC;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Storage Efficiency Report
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Compare raw vs aggregated storage
|
||||||
|
SELECT
|
||||||
|
'Raw Events (last 3 days)' as category,
|
||||||
|
COUNT(*) as row_count,
|
||||||
|
pg_size_pretty(pg_total_relation_size('telemetry_events')) as table_size
|
||||||
|
FROM telemetry_events
|
||||||
|
WHERE created_at >= NOW() - INTERVAL '3 days'
|
||||||
|
|
||||||
|
UNION ALL
|
||||||
|
|
||||||
|
SELECT
|
||||||
|
'Aggregated Insights (all time)',
|
||||||
|
(SELECT COUNT(*) FROM telemetry_tool_usage_daily) +
|
||||||
|
(SELECT COUNT(*) FROM telemetry_tool_patterns) +
|
||||||
|
(SELECT COUNT(*) FROM telemetry_workflow_insights) +
|
||||||
|
(SELECT COUNT(*) FROM telemetry_error_patterns) +
|
||||||
|
(SELECT COUNT(*) FROM telemetry_validation_insights),
|
||||||
|
pg_size_pretty(
|
||||||
|
pg_total_relation_size('telemetry_tool_usage_daily') +
|
||||||
|
pg_total_relation_size('telemetry_tool_patterns') +
|
||||||
|
pg_total_relation_size('telemetry_workflow_insights') +
|
||||||
|
pg_total_relation_size('telemetry_error_patterns') +
|
||||||
|
pg_total_relation_size('telemetry_validation_insights')
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Top Events by Size
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Which event types consume most space
|
||||||
|
SELECT
|
||||||
|
event,
|
||||||
|
COUNT(*) as event_count,
|
||||||
|
pg_size_pretty(SUM(pg_column_size(telemetry_events.*))::BIGINT) as total_size,
|
||||||
|
pg_size_pretty(AVG(pg_column_size(telemetry_events.*))::BIGINT) as avg_size_per_event,
|
||||||
|
ROUND(100.0 * COUNT(*) / SUM(COUNT(*)) OVER (), 2) as pct_of_events
|
||||||
|
FROM telemetry_events
|
||||||
|
GROUP BY event
|
||||||
|
ORDER BY SUM(pg_column_size(telemetry_events.*)) DESC;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Success Metrics
|
||||||
|
|
||||||
|
Track these metrics weekly to ensure the system is working:
|
||||||
|
|
||||||
|
### Target Metrics (After Implementation)
|
||||||
|
|
||||||
|
- ✅ Database size: **< 150 MB** (< 30% of limit)
|
||||||
|
- ✅ Growth rate: **< 3 MB/day** (sustainable)
|
||||||
|
- ✅ Raw event retention: **3 days** (configurable)
|
||||||
|
- ✅ Aggregated data: **All-time insights available**
|
||||||
|
- ✅ Cron job success rate: **> 95%**
|
||||||
|
- ✅ Query performance: **< 500ms for aggregated queries**
|
||||||
|
|
||||||
|
### Review Schedule
|
||||||
|
|
||||||
|
- **Daily**: Check `check_database_size()` status
|
||||||
|
- **Weekly**: Review aggregated insights and growth trends
|
||||||
|
- **Monthly**: Analyze cron job success rate and adjust retention if needed
|
||||||
|
- **After each release**: Compare usage patterns to previous version
|
||||||
|
|
||||||
|
## Quick Reference
|
||||||
|
|
||||||
|
### Essential Commands
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Check database health
|
||||||
|
SELECT * FROM check_database_size();
|
||||||
|
|
||||||
|
-- View recent aggregated insights
|
||||||
|
SELECT * FROM telemetry_tool_usage_daily ORDER BY aggregation_date DESC LIMIT 10;
|
||||||
|
|
||||||
|
-- Run manual cleanup (3-day retention)
|
||||||
|
SELECT * FROM run_telemetry_aggregation_and_cleanup(3);
|
||||||
|
VACUUM ANALYZE telemetry_events;
|
||||||
|
|
||||||
|
-- Emergency cleanup (7-day retention)
|
||||||
|
SELECT * FROM emergency_cleanup();
|
||||||
|
VACUUM FULL telemetry_events;
|
||||||
|
|
||||||
|
-- View cron job status
|
||||||
|
SELECT * FROM cron.job WHERE jobname = 'telemetry-daily-cleanup';
|
||||||
|
|
||||||
|
-- View cron execution history
|
||||||
|
SELECT * FROM cron.job_run_details
|
||||||
|
WHERE jobid = (SELECT jobid FROM cron.job WHERE jobname = 'telemetry-daily-cleanup')
|
||||||
|
ORDER BY start_time DESC LIMIT 5;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
If you encounter issues:
|
||||||
|
|
||||||
|
1. Check the troubleshooting section above
|
||||||
|
2. Review cron job execution logs
|
||||||
|
3. Verify pg_cron extension is enabled
|
||||||
|
4. Test aggregation functions manually
|
||||||
|
5. Check Supabase dashboard for errors
|
||||||
|
|
||||||
|
For questions or improvements, refer to the main project documentation.
|
||||||
BIN
data/nodes.db
BIN
data/nodes.db
Binary file not shown.
0
data/templates.db
Normal file
0
data/templates.db
Normal file
@@ -23,7 +23,11 @@ services:
|
|||||||
# Database
|
# Database
|
||||||
NODE_DB_PATH: ${NODE_DB_PATH:-/app/data/nodes.db}
|
NODE_DB_PATH: ${NODE_DB_PATH:-/app/data/nodes.db}
|
||||||
REBUILD_ON_START: ${REBUILD_ON_START:-false}
|
REBUILD_ON_START: ${REBUILD_ON_START:-false}
|
||||||
|
|
||||||
|
# Telemetry: Anonymous usage statistics are ENABLED by default
|
||||||
|
# To opt-out, uncomment and set to 'true':
|
||||||
|
# N8N_MCP_TELEMETRY_DISABLED: ${N8N_MCP_TELEMETRY_DISABLED:-true}
|
||||||
|
|
||||||
# Optional: n8n API configuration (enables 16 additional management tools)
|
# Optional: n8n API configuration (enables 16 additional management tools)
|
||||||
# Uncomment and configure to enable n8n workflow management
|
# Uncomment and configure to enable n8n workflow management
|
||||||
# N8N_API_URL: ${N8N_API_URL}
|
# N8N_API_URL: ${N8N_API_URL}
|
||||||
|
|||||||
@@ -5,7 +5,448 @@ All notable changes to this project will be documented in this file.
|
|||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased] - Phase 0: Connection Operations Critical Fixes
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **🐛 CRITICAL: Fixed `addConnection` sourceIndex handling (Issue #272, discovered in hands-on testing)**
|
||||||
|
- Multi-output nodes (IF, Switch) now work correctly with sourceIndex parameter
|
||||||
|
- Changed from `||` to `??` operator to properly handle explicit 0 values
|
||||||
|
- Added defensive array validation before accessing indices
|
||||||
|
- Improves rating from 3/10 to 8/10 for multi-output node scenarios
|
||||||
|
- **Impact**: IF nodes, Switch nodes, and all conditional routing now reliable
|
||||||
|
|
||||||
|
- **🐛 CRITICAL: Added runtime validation for `updateConnection` (Issue #272, #204)**
|
||||||
|
- Prevents server crashes when `updates` object is missing
|
||||||
|
- Provides helpful error message with:
|
||||||
|
- Clear explanation of what's wrong
|
||||||
|
- Correct format example
|
||||||
|
- Suggestion to use removeConnection + addConnection for rewiring
|
||||||
|
- Validates `updates` is an object, not string or other type
|
||||||
|
- **Impact**: No more cryptic "Cannot read properties of undefined" crashes
|
||||||
|
|
||||||
|
### Enhanced
|
||||||
|
- **Error Messages**: `updateConnection` errors now include actionable guidance
|
||||||
|
- Example format shown in error
|
||||||
|
- Alternative approaches suggested (removeConnection + addConnection)
|
||||||
|
- Clear explanation that updateConnection modifies properties, not targets
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
- Added 8 comprehensive tests for Phase 0 fixes
|
||||||
|
- 2 tests for updateConnection validation (missing updates, invalid type)
|
||||||
|
- 5 tests for sourceIndex handling (IF nodes, parallel execution, Switch nodes, explicit 0)
|
||||||
|
- 1 test for complex multi-output routing scenarios
|
||||||
|
- All 126 existing tests still passing
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
- Updated tool documentation to clarify:
|
||||||
|
- `addConnection` now properly handles sourceIndex (Phase 0 fix noted)
|
||||||
|
- `updateConnection` REQUIRES 'updates' object (Phase 0 validation noted)
|
||||||
|
- Added pitfalls about updateConnection limitations
|
||||||
|
- Clarified that updateConnection modifies properties, NOT connection targets
|
||||||
|
|
||||||
|
### Developer Experience
|
||||||
|
- More defensive programming throughout connection operations
|
||||||
|
- Better use of nullish coalescing (??) vs. logical OR (||)
|
||||||
|
- Clear inline comments explaining expected behavior
|
||||||
|
- Improved type safety with runtime guards
|
||||||
|
|
||||||
|
### References
|
||||||
|
- Comprehensive analysis: `docs/local/connection-operations-deep-dive-and-improvement-plan.md`
|
||||||
|
- Based on hands-on testing with n8n-mcp-tester agent
|
||||||
|
- Overall experience rating improved from 4.5/10 to estimated 6/10
|
||||||
|
|
||||||
|
## [2.14.4] - 2025-09-30
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Workflow Cleanup Operations**: Two new operations for `n8n_update_partial_workflow` to handle broken workflow recovery
|
||||||
|
- `cleanStaleConnections`: Automatically removes all connections referencing non-existent nodes
|
||||||
|
- Essential after node renames or deletions that leave dangling connection references
|
||||||
|
- Supports `dryRun: true` mode to preview what would be removed
|
||||||
|
- Removes both source and target stale connections
|
||||||
|
- `replaceConnections`: Replace entire connections object in a single operation
|
||||||
|
- Faster than crafting many individual connection operations
|
||||||
|
- Useful for bulk connection rewiring
|
||||||
|
|
||||||
|
- **Graceful Error Handling for Connection Operations**: Enhanced `removeConnection` operation
|
||||||
|
- New `ignoreErrors` flag: When `true`, operation succeeds even if connection doesn't exist
|
||||||
|
- Perfect for cleanup scenarios where you're not sure if connections exist
|
||||||
|
- Maintains backwards compatibility (defaults to `false` for strict validation)
|
||||||
|
|
||||||
|
- **Best-Effort Mode**: New `continueOnError` mode for `WorkflowDiffRequest`
|
||||||
|
- Apply valid operations even if some fail
|
||||||
|
- Returns detailed results with `applied` and `failed` operation indices
|
||||||
|
- Breaks atomic guarantees intentionally for bulk cleanup scenarios
|
||||||
|
- Maintains atomic mode as default for safety
|
||||||
|
|
||||||
|
### Enhanced
|
||||||
|
- **Tool Documentation**: Updated `n8n_update_partial_workflow` documentation
|
||||||
|
- Added examples for cleanup scenarios
|
||||||
|
- Documented new operation types and modes
|
||||||
|
- Added best practices for workflow recovery
|
||||||
|
- Clarified atomic vs. best-effort behavior
|
||||||
|
|
||||||
|
- **Type System**: Extended workflow diff types
|
||||||
|
- Added `CleanStaleConnectionsOperation` interface
|
||||||
|
- Added `ReplaceConnectionsOperation` interface
|
||||||
|
- Extended `WorkflowDiffResult` with `applied`, `failed`, and `staleConnectionsRemoved` fields
|
||||||
|
- Updated type guards for new connection operations
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
- Added comprehensive test suite for v2.14.4 features
|
||||||
|
- 15 new tests covering all new operations and modes
|
||||||
|
- Tests for cleanStaleConnections with various stale scenarios
|
||||||
|
- Tests for replaceConnections validation
|
||||||
|
- Tests for ignoreErrors flag behavior
|
||||||
|
- Tests for continueOnError mode with mixed success/failure
|
||||||
|
- Backwards compatibility verification tests
|
||||||
|
|
||||||
|
### Impact
|
||||||
|
- **Time Saved**: Reduces broken workflow fix time from 10-15 minutes to 30 seconds
|
||||||
|
- **Token Efficiency**: `cleanStaleConnections` is 1 operation vs 10+ manual operations
|
||||||
|
- **User Experience**: Dramatically improved workflow recovery capabilities
|
||||||
|
- **Backwards Compatibility**: 100% - all additions are optional and default to existing behavior
|
||||||
|
|
||||||
|
## [2.13.2] - 2025-01-24
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Operation and Resource Validation with Intelligent Suggestions**: New similarity services for n8n node configuration validation
|
||||||
|
- `OperationSimilarityService`: Validates operations and suggests similar alternatives using Levenshtein distance and pattern matching
|
||||||
|
- `ResourceSimilarityService`: Validates resources with automatic plural/singular conversion and typo detection
|
||||||
|
- Provides "Did you mean...?" suggestions when invalid operations or resources are used
|
||||||
|
- Example: `operation: "listFiles"` suggests `"search"` for Google Drive nodes
|
||||||
|
- Example: `resource: "files"` suggests singular `"file"` with 95% confidence
|
||||||
|
- Confidence-based suggestions (minimum 30% threshold) with contextual fix messages
|
||||||
|
- Resource-aware operation filtering ensures suggestions are contextually appropriate
|
||||||
|
- 5-minute cache duration for performance optimization
|
||||||
|
- Integrated into `EnhancedConfigValidator` for seamless validation flow
|
||||||
|
|
||||||
|
- **Custom Error Handling**: New `ValidationServiceError` class for better error management
|
||||||
|
- Proper error chaining with cause tracking
|
||||||
|
- Specialized factory methods for common error scenarios
|
||||||
|
- Type-safe error propagation throughout the validation pipeline
|
||||||
|
|
||||||
|
### Enhanced
|
||||||
|
- **Code Quality and Security Improvements** (based on code review feedback):
|
||||||
|
- Safe JSON parsing with try-catch error boundaries
|
||||||
|
- Type guards for safe property access (`getOperationValue`, `getResourceValue`)
|
||||||
|
- Memory leak prevention with periodic cache cleanup
|
||||||
|
- Performance optimization with early termination for exact matches
|
||||||
|
- Replaced magic numbers with named constants for better maintainability
|
||||||
|
- Comprehensive JSDoc documentation for all public methods
|
||||||
|
- Improved confidence calculation for typos and transpositions
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **Test Compatibility**: Updated test expectations to correctly handle exact match scenarios
|
||||||
|
- **Cache Management**: Fixed cache cleanup to prevent unbounded memory growth
|
||||||
|
- **Validation Deduplication**: Enhanced config validator now properly replaces base validator errors with detailed suggestions
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
- Added comprehensive test coverage for similarity services (37 new tests)
|
||||||
|
- All unit tests passing with proper edge case handling
|
||||||
|
- Integration confirmed via n8n-mcp-tester agent validation
|
||||||
|
|
||||||
|
## [2.13.1] - 2025-01-24
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- **Removed 5-operation limit from n8n_update_partial_workflow**: The workflow diff engine now supports unlimited operations per request
|
||||||
|
- Previously limited to 5 operations for "transactional integrity"
|
||||||
|
- Analysis revealed the limit was unnecessary - the clone-validate-apply pattern already ensures atomicity
|
||||||
|
- All operations are validated before any are applied, maintaining data integrity
|
||||||
|
- Enables complex workflow refactoring in single API calls
|
||||||
|
- Updated documentation and examples to demonstrate large batch operations (26+ operations)
|
||||||
|
|
||||||
|
## [2.13.0] - 2025-01-24
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Webhook Path Autofixer**: Automatically generates UUIDs for webhook nodes missing path configuration
|
||||||
|
- Generates unique UUID for both `path` parameter and `webhookId` field
|
||||||
|
- Conditionally updates typeVersion to 2.1 only when < 2.1 to ensure compatibility
|
||||||
|
- High confidence fix (95%) as UUID generation is deterministic
|
||||||
|
- Resolves webhook nodes showing "?" in the n8n UI
|
||||||
|
|
||||||
|
- **Enhanced Node Type Suggestions**: Intelligent node type correction with similarity matching
|
||||||
|
- Multi-factor scoring system: name similarity, category match, package match, pattern match
|
||||||
|
- Handles deprecated package prefixes (n8n-nodes-base. → nodes-base.)
|
||||||
|
- Corrects capitalization mistakes (HttpRequest → httpRequest)
|
||||||
|
- Suggests correct packages (nodes-base.openai → nodes-langchain.openAi)
|
||||||
|
- Only auto-fixes suggestions with ≥90% confidence
|
||||||
|
- 5-minute cache for performance optimization
|
||||||
|
|
||||||
|
- **n8n_autofix_workflow Tool**: New MCP tool for automatic workflow error correction
|
||||||
|
- Comprehensive documentation with examples and best practices
|
||||||
|
- Supports 5 fix types: expression-format, typeversion-correction, error-output-config, node-type-correction, webhook-missing-path
|
||||||
|
- Confidence-based system (high/medium/low) for safe fixes
|
||||||
|
- Preview mode to review changes before applying
|
||||||
|
- Integrated with workflow validation pipeline
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **Security**: Eliminated ReDoS vulnerability in NodeSimilarityService
|
||||||
|
- Replaced all regex patterns with string-based matching
|
||||||
|
- No performance impact while maintaining accuracy
|
||||||
|
|
||||||
|
- **Performance**: Optimized similarity matching algorithms
|
||||||
|
- Levenshtein distance algorithm optimized from O(m*n) space to O(n)
|
||||||
|
- Added early termination for performance improvement
|
||||||
|
- Cache invalidation with version tracking prevents memory leaks
|
||||||
|
|
||||||
|
- **Code Quality**: Improved maintainability and type safety
|
||||||
|
- Extracted magic numbers into named constants
|
||||||
|
- Added proper type guards for runtime safety
|
||||||
|
- Created centralized node-type-utils for consistent type normalization
|
||||||
|
- Fixed silent failures in setNestedValue operations
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Template sanitizer now includes defensive null checks for runtime safety
|
||||||
|
- Workflow validator uses centralized type normalization utility
|
||||||
|
|
||||||
|
## [2.12.2] - 2025-01-22
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Updated n8n dependencies to latest versions:
|
||||||
|
- n8n: 1.111.0 → 1.112.3
|
||||||
|
- n8n-core: 1.110.0 → 1.111.0
|
||||||
|
- n8n-workflow: 1.108.0 → 1.109.0
|
||||||
|
- @n8n/n8n-nodes-langchain: 1.110.0 → 1.111.1
|
||||||
|
- Rebuilt node database with 536 nodes (438 from n8n-nodes-base, 98 from langchain)
|
||||||
|
|
||||||
|
## [2.12.1] - 2025-01-21
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Comprehensive Expression Format Validation System**: Three-tier validation strategy for n8n expressions
|
||||||
|
- **Universal Expression Validator**: 100% reliable detection of expression format issues
|
||||||
|
- Enforces required `=` prefix for all expressions `{{ }}`
|
||||||
|
- Validates expression syntax (bracket matching, empty expressions)
|
||||||
|
- Detects common mistakes (template literals, nested brackets, double prefixes)
|
||||||
|
- Provides confidence score of 1.0 for universal rules
|
||||||
|
- **Confidence-Based Node-Specific Recommendations**: Intelligent resource locator suggestions
|
||||||
|
- Confidence scoring system (0.0 to 1.0) for field-specific recommendations
|
||||||
|
- High confidence (≥0.8): Exact field matches for known nodes (GitHub owner/repository, Slack channels)
|
||||||
|
- Medium confidence (≥0.5): Field pattern matches (fields ending in Id, Key, Name)
|
||||||
|
- Factors: exact field match, field patterns, value patterns, node category
|
||||||
|
- **Resource Locator Format Detection**: Identifies fields needing `__rl` structure
|
||||||
|
- Validates resource locator mode (id, url, expression, name, list)
|
||||||
|
- Auto-fixes missing prefixes in resource locator values
|
||||||
|
- Provides clear JSON examples showing correct format
|
||||||
|
- **Enhanced Safety Features**:
|
||||||
|
- Recursion depth protection (MAX_RECURSION_DEPTH = 100) prevents infinite loops
|
||||||
|
- Pattern matching precision using exact/prefix matching instead of includes()
|
||||||
|
- Circular reference detection with WeakSet
|
||||||
|
- **Separation of Concerns**: Clean architecture for maintainability
|
||||||
|
- Universal rules separated from node-specific intelligence
|
||||||
|
- Confidence-based application of suggestions
|
||||||
|
- Future-proof design that works with any n8n node
|
||||||
|
|
||||||
|
## [2.12.1] - 2025-09-22
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **Error Output Validation**: Enhanced workflow validator to detect incorrect error output configurations
|
||||||
|
- Detects when multiple nodes are incorrectly placed in the same output array (main[0])
|
||||||
|
- Validates that error handlers are properly connected to main[1] (error output) instead of main[0]
|
||||||
|
- Cross-validates onError property ('continueErrorOutput') matches actual connection structure
|
||||||
|
- Provides clear, actionable error messages with JSON examples showing correct configuration
|
||||||
|
- Uses heuristic detection for error handler nodes (names containing "error", "fail", "catch", etc.)
|
||||||
|
- Added comprehensive test coverage with 16+ test cases
|
||||||
|
|
||||||
|
### Improved
|
||||||
|
- **Validation Messages**: Error messages now include detailed JSON examples showing both incorrect and correct configurations
|
||||||
|
- **Pattern Detection**: Fixed `checkWorkflowPatterns` to check main[1] for error outputs instead of non-existent outputs.error
|
||||||
|
- **Test Coverage**: Added new test file `workflow-validator-error-outputs.test.ts` with extensive error output validation scenarios
|
||||||
|
|
||||||
|
## [2.12.0] - 2025-09-19
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Flexible Instance Configuration**: Complete multi-instance support for serving multiple n8n instances dynamically
|
||||||
|
- New `InstanceContext` interface for runtime configuration without multi-tenancy implications
|
||||||
|
- Dual-mode API client supporting both singleton (env vars) and instance-specific configurations
|
||||||
|
- LRU cache with SHA-256 hashing for secure client management (100 instances, 30-min TTL)
|
||||||
|
- Comprehensive input validation preventing injection attacks and invalid configurations
|
||||||
|
- Session context management in HTTP server for per-session instance configuration
|
||||||
|
- 100% backward compatibility - existing deployments work unchanged
|
||||||
|
- Full test coverage with 83 new tests covering security, caching, and validation
|
||||||
|
|
||||||
|
### Security
|
||||||
|
- **SHA-256 Cache Key Hashing**: All instance identifiers are hashed before caching
|
||||||
|
- **Input Validation**: Comprehensive validation for URLs, API keys, and numeric parameters
|
||||||
|
- **Secure Logging**: Sensitive data never logged, only partial hashes for debugging
|
||||||
|
- **Memory Management**: LRU eviction and TTL prevent unbounded growth
|
||||||
|
- **URL Validation**: Blocks dangerous protocols (file://, javascript://, etc.)
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
- **Efficient Caching**: LRU cache with automatic cleanup reduces API client creation
|
||||||
|
- **Fast Lookups**: SHA-256 hashed keys for O(1) cache access
|
||||||
|
- **Memory Optimized**: Maximum 100 concurrent instances with 30-minute TTL
|
||||||
|
- **Token Savings**: Reuses existing clients instead of recreating
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
- Added comprehensive [Flexible Instance Configuration Guide](./FLEXIBLE_INSTANCE_CONFIGURATION.md)
|
||||||
|
- Detailed architecture, usage examples, and security considerations
|
||||||
|
- Migration guide for existing deployments
|
||||||
|
- Complete API documentation for InstanceContext
|
||||||
|
|
||||||
|
## [2.11.3] - 2025-09-17
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **n8n_update_partial_workflow Tool**: Fixed critical bug where updateNode and updateConnection operations were using incorrect property name
|
||||||
|
- Changed from `changes` property to `updates` property to match documentation and expected behavior
|
||||||
|
- Resolves issue where AI agents would break workflow connections when updating nodes
|
||||||
|
- Fixes GitHub issues #159 (update_partial_workflow is invalid) and #168 (partial workflow update returns error)
|
||||||
|
- All related tests updated to use the correct property name
|
||||||
|
|
||||||
|
## [2.11.2] - 2025-09-16
|
||||||
|
|
||||||
|
### Updated
|
||||||
|
- **n8n Dependencies**: Updated to latest versions for compatibility and new features
|
||||||
|
- n8n: 1.110.1 → 1.111.0
|
||||||
|
- n8n-core: 1.109.0 → 1.110.0
|
||||||
|
- n8n-workflow: 1.107.0 → 1.108.0
|
||||||
|
- @n8n/n8n-nodes-langchain: 1.109.1 → 1.110.0
|
||||||
|
- **Node Database**: Rebuilt with 535 nodes from updated n8n packages
|
||||||
|
- **Templates**: Preserved all 2,598 workflow templates with metadata intact
|
||||||
|
- All critical nodes validated successfully (httpRequest, code, slack, agent)
|
||||||
|
- Test suite: 1,911 tests passing, 5 flaky performance tests failing (99.7% pass rate)
|
||||||
|
|
||||||
|
## [2.11.1] - 2025-09-15
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Optional Fields Parameter for search_templates**: Enhanced search_templates tool with field filtering capability
|
||||||
|
- New optional `fields` parameter accepts an array of field names to include in response
|
||||||
|
- Supported fields: 'id', 'name', 'description', 'author', 'nodes', 'views', 'created', 'url', 'metadata'
|
||||||
|
- Reduces response size by 70-98% when requesting only specific fields (e.g., just id and name)
|
||||||
|
- Maintains full backward compatibility - existing calls without fields parameter work unchanged
|
||||||
|
- Example: `search_templates({query: "slack", fields: ["id", "name"]})` returns minimal data
|
||||||
|
- Significantly improves AI agent performance by reducing token usage
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Fuzzy Node Type Matching for Templates**: Improved template discovery with flexible node type resolution
|
||||||
|
- Templates can now be found using simple node names: `["slack"]` instead of `["n8n-nodes-base.slack"]`
|
||||||
|
- Accepts various input formats: bare names, partial prefixes, and case variations
|
||||||
|
- Automatically expands related node types: `["email"]` finds Gmail, email send, and related templates
|
||||||
|
- `["slack"]` also finds `slackTrigger` templates
|
||||||
|
- Case-insensitive matching: `["Slack"]`, `["WEBHOOK"]`, `["HttpRequest"]` all work
|
||||||
|
- Backward compatible - existing exact formats continue working
|
||||||
|
- Reduces failed queries by approximately 50%
|
||||||
|
- Added `template-node-resolver.ts` utility for node type resolution
|
||||||
|
- Added 23 tests for template node resolution
|
||||||
|
- **Structured Template Metadata System**: Comprehensive metadata for intelligent template discovery
|
||||||
|
- Generated metadata for 2,534 templates (97.5% coverage) using OpenAI's batch API
|
||||||
|
- Rich metadata structure: categories, complexity, use cases, setup time, required services, key features, target audience
|
||||||
|
- New `search_templates_by_metadata` tool for advanced filtering by multiple criteria
|
||||||
|
- Enhanced `list_templates` tool with optional `includeMetadata` parameter
|
||||||
|
- Templates now always include descriptions in list responses
|
||||||
|
- Metadata enables filtering by complexity level (simple/medium/complex)
|
||||||
|
- Filter by estimated setup time ranges (5-480 minutes)
|
||||||
|
- Filter by required external services (OpenAI, Slack, Google, etc.)
|
||||||
|
- Filter by target audience (developers, marketers, analysts, etc.)
|
||||||
|
- Multiple filter combinations supported for precise template discovery
|
||||||
|
- SQLite JSON extraction for efficient metadata queries
|
||||||
|
- Batch processing with OpenAI's gpt-4o-mini model for cost efficiency
|
||||||
|
- Added comprehensive tool documentation for new metadata features
|
||||||
|
- New database columns: metadata_json, metadata_generated_at
|
||||||
|
- Repository methods for metadata search and filtering
|
||||||
|
|
||||||
|
## [2.11.0] - 2025-01-14
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- **Comprehensive Template Pagination**: All template search and list tools now return paginated responses
|
||||||
|
- Consistent `PaginatedResponse` format with `items`, `total`, `limit`, `offset`, and `hasMore` fields
|
||||||
|
- Customizable limits (1-100) and offset parameters for all template tools
|
||||||
|
- Count methods for accurate pagination information across all template queries
|
||||||
|
- **New `list_templates` Tool**: Efficient browsing of all available templates
|
||||||
|
- Returns minimal data (id, name, views, nodeCount) for quick overview
|
||||||
|
- Supports sorting by views, created_at, or name
|
||||||
|
- Optimized for discovering templates without downloading full workflow data
|
||||||
|
- **Flexible Template Retrieval Modes**: Enhanced `get_template` with three response modes
|
||||||
|
- `nodes_only`: Returns just node types and names (minimal tokens)
|
||||||
|
- `structure`: Returns nodes with positions and connections (moderate detail)
|
||||||
|
- `full`: Returns complete workflow JSON (default, maximum detail)
|
||||||
|
- Reduces token usage by 80-90% in minimal modes
|
||||||
|
|
||||||
|
### Enhanced
|
||||||
|
- **Template Database Compression**: Implemented gzip compression for workflow JSONs
|
||||||
|
- Workflow data compressed from ~75MB to 12.10MB (84% reduction)
|
||||||
|
- Database size reduced from 117MB to 48MB despite 5x more templates
|
||||||
|
- Transparent compression/decompression with base64 encoding
|
||||||
|
- No API changes - compression is handled internally
|
||||||
|
- **Template Quality Filtering**: Automatic filtering of low-quality templates
|
||||||
|
- Templates with ≤10 views are excluded from the database
|
||||||
|
- Expanded coverage from 499 to 2,596 high-quality templates (5x increase)
|
||||||
|
- Filtered 4,505 raw templates down to 2,596 based on popularity
|
||||||
|
- Ensures AI agents work with proven, valuable workflows
|
||||||
|
- **Enhanced Database Statistics**: Template metrics now included
|
||||||
|
- Shows total template count, average/min/max views
|
||||||
|
- Provides complete database overview including template coverage
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
- **Database Optimization**: 59% size reduction while storing 5x more content
|
||||||
|
- Previous: ~40MB database with 499 templates
|
||||||
|
- Current: ~48MB database with 2,596 templates
|
||||||
|
- Without compression would be ~120MB+
|
||||||
|
- **Token Efficiency**: 80-90% reduction in response size for minimal queries
|
||||||
|
- `list_templates`: ~10 tokens per template vs 100+ for full data
|
||||||
|
- `get_template` with `nodes_only`: Returns just essential node information
|
||||||
|
- Pagination prevents overwhelming responses for large result sets
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **Test Suite Compatibility**: Updated all tests for new template system
|
||||||
|
- Fixed parameter validation tests to expect new method signatures
|
||||||
|
- Updated integration tests to use templates with >10 views
|
||||||
|
- Removed redundant test files that were testing at wrong abstraction level
|
||||||
|
- All 1,700+ tests now passing
|
||||||
|
|
||||||
|
## [2.10.9] - 2025-01-09
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- **Dependencies**: Updated n8n packages to 1.110.1
|
||||||
|
- n8n: 1.109.2 → 1.110.1
|
||||||
|
- n8n-core: 1.108.0 → 1.109.0
|
||||||
|
- n8n-workflow: 1.106.0 → 1.107.0
|
||||||
|
- @n8n/n8n-nodes-langchain: 1.108.1 → 1.109.1
|
||||||
|
|
||||||
|
### Updated
|
||||||
|
- **Node Database**: Rebuilt with 536 nodes from updated n8n packages
|
||||||
|
- **Templates**: Refreshed workflow templates database with latest 499 templates from n8n.io
|
||||||
|
|
||||||
|
## [2.10.8] - 2025-09-04
|
||||||
|
|
||||||
|
### Updated
|
||||||
|
- **n8n Dependencies**: Updated to latest versions for compatibility and new features
|
||||||
|
- n8n: 1.107.4 → 1.109.2
|
||||||
|
- @n8n/n8n-nodes-langchain: 1.106.2 → 1.109.1
|
||||||
|
- n8n-nodes-base: 1.106.3 → 1.108.0 (via dependencies)
|
||||||
|
- **Node Database**: Rebuilt with 535 nodes from updated n8n packages
|
||||||
|
- **Node.js Compatibility**: Optimized for Node.js v22.17.0 LTS
|
||||||
|
- Enhanced better-sqlite3 native binary compatibility
|
||||||
|
- Fixed SQL.js fallback mode for environments without native binaries
|
||||||
|
- **CI/CD Improvements**: Fixed Rollup native module compatibility for GitHub Actions
|
||||||
|
- Added explicit platform-specific rollup binaries for cross-platform builds
|
||||||
|
- Resolved npm ci failures in Linux CI environment
|
||||||
|
- Fixed package-lock.json synchronization issues
|
||||||
|
- **Platform Support**: Enhanced cross-platform deployment compatibility
|
||||||
|
- macOS ARM64 and Linux x64 platform binaries included
|
||||||
|
- Improved npm package distribution with proper dependency resolution
|
||||||
|
- All 1,728+ tests passing with updated dependencies
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **CI/CD Pipeline**: Resolved test failures in GitHub Actions
|
||||||
|
- Fixed pyodide version conflicts between langchain dependencies
|
||||||
|
- Regenerated package-lock.json with proper dependency resolution
|
||||||
|
- Fixed Rollup native module loading in Linux CI environment
|
||||||
|
- **Database Compatibility**: Enhanced SQL.js fallback reliability
|
||||||
|
- Improved parameter binding and state management
|
||||||
|
- Fixed statement cleanup to prevent memory leaks
|
||||||
|
- **Deployment Reliability**: Better handling of platform-specific dependencies
|
||||||
|
- npm ci now works consistently across development and CI environments
|
||||||
|
|
||||||
|
## [2.10.5] - 2025-08-20
|
||||||
|
|
||||||
|
### Updated
|
||||||
|
- **n8n Dependencies**: Updated to latest versions for compatibility and new features
|
||||||
|
- n8n: 1.106.3 → 1.107.4
|
||||||
|
- n8n-core: 1.105.3 → 1.106.2
|
||||||
|
- n8n-workflow: 1.103.3 → 1.104.1
|
||||||
|
- @n8n/n8n-nodes-langchain: 1.105.3 → 1.106.2
|
||||||
|
- **Node Database**: Rebuilt with 535 nodes from updated n8n packages
|
||||||
|
- All tests passing with updated dependencies
|
||||||
|
|
||||||
## [2.10.4] - 2025-08-12
|
## [2.10.4] - 2025-08-12
|
||||||
|
|
||||||
@@ -1154,6 +1595,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
- Basic n8n and MCP integration
|
- Basic n8n and MCP integration
|
||||||
- Core workflow automation features
|
- Core workflow automation features
|
||||||
|
|
||||||
|
[2.12.0]: https://github.com/czlonkowski/n8n-mcp/compare/v2.11.3...v2.12.0
|
||||||
|
[2.11.3]: https://github.com/czlonkowski/n8n-mcp/compare/v2.11.2...v2.11.3
|
||||||
|
[2.11.2]: https://github.com/czlonkowski/n8n-mcp/compare/v2.11.1...v2.11.2
|
||||||
|
[2.11.1]: https://github.com/czlonkowski/n8n-mcp/compare/v2.11.0...v2.11.1
|
||||||
|
[2.11.0]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.9...v2.11.0
|
||||||
|
[2.10.9]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.8...v2.10.9
|
||||||
|
[2.10.8]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.5...v2.10.8
|
||||||
|
[2.10.5]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.4...v2.10.5
|
||||||
[2.10.4]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.3...v2.10.4
|
[2.10.4]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.3...v2.10.4
|
||||||
[2.10.3]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.2...v2.10.3
|
[2.10.3]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.2...v2.10.3
|
||||||
[2.10.2]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.1...v2.10.2
|
[2.10.2]: https://github.com/czlonkowski/n8n-mcp/compare/v2.10.1...v2.10.2
|
||||||
|
|||||||
34
docs/CODEX_SETUP.md
Normal file
34
docs/CODEX_SETUP.md
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# Codex Setup
|
||||||
|
|
||||||
|
Connect n8n-MCP to Codex for enhanced n8n workflow development.
|
||||||
|
|
||||||
|
## Update your Codex configuration
|
||||||
|
|
||||||
|
Go to your Codex settings at `~/.codex/config.toml` and add the following configuration:
|
||||||
|
|
||||||
|
### Basic configuration (documentation tools only):
|
||||||
|
```toml
|
||||||
|
[mcp_servers.n8n]
|
||||||
|
command = "npx"
|
||||||
|
args = ["n8n-mcp"]
|
||||||
|
env = { "MCP_MODE" = "stdio", "LOG_LEVEL" = "error", "DISABLE_CONSOLE_OUTPUT" = "true" }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Full configuration (with n8n management tools):
|
||||||
|
```toml
|
||||||
|
[mcp_servers.n8n]
|
||||||
|
command = "npx"
|
||||||
|
args = ["n8n-mcp"]
|
||||||
|
env = { "MCP_MODE" = "stdio", "LOG_LEVEL" = "error", "DISABLE_CONSOLE_OUTPUT" = "true", "N8N_API_URL" = "https://your-n8n-instance.com", "N8N_API_KEY" = "your-api-key" }
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure to replace `https://your-n8n-instance.com` with your actual n8n URL and `your-api-key` with your n8n API key.
|
||||||
|
|
||||||
|
## Managing Your MCP Server
|
||||||
|
Enter the Codex CLI and use the `/mcp` command to see server status and available tools.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Project Instructions
|
||||||
|
|
||||||
|
For optimal results, create an `AGENTS.md` file in your project root with the same instructions as the [main README's Claude Project Setup section](../README.md#-claude-project-setup).
|
||||||
@@ -65,6 +65,9 @@ docker run -d \
|
|||||||
| `NODE_ENV` | Environment: `development` or `production` | `production` | No |
|
| `NODE_ENV` | Environment: `development` or `production` | `production` | No |
|
||||||
| `LOG_LEVEL` | Logging level: `debug`, `info`, `warn`, `error` | `info` | No |
|
| `LOG_LEVEL` | Logging level: `debug`, `info`, `warn`, `error` | `info` | No |
|
||||||
| `NODE_DB_PATH` | Custom database path (v2.7.16+) | `/app/data/nodes.db` | No |
|
| `NODE_DB_PATH` | Custom database path (v2.7.16+) | `/app/data/nodes.db` | No |
|
||||||
|
| `AUTH_RATE_LIMIT_WINDOW` | Rate limit window in ms (v2.16.3+) | `900000` (15 min) | No |
|
||||||
|
| `AUTH_RATE_LIMIT_MAX` | Max auth attempts per window (v2.16.3+) | `20` | No |
|
||||||
|
| `WEBHOOK_SECURITY_MODE` | SSRF protection: `strict`/`moderate`/`permissive` (v2.16.3+) | `strict` | No |
|
||||||
|
|
||||||
*Either `AUTH_TOKEN` or `AUTH_TOKEN_FILE` must be set for HTTP mode. If both are set, `AUTH_TOKEN` takes precedence.
|
*Either `AUTH_TOKEN` or `AUTH_TOKEN_FILE` must be set for HTTP mode. If both are set, `AUTH_TOKEN` takes precedence.
|
||||||
|
|
||||||
@@ -283,7 +286,36 @@ docker ps --format "table {{.Names}}\t{{.Status}}"
|
|||||||
docker inspect n8n-mcp | jq '.[0].State.Health'
|
docker inspect n8n-mcp | jq '.[0].State.Health'
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔒 Security Considerations
|
## 🔒 Security Features (v2.16.3+)
|
||||||
|
|
||||||
|
### Rate Limiting
|
||||||
|
|
||||||
|
Protects against brute force authentication attacks:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Configure in .env or docker-compose.yml
|
||||||
|
AUTH_RATE_LIMIT_WINDOW=900000 # 15 minutes in milliseconds
|
||||||
|
AUTH_RATE_LIMIT_MAX=20 # 20 attempts per IP per window
|
||||||
|
```
|
||||||
|
|
||||||
|
### SSRF Protection
|
||||||
|
|
||||||
|
Prevents Server-Side Request Forgery when using webhook triggers:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For production (blocks localhost + private IPs + cloud metadata)
|
||||||
|
WEBHOOK_SECURITY_MODE=strict
|
||||||
|
|
||||||
|
# For local development with local n8n instance
|
||||||
|
WEBHOOK_SECURITY_MODE=moderate
|
||||||
|
|
||||||
|
# For internal testing only (allows private IPs)
|
||||||
|
WEBHOOK_SECURITY_MODE=permissive
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Cloud metadata endpoints (169.254.169.254, metadata.google.internal, etc.) are ALWAYS blocked in all modes.
|
||||||
|
|
||||||
|
## 🔒 Authentication
|
||||||
|
|
||||||
### Authentication
|
### Authentication
|
||||||
|
|
||||||
|
|||||||
@@ -196,6 +196,41 @@ docker ps -a | grep n8n-mcp | grep Exited | awk '{print $1}' | xargs -r docker r
|
|||||||
- Manually clean up containers periodically
|
- Manually clean up containers periodically
|
||||||
- Consider using HTTP mode instead
|
- Consider using HTTP mode instead
|
||||||
|
|
||||||
|
### Webhooks to Local n8n Fail (v2.16.3+)
|
||||||
|
|
||||||
|
**Symptoms:**
|
||||||
|
- `n8n_trigger_webhook_workflow` fails with "SSRF protection" error
|
||||||
|
- Error message: "SSRF protection: Localhost access is blocked"
|
||||||
|
- Webhooks work from n8n UI but not from n8n-MCP
|
||||||
|
|
||||||
|
**Root Cause:** Default strict SSRF protection blocks localhost access to prevent attacks.
|
||||||
|
|
||||||
|
**Solution:** Use moderate security mode for local development
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For Docker run
|
||||||
|
docker run -d \
|
||||||
|
--name n8n-mcp \
|
||||||
|
-e MCP_MODE=http \
|
||||||
|
-e AUTH_TOKEN=your-token \
|
||||||
|
-e WEBHOOK_SECURITY_MODE=moderate \
|
||||||
|
-p 3000:3000 \
|
||||||
|
ghcr.io/czlonkowski/n8n-mcp:latest
|
||||||
|
|
||||||
|
# For Docker Compose - add to environment:
|
||||||
|
services:
|
||||||
|
n8n-mcp:
|
||||||
|
environment:
|
||||||
|
WEBHOOK_SECURITY_MODE: moderate
|
||||||
|
```
|
||||||
|
|
||||||
|
**Security Modes Explained:**
|
||||||
|
- `strict` (default): Blocks localhost + private IPs + cloud metadata (production)
|
||||||
|
- `moderate`: Allows localhost, blocks private IPs + cloud metadata (local development)
|
||||||
|
- `permissive`: Allows localhost + private IPs, blocks cloud metadata (testing only)
|
||||||
|
|
||||||
|
**Important:** Always use `strict` mode in production. Cloud metadata is blocked in all modes.
|
||||||
|
|
||||||
### n8n API Connection Issues
|
### n8n API Connection Issues
|
||||||
|
|
||||||
**Symptoms:**
|
**Symptoms:**
|
||||||
|
|||||||
3491
docs/FINAL_AI_VALIDATION_SPEC.md
Normal file
3491
docs/FINAL_AI_VALIDATION_SPEC.md
Normal file
File diff suppressed because it is too large
Load Diff
371
docs/FLEXIBLE_INSTANCE_CONFIGURATION.md
Normal file
371
docs/FLEXIBLE_INSTANCE_CONFIGURATION.md
Normal file
@@ -0,0 +1,371 @@
|
|||||||
|
# Flexible Instance Configuration
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Flexible Instance Configuration feature enables n8n-mcp to serve multiple users with different n8n instances dynamically, without requiring separate deployments for each user. This feature is designed for scenarios where n8n-mcp is hosted centrally and needs to connect to different n8n instances based on runtime context.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Core Components
|
||||||
|
|
||||||
|
1. **InstanceContext Interface** (`src/types/instance-context.ts`)
|
||||||
|
- Runtime configuration container for instance-specific settings
|
||||||
|
- Optional fields for backward compatibility
|
||||||
|
- Comprehensive validation with security checks
|
||||||
|
|
||||||
|
2. **Dual-Mode API Client**
|
||||||
|
- **Singleton Mode**: Uses environment variables (backward compatible)
|
||||||
|
- **Instance Mode**: Uses runtime context for multi-instance support
|
||||||
|
- Automatic fallback between modes
|
||||||
|
|
||||||
|
3. **LRU Cache with Security**
|
||||||
|
- SHA-256 hashed cache keys for security
|
||||||
|
- 30-minute TTL with automatic cleanup
|
||||||
|
- Maximum 100 concurrent instances
|
||||||
|
- Secure dispose callbacks without logging sensitive data
|
||||||
|
|
||||||
|
4. **Session Management**
|
||||||
|
- HTTP server tracks session context
|
||||||
|
- Each session can have different instance configuration
|
||||||
|
- Automatic cleanup on session end
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Environment Variables
|
||||||
|
|
||||||
|
New environment variables for cache configuration:
|
||||||
|
|
||||||
|
- `INSTANCE_CACHE_MAX` - Maximum number of cached instances (default: 100, min: 1, max: 10000)
|
||||||
|
- `INSTANCE_CACHE_TTL_MINUTES` - Cache TTL in minutes (default: 30, min: 1, max: 1440/24 hours)
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```bash
|
||||||
|
# Increase cache size for high-volume deployments
|
||||||
|
export INSTANCE_CACHE_MAX=500
|
||||||
|
export INSTANCE_CACHE_TTL_MINUTES=60
|
||||||
|
```
|
||||||
|
|
||||||
|
### InstanceContext Structure
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
interface InstanceContext {
|
||||||
|
n8nApiUrl?: string; // n8n instance URL
|
||||||
|
n8nApiKey?: string; // API key for authentication
|
||||||
|
n8nApiTimeout?: number; // Request timeout in ms (default: 30000)
|
||||||
|
n8nApiMaxRetries?: number; // Max retry attempts (default: 3)
|
||||||
|
instanceId?: string; // Unique instance identifier
|
||||||
|
sessionId?: string; // Session identifier
|
||||||
|
metadata?: Record<string, any>; // Additional metadata
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Validation Rules
|
||||||
|
|
||||||
|
1. **URL Validation**:
|
||||||
|
- Must be valid HTTP/HTTPS URL
|
||||||
|
- No file://, javascript:, or other dangerous protocols
|
||||||
|
- Proper URL format with protocol and host
|
||||||
|
|
||||||
|
2. **API Key Validation**:
|
||||||
|
- Non-empty string required when provided
|
||||||
|
- No placeholder values (e.g., "YOUR_API_KEY")
|
||||||
|
- Case-insensitive placeholder detection
|
||||||
|
|
||||||
|
3. **Numeric Validation**:
|
||||||
|
- Timeout must be positive number (>0)
|
||||||
|
- Max retries must be non-negative (≥0)
|
||||||
|
- No Infinity or NaN values
|
||||||
|
|
||||||
|
## Usage Examples
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { getN8nApiClient } from './mcp/handlers-n8n-manager';
|
||||||
|
import { InstanceContext } from './types/instance-context';
|
||||||
|
|
||||||
|
// Create context for a specific instance
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiUrl: 'https://customer1.n8n.cloud',
|
||||||
|
n8nApiKey: 'customer1-api-key',
|
||||||
|
instanceId: 'customer1'
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get client for this instance
|
||||||
|
const client = getN8nApiClient(context);
|
||||||
|
if (client) {
|
||||||
|
// Use client for API operations
|
||||||
|
const workflows = await client.getWorkflows();
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### HTTP Headers for Multi-Tenant Support
|
||||||
|
|
||||||
|
When using the HTTP server mode, clients can pass instance-specific configuration via HTTP headers:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Example curl request with instance headers
|
||||||
|
curl -X POST http://localhost:3000/mcp \
|
||||||
|
-H "Authorization: Bearer your-auth-token" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "X-N8n-Url: https://instance1.n8n.cloud" \
|
||||||
|
-H "X-N8n-Key: instance1-api-key" \
|
||||||
|
-H "X-Instance-Id: instance-1" \
|
||||||
|
-H "X-Session-Id: session-123" \
|
||||||
|
-d '{"method": "n8n_list_workflows", "params": {}, "id": 1}'
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Supported Headers
|
||||||
|
|
||||||
|
- **X-N8n-Url**: The n8n instance URL (e.g., `https://instance.n8n.cloud`)
|
||||||
|
- **X-N8n-Key**: The API key for authentication with the n8n instance
|
||||||
|
- **X-Instance-Id**: A unique identifier for the instance (optional, for tracking)
|
||||||
|
- **X-Session-Id**: A session identifier (optional, for session tracking)
|
||||||
|
|
||||||
|
#### Header Extraction Logic
|
||||||
|
|
||||||
|
1. If either `X-N8n-Url` or `X-N8n-Key` header is present, an instance context is created
|
||||||
|
2. All headers are extracted and passed to the MCP server
|
||||||
|
3. The server uses the instance-specific configuration instead of environment variables
|
||||||
|
4. If no headers are present, the server falls back to environment variables (backward compatible)
|
||||||
|
|
||||||
|
#### Example: JavaScript Client
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const headers = {
|
||||||
|
'Authorization': 'Bearer your-auth-token',
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'X-N8n-Url': 'https://customer1.n8n.cloud',
|
||||||
|
'X-N8n-Key': 'customer1-api-key',
|
||||||
|
'X-Instance-Id': 'customer-1',
|
||||||
|
'X-Session-Id': 'session-456'
|
||||||
|
};
|
||||||
|
|
||||||
|
const response = await fetch('http://localhost:3000/mcp', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: headers,
|
||||||
|
body: JSON.stringify({
|
||||||
|
method: 'n8n_list_workflows',
|
||||||
|
params: {},
|
||||||
|
id: 1
|
||||||
|
})
|
||||||
|
});
|
||||||
|
|
||||||
|
const result = await response.json();
|
||||||
|
```
|
||||||
|
|
||||||
|
### HTTP Server Integration
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// In HTTP request handler
|
||||||
|
app.post('/mcp', (req, res) => {
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiUrl: req.headers['x-n8n-url'],
|
||||||
|
n8nApiKey: req.headers['x-n8n-key'],
|
||||||
|
sessionId: req.sessionID
|
||||||
|
};
|
||||||
|
|
||||||
|
// Context passed to handlers
|
||||||
|
const result = await handleRequest(req.body, context);
|
||||||
|
res.json(result);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Validation Example
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { validateInstanceContext } from './types/instance-context';
|
||||||
|
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiUrl: 'https://api.n8n.cloud',
|
||||||
|
n8nApiKey: 'valid-key'
|
||||||
|
};
|
||||||
|
|
||||||
|
const validation = validateInstanceContext(context);
|
||||||
|
if (!validation.valid) {
|
||||||
|
console.error('Validation errors:', validation.errors);
|
||||||
|
} else {
|
||||||
|
// Context is valid, proceed
|
||||||
|
const client = getN8nApiClient(context);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Security Features
|
||||||
|
|
||||||
|
### 1. Cache Key Hashing
|
||||||
|
- All cache keys use SHA-256 hashing with memoization
|
||||||
|
- Prevents sensitive data exposure in logs
|
||||||
|
- Example: `sha256(url:key:instance)` → 64-char hex string
|
||||||
|
- Memoization cache limited to 1000 entries
|
||||||
|
|
||||||
|
### 2. Enhanced Input Validation
|
||||||
|
- Field-specific error messages with detailed reasons
|
||||||
|
- URL protocol restrictions (HTTP/HTTPS only)
|
||||||
|
- API key placeholder detection (case-insensitive)
|
||||||
|
- Numeric range validation with specific error messages
|
||||||
|
- Example: "Invalid n8nApiUrl: ftp://example.com - URL must use HTTP or HTTPS protocol"
|
||||||
|
|
||||||
|
### 3. Secure Logging
|
||||||
|
- Only first 8 characters of cache keys logged
|
||||||
|
- No sensitive data in debug logs
|
||||||
|
- URL sanitization (domain only, no paths)
|
||||||
|
- Configuration fallback logging for debugging
|
||||||
|
|
||||||
|
### 4. Memory Management
|
||||||
|
- Configurable LRU cache with automatic eviction
|
||||||
|
- TTL-based expiration (configurable, default 30 minutes)
|
||||||
|
- Dispose callbacks for cleanup
|
||||||
|
- Maximum cache size limits with bounds checking
|
||||||
|
|
||||||
|
### 5. Concurrency Protection
|
||||||
|
- Mutex-based locking for cache operations
|
||||||
|
- Prevents duplicate client creation
|
||||||
|
- Simple lock checking with timeout
|
||||||
|
- Thread-safe cache operations
|
||||||
|
|
||||||
|
## Performance Optimization
|
||||||
|
|
||||||
|
### Cache Strategy
|
||||||
|
- **Max Size**: Configurable via `INSTANCE_CACHE_MAX` (default: 100)
|
||||||
|
- **TTL**: Configurable via `INSTANCE_CACHE_TTL_MINUTES` (default: 30)
|
||||||
|
- **Update on Access**: Age refreshed on each use
|
||||||
|
- **Eviction**: Least Recently Used (LRU) policy
|
||||||
|
- **Memoization**: Hash creation uses memoization for frequently used keys
|
||||||
|
|
||||||
|
### Cache Metrics
|
||||||
|
The system tracks comprehensive metrics:
|
||||||
|
- Cache hits and misses
|
||||||
|
- Hit rate percentage
|
||||||
|
- Eviction count
|
||||||
|
- Current size vs maximum size
|
||||||
|
- Operation timing
|
||||||
|
|
||||||
|
Retrieve metrics using:
|
||||||
|
```typescript
|
||||||
|
import { getInstanceCacheStatistics } from './mcp/handlers-n8n-manager';
|
||||||
|
console.log(getInstanceCacheStatistics());
|
||||||
|
```
|
||||||
|
|
||||||
|
### Benefits
|
||||||
|
- **Performance**: ~12ms average response time
|
||||||
|
- **Memory Efficient**: Minimal footprint per instance
|
||||||
|
- **Thread Safe**: Mutex protection for concurrent operations
|
||||||
|
- **Auto Cleanup**: Unused instances automatically evicted
|
||||||
|
- **No Memory Leaks**: Proper disposal callbacks
|
||||||
|
|
||||||
|
## Backward Compatibility
|
||||||
|
|
||||||
|
The feature maintains 100% backward compatibility:
|
||||||
|
|
||||||
|
1. **Environment Variables Still Work**:
|
||||||
|
- If no context provided, falls back to env vars
|
||||||
|
- Existing deployments continue working unchanged
|
||||||
|
|
||||||
|
2. **Optional Parameters**:
|
||||||
|
- All context fields are optional
|
||||||
|
- Missing fields use defaults or env vars
|
||||||
|
|
||||||
|
3. **API Unchanged**:
|
||||||
|
- Same handler signatures with optional context
|
||||||
|
- No breaking changes to existing code
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
Comprehensive test coverage ensures reliability:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all flexible instance tests
|
||||||
|
npm test -- tests/unit/flexible-instance-security-advanced.test.ts
|
||||||
|
npm test -- tests/unit/mcp/lru-cache-behavior.test.ts
|
||||||
|
npm test -- tests/unit/types/instance-context-coverage.test.ts
|
||||||
|
npm test -- tests/unit/mcp/handlers-n8n-manager-simple.test.ts
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test Coverage Areas
|
||||||
|
- Input validation edge cases
|
||||||
|
- Cache behavior and eviction
|
||||||
|
- Security (hashing, sanitization)
|
||||||
|
- Session management
|
||||||
|
- Memory leak prevention
|
||||||
|
- Concurrent access patterns
|
||||||
|
|
||||||
|
## Migration Guide
|
||||||
|
|
||||||
|
### For Existing Deployments
|
||||||
|
No changes required - environment variables continue to work.
|
||||||
|
|
||||||
|
### For Multi-Instance Support
|
||||||
|
|
||||||
|
1. **Update HTTP Server** (if using HTTP mode):
|
||||||
|
```typescript
|
||||||
|
// Add context extraction from headers
|
||||||
|
const context = extractInstanceContext(req);
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Pass Context to Handlers**:
|
||||||
|
```typescript
|
||||||
|
// Old way (still works)
|
||||||
|
await handleListWorkflows(params);
|
||||||
|
|
||||||
|
// New way (with instance context)
|
||||||
|
await handleListWorkflows(params, context);
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Configure Clients** to send instance information:
|
||||||
|
```typescript
|
||||||
|
// Client sends instance info in headers
|
||||||
|
headers: {
|
||||||
|
'X-N8n-Url': 'https://instance.n8n.cloud',
|
||||||
|
'X-N8n-Key': 'api-key',
|
||||||
|
'X-Instance-Id': 'customer-123'
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Monitoring
|
||||||
|
|
||||||
|
### Metrics to Track
|
||||||
|
- Cache hit/miss ratio
|
||||||
|
- Instance count in cache
|
||||||
|
- Average TTL utilization
|
||||||
|
- Memory usage per instance
|
||||||
|
- API client creation rate
|
||||||
|
|
||||||
|
### Debug Logging
|
||||||
|
Enable debug logs to monitor cache behavior:
|
||||||
|
```bash
|
||||||
|
LOG_LEVEL=debug npm start
|
||||||
|
```
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
1. **Maximum Instances**: 100 concurrent instances (configurable)
|
||||||
|
2. **TTL**: 30-minute cache lifetime (configurable)
|
||||||
|
3. **Memory**: ~1MB per cached instance (estimated)
|
||||||
|
4. **Validation**: Strict validation may reject edge cases
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
1. **Never Log Sensitive Data**: API keys are never logged
|
||||||
|
2. **Hash All Identifiers**: Use SHA-256 for cache keys
|
||||||
|
3. **Validate All Input**: Comprehensive validation before use
|
||||||
|
4. **Limit Resources**: Cache size and TTL limits
|
||||||
|
5. **Clean Up Properly**: Dispose callbacks for resource cleanup
|
||||||
|
|
||||||
|
## Future Enhancements
|
||||||
|
|
||||||
|
Potential improvements for future versions:
|
||||||
|
|
||||||
|
1. **Configurable Cache Settings**: Runtime cache size/TTL configuration
|
||||||
|
2. **Instance Metrics**: Per-instance usage tracking
|
||||||
|
3. **Rate Limiting**: Per-instance rate limits
|
||||||
|
4. **Instance Groups**: Logical grouping of instances
|
||||||
|
5. **Persistent Cache**: Optional Redis/database backing
|
||||||
|
6. **Instance Discovery**: Automatic instance detection
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
For issues or questions about flexible instance configuration:
|
||||||
|
1. Check validation errors for specific problems
|
||||||
|
2. Enable debug logging for detailed diagnostics
|
||||||
|
3. Review test files for usage examples
|
||||||
|
4. Open an issue on GitHub with details
|
||||||
@@ -73,6 +73,13 @@ PORT=3000
|
|||||||
# Optional: Enable n8n management tools
|
# Optional: Enable n8n management tools
|
||||||
# N8N_API_URL=https://your-n8n-instance.com
|
# N8N_API_URL=https://your-n8n-instance.com
|
||||||
# N8N_API_KEY=your-api-key-here
|
# N8N_API_KEY=your-api-key-here
|
||||||
|
# Security Configuration (v2.16.3+)
|
||||||
|
# Rate limiting (default: 20 attempts per 15 minutes)
|
||||||
|
AUTH_RATE_LIMIT_WINDOW=900000
|
||||||
|
AUTH_RATE_LIMIT_MAX=20
|
||||||
|
# SSRF protection mode (default: strict)
|
||||||
|
# Use 'moderate' for local n8n, 'strict' for production
|
||||||
|
WEBHOOK_SECURITY_MODE=strict
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# 2. Deploy with Docker
|
# 2. Deploy with Docker
|
||||||
@@ -592,6 +599,67 @@ curl -H "Authorization: Bearer $AUTH_TOKEN" \
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## 🔒 Security Features (v2.16.3+)
|
||||||
|
|
||||||
|
### Rate Limiting
|
||||||
|
|
||||||
|
Built-in rate limiting protects authentication endpoints from brute force attacks:
|
||||||
|
|
||||||
|
**Configuration:**
|
||||||
|
```bash
|
||||||
|
# Defaults (15 minutes window, 20 attempts per IP)
|
||||||
|
AUTH_RATE_LIMIT_WINDOW=900000 # milliseconds
|
||||||
|
AUTH_RATE_LIMIT_MAX=20
|
||||||
|
```
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
- Per-IP rate limiting with configurable window and max attempts
|
||||||
|
- Standard rate limit headers (RateLimit-Limit, RateLimit-Remaining, RateLimit-Reset)
|
||||||
|
- JSON-RPC formatted error responses
|
||||||
|
- Automatic IP tracking behind reverse proxies (requires TRUST_PROXY=1)
|
||||||
|
|
||||||
|
**Behavior:**
|
||||||
|
- First 20 attempts: Return 401 Unauthorized for invalid credentials
|
||||||
|
- Attempts 21+: Return 429 Too Many Requests with Retry-After header
|
||||||
|
- Counter resets after 15 minutes (configurable)
|
||||||
|
|
||||||
|
### SSRF Protection
|
||||||
|
|
||||||
|
Prevents Server-Side Request Forgery attacks when using webhook triggers:
|
||||||
|
|
||||||
|
**Three Security Modes:**
|
||||||
|
|
||||||
|
1. **Strict Mode (default)** - Production deployments
|
||||||
|
```bash
|
||||||
|
WEBHOOK_SECURITY_MODE=strict
|
||||||
|
```
|
||||||
|
- ✅ Block localhost (127.0.0.1, ::1)
|
||||||
|
- ✅ Block private IPs (10.x, 192.168.x, 172.16-31.x)
|
||||||
|
- ✅ Block cloud metadata (169.254.169.254, metadata.google.internal)
|
||||||
|
- ✅ DNS rebinding prevention
|
||||||
|
- 🎯 **Use for**: Cloud deployments, production environments
|
||||||
|
|
||||||
|
2. **Moderate Mode** - Local development with local n8n
|
||||||
|
```bash
|
||||||
|
WEBHOOK_SECURITY_MODE=moderate
|
||||||
|
```
|
||||||
|
- ✅ Allow localhost (for local n8n instances)
|
||||||
|
- ✅ Block private IPs
|
||||||
|
- ✅ Block cloud metadata
|
||||||
|
- ✅ DNS rebinding prevention
|
||||||
|
- 🎯 **Use for**: Development with n8n on localhost:5678
|
||||||
|
|
||||||
|
3. **Permissive Mode** - Internal networks only
|
||||||
|
```bash
|
||||||
|
WEBHOOK_SECURITY_MODE=permissive
|
||||||
|
```
|
||||||
|
- ✅ Allow localhost and private IPs
|
||||||
|
- ✅ Block cloud metadata (always blocked)
|
||||||
|
- ✅ DNS rebinding prevention
|
||||||
|
- 🎯 **Use for**: Internal testing (NOT for production)
|
||||||
|
|
||||||
|
**Important:** Cloud metadata endpoints are ALWAYS blocked in all modes for security.
|
||||||
|
|
||||||
## 🔒 Security Best Practices
|
## 🔒 Security Best Practices
|
||||||
|
|
||||||
### 1. Token Management
|
### 1. Token Management
|
||||||
|
|||||||
724
docs/LIBRARY_USAGE.md
Normal file
724
docs/LIBRARY_USAGE.md
Normal file
@@ -0,0 +1,724 @@
|
|||||||
|
# Library Usage Guide - Multi-Tenant / Hosted Deployments
|
||||||
|
|
||||||
|
This guide covers using n8n-mcp as a library dependency for building multi-tenant hosted services.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
n8n-mcp can be used as a Node.js library to build multi-tenant backends that provide MCP services to multiple users or instances. The package exports all necessary components for integration into your existing services.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm install n8n-mcp
|
||||||
|
```
|
||||||
|
|
||||||
|
## Core Concepts
|
||||||
|
|
||||||
|
### Library Mode vs CLI Mode
|
||||||
|
|
||||||
|
- **CLI Mode** (default): Single-player usage via `npx n8n-mcp` or Docker
|
||||||
|
- **Library Mode**: Multi-tenant usage by importing and using the `N8NMCPEngine` class
|
||||||
|
|
||||||
|
### Instance Context
|
||||||
|
|
||||||
|
The `InstanceContext` type allows you to pass per-request configuration to the MCP engine:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
interface InstanceContext {
|
||||||
|
// Instance-specific n8n API configuration
|
||||||
|
n8nApiUrl?: string;
|
||||||
|
n8nApiKey?: string;
|
||||||
|
n8nApiTimeout?: number;
|
||||||
|
n8nApiMaxRetries?: number;
|
||||||
|
|
||||||
|
// Instance identification
|
||||||
|
instanceId?: string;
|
||||||
|
sessionId?: string;
|
||||||
|
|
||||||
|
// Extensible metadata
|
||||||
|
metadata?: Record<string, any>;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Basic Example
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import express from 'express';
|
||||||
|
import { N8NMCPEngine } from 'n8n-mcp';
|
||||||
|
|
||||||
|
const app = express();
|
||||||
|
const mcpEngine = new N8NMCPEngine({
|
||||||
|
sessionTimeout: 3600000, // 1 hour
|
||||||
|
logLevel: 'info'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Handle MCP requests with per-user context
|
||||||
|
app.post('/mcp', async (req, res) => {
|
||||||
|
const instanceContext = {
|
||||||
|
n8nApiUrl: req.user.n8nUrl,
|
||||||
|
n8nApiKey: req.user.n8nApiKey,
|
||||||
|
instanceId: req.user.id
|
||||||
|
};
|
||||||
|
|
||||||
|
await mcpEngine.processRequest(req, res, instanceContext);
|
||||||
|
});
|
||||||
|
|
||||||
|
app.listen(3000);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Multi-Tenant Backend Example
|
||||||
|
|
||||||
|
This example shows a complete multi-tenant implementation with user authentication and instance management:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import express from 'express';
|
||||||
|
import { N8NMCPEngine, InstanceContext, validateInstanceContext } from 'n8n-mcp';
|
||||||
|
|
||||||
|
const app = express();
|
||||||
|
const mcpEngine = new N8NMCPEngine({
|
||||||
|
sessionTimeout: 3600000, // 1 hour
|
||||||
|
logLevel: 'info'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Start MCP engine
|
||||||
|
await mcpEngine.start();
|
||||||
|
|
||||||
|
// Authentication middleware
|
||||||
|
const authenticate = async (req, res, next) => {
|
||||||
|
const token = req.headers.authorization?.replace('Bearer ', '');
|
||||||
|
if (!token) {
|
||||||
|
return res.status(401).json({ error: 'Unauthorized' });
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify token and attach user to request
|
||||||
|
req.user = await getUserFromToken(token);
|
||||||
|
next();
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get instance configuration from database
|
||||||
|
const getInstanceConfig = async (instanceId: string, userId: string) => {
|
||||||
|
// Your database logic here
|
||||||
|
const instance = await db.instances.findOne({
|
||||||
|
where: { id: instanceId, userId }
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!instance) {
|
||||||
|
throw new Error('Instance not found');
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
n8nApiUrl: instance.n8nUrl,
|
||||||
|
n8nApiKey: await decryptApiKey(instance.encryptedApiKey),
|
||||||
|
instanceId: instance.id
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
// MCP endpoint with per-instance context
|
||||||
|
app.post('/api/instances/:instanceId/mcp', authenticate, async (req, res) => {
|
||||||
|
try {
|
||||||
|
// Get instance configuration
|
||||||
|
const instance = await getInstanceConfig(req.params.instanceId, req.user.id);
|
||||||
|
|
||||||
|
// Create instance context
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiUrl: instance.n8nApiUrl,
|
||||||
|
n8nApiKey: instance.n8nApiKey,
|
||||||
|
instanceId: instance.instanceId,
|
||||||
|
metadata: {
|
||||||
|
userId: req.user.id,
|
||||||
|
userAgent: req.headers['user-agent'],
|
||||||
|
ip: req.ip
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Validate context before processing
|
||||||
|
const validation = validateInstanceContext(context);
|
||||||
|
if (!validation.valid) {
|
||||||
|
return res.status(400).json({
|
||||||
|
error: 'Invalid instance configuration',
|
||||||
|
details: validation.errors
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process request with instance context
|
||||||
|
await mcpEngine.processRequest(req, res, context);
|
||||||
|
|
||||||
|
} catch (error) {
|
||||||
|
console.error('MCP request error:', error);
|
||||||
|
res.status(500).json({ error: 'Internal server error' });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Health endpoint
|
||||||
|
app.get('/health', async (req, res) => {
|
||||||
|
const health = await mcpEngine.healthCheck();
|
||||||
|
res.status(health.status === 'healthy' ? 200 : 503).json(health);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Graceful shutdown
|
||||||
|
process.on('SIGTERM', async () => {
|
||||||
|
await mcpEngine.shutdown();
|
||||||
|
process.exit(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
app.listen(3000);
|
||||||
|
```
|
||||||
|
|
||||||
|
## API Reference
|
||||||
|
|
||||||
|
### N8NMCPEngine
|
||||||
|
|
||||||
|
#### Constructor
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
new N8NMCPEngine(options?: {
|
||||||
|
sessionTimeout?: number; // Session TTL in ms (default: 1800000 = 30min)
|
||||||
|
logLevel?: 'error' | 'warn' | 'info' | 'debug'; // Default: 'info'
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Methods
|
||||||
|
|
||||||
|
##### `async processRequest(req, res, context?)`
|
||||||
|
|
||||||
|
Process a single MCP request with optional instance context.
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `req`: Express request object
|
||||||
|
- `res`: Express response object
|
||||||
|
- `context` (optional): InstanceContext with per-instance configuration
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```typescript
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiUrl: 'https://instance1.n8n.cloud',
|
||||||
|
n8nApiKey: 'instance1-key',
|
||||||
|
instanceId: 'tenant-123'
|
||||||
|
};
|
||||||
|
|
||||||
|
await engine.processRequest(req, res, context);
|
||||||
|
```
|
||||||
|
|
||||||
|
##### `async healthCheck()`
|
||||||
|
|
||||||
|
Get engine health status for monitoring.
|
||||||
|
|
||||||
|
**Returns:** `EngineHealth`
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
status: 'healthy' | 'unhealthy';
|
||||||
|
uptime: number; // seconds
|
||||||
|
sessionActive: boolean;
|
||||||
|
memoryUsage: {
|
||||||
|
used: number;
|
||||||
|
total: number;
|
||||||
|
unit: string;
|
||||||
|
};
|
||||||
|
version: string;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```typescript
|
||||||
|
app.get('/health', async (req, res) => {
|
||||||
|
const health = await engine.healthCheck();
|
||||||
|
res.status(health.status === 'healthy' ? 200 : 503).json(health);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
##### `getSessionInfo()`
|
||||||
|
|
||||||
|
Get current session information for debugging.
|
||||||
|
|
||||||
|
**Returns:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
active: boolean;
|
||||||
|
sessionId?: string;
|
||||||
|
age?: number; // milliseconds
|
||||||
|
sessions?: {
|
||||||
|
total: number;
|
||||||
|
active: number;
|
||||||
|
expired: number;
|
||||||
|
max: number;
|
||||||
|
sessionIds: string[];
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
##### `async start()`
|
||||||
|
|
||||||
|
Start the engine (for standalone mode). Not needed when using `processRequest()` directly.
|
||||||
|
|
||||||
|
##### `async shutdown()`
|
||||||
|
|
||||||
|
Graceful shutdown for service lifecycle management.
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```typescript
|
||||||
|
process.on('SIGTERM', async () => {
|
||||||
|
await engine.shutdown();
|
||||||
|
process.exit(0);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Types
|
||||||
|
|
||||||
|
#### InstanceContext
|
||||||
|
|
||||||
|
Configuration for a specific user instance:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
interface InstanceContext {
|
||||||
|
n8nApiUrl?: string;
|
||||||
|
n8nApiKey?: string;
|
||||||
|
n8nApiTimeout?: number;
|
||||||
|
n8nApiMaxRetries?: number;
|
||||||
|
instanceId?: string;
|
||||||
|
sessionId?: string;
|
||||||
|
metadata?: Record<string, any>;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Validation Functions
|
||||||
|
|
||||||
|
##### `validateInstanceContext(context: InstanceContext)`
|
||||||
|
|
||||||
|
Validate and sanitize instance context.
|
||||||
|
|
||||||
|
**Returns:**
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
valid: boolean;
|
||||||
|
errors?: string[];
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```typescript
|
||||||
|
import { validateInstanceContext } from 'n8n-mcp';
|
||||||
|
|
||||||
|
const validation = validateInstanceContext(context);
|
||||||
|
if (!validation.valid) {
|
||||||
|
console.error('Invalid context:', validation.errors);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
##### `isInstanceContext(obj: any)`
|
||||||
|
|
||||||
|
Type guard to check if an object is a valid InstanceContext.
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```typescript
|
||||||
|
import { isInstanceContext } from 'n8n-mcp';
|
||||||
|
|
||||||
|
if (isInstanceContext(req.body.context)) {
|
||||||
|
// TypeScript knows this is InstanceContext
|
||||||
|
await engine.processRequest(req, res, req.body.context);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Session Management
|
||||||
|
|
||||||
|
### Session Strategies
|
||||||
|
|
||||||
|
The MCP engine supports flexible session ID formats:
|
||||||
|
|
||||||
|
- **UUIDv4**: Internal n8n-mcp format (default)
|
||||||
|
- **Instance-prefixed**: `instance-{userId}-{hash}-{uuid}` for multi-tenant isolation
|
||||||
|
- **Custom formats**: Any non-empty string for mcp-remote and other proxies
|
||||||
|
|
||||||
|
Session validation happens via transport lookup, not format validation. This ensures compatibility with all MCP clients.
|
||||||
|
|
||||||
|
### Multi-Tenant Configuration
|
||||||
|
|
||||||
|
Set these environment variables for multi-tenant mode:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enable multi-tenant mode
|
||||||
|
ENABLE_MULTI_TENANT=true
|
||||||
|
|
||||||
|
# Session strategy: "instance" (default) or "shared"
|
||||||
|
MULTI_TENANT_SESSION_STRATEGY=instance
|
||||||
|
```
|
||||||
|
|
||||||
|
**Session Strategies:**
|
||||||
|
|
||||||
|
- **instance** (recommended): Each tenant gets isolated sessions
|
||||||
|
- Session ID: `instance-{instanceId}-{configHash}-{uuid}`
|
||||||
|
- Better isolation and security
|
||||||
|
- Easier debugging per tenant
|
||||||
|
|
||||||
|
- **shared**: Multiple tenants share sessions with context switching
|
||||||
|
- More efficient for high tenant count
|
||||||
|
- Requires careful context management
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
### API Key Management
|
||||||
|
|
||||||
|
Always encrypt API keys server-side. Note that the example below is simplified for illustration: a production implementation must generate a fresh random IV per encryption and persist both the IV and the GCM authentication tag (`cipher.getAuthTag()` / `decipher.setAuthTag()`) alongside the ciphertext, or decryption will fail:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { createCipheriv, createDecipheriv } from 'crypto';
|
||||||
|
|
||||||
|
// Encrypt before storing
|
||||||
|
const encryptApiKey = (apiKey: string) => {
|
||||||
|
const cipher = createCipheriv('aes-256-gcm', encryptionKey, iv);
|
||||||
|
return cipher.update(apiKey, 'utf8', 'hex') + cipher.final('hex');
|
||||||
|
};
|
||||||
|
|
||||||
|
// Decrypt before using
|
||||||
|
const decryptApiKey = (encrypted: string) => {
|
||||||
|
const decipher = createDecipheriv('aes-256-gcm', encryptionKey, iv);
|
||||||
|
return decipher.update(encrypted, 'hex', 'utf8') + decipher.final('utf8');
|
||||||
|
};
|
||||||
|
|
||||||
|
// Use decrypted key in context
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiKey: await decryptApiKey(instance.encryptedApiKey),
|
||||||
|
// ...
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
### Input Validation
|
||||||
|
|
||||||
|
Always validate instance context before processing:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { validateInstanceContext } from 'n8n-mcp';
|
||||||
|
|
||||||
|
const validation = validateInstanceContext(context);
|
||||||
|
if (!validation.valid) {
|
||||||
|
throw new Error(`Invalid context: ${validation.errors?.join(', ')}`);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Rate Limiting
|
||||||
|
|
||||||
|
Implement rate limiting per tenant. The `keyGenerator` below keys limits on the authenticated user ID, falling back to the client IP for unauthenticated requests:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import rateLimit from 'express-rate-limit';
|
||||||
|
|
||||||
|
const limiter = rateLimit({
|
||||||
|
windowMs: 15 * 60 * 1000, // 15 minutes
|
||||||
|
max: 100, // limit each IP to 100 requests per windowMs
|
||||||
|
keyGenerator: (req) => req.user?.id || req.ip
|
||||||
|
});
|
||||||
|
|
||||||
|
app.post('/api/instances/:instanceId/mcp', authenticate, limiter, async (req, res) => {
|
||||||
|
// ...
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
Always wrap MCP requests in try-catch blocks:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
app.post('/api/instances/:instanceId/mcp', authenticate, async (req, res) => {
|
||||||
|
try {
|
||||||
|
const context = await getInstanceConfig(req.params.instanceId, req.user.id);
|
||||||
|
await mcpEngine.processRequest(req, res, context);
|
||||||
|
} catch (error) {
|
||||||
|
console.error('MCP error:', error);
|
||||||
|
|
||||||
|
// Don't leak internal errors to clients
|
||||||
|
if (error.message.includes('not found')) {
|
||||||
|
return res.status(404).json({ error: 'Instance not found' });
|
||||||
|
}
|
||||||
|
|
||||||
|
res.status(500).json({ error: 'Internal server error' });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Monitoring
|
||||||
|
|
||||||
|
### Health Checks
|
||||||
|
|
||||||
|
Set up periodic health checks:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
setInterval(async () => {
|
||||||
|
const health = await mcpEngine.healthCheck();
|
||||||
|
|
||||||
|
if (health.status === 'unhealthy') {
|
||||||
|
console.error('MCP engine unhealthy:', health);
|
||||||
|
// Alert your monitoring system
|
||||||
|
}
|
||||||
|
|
||||||
|
// Log metrics
|
||||||
|
console.log('MCP engine metrics:', {
|
||||||
|
uptime: health.uptime,
|
||||||
|
memory: health.memoryUsage,
|
||||||
|
sessionActive: health.sessionActive
|
||||||
|
});
|
||||||
|
}, 60000); // Every minute
|
||||||
|
```
|
||||||
|
|
||||||
|
### Session Monitoring
|
||||||
|
|
||||||
|
Track active sessions:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
app.get('/admin/sessions', authenticate, async (req, res) => {
|
||||||
|
if (!req.user.isAdmin) {
|
||||||
|
return res.status(403).json({ error: 'Forbidden' });
|
||||||
|
}
|
||||||
|
|
||||||
|
const sessionInfo = mcpEngine.getSessionInfo();
|
||||||
|
res.json(sessionInfo);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
### Unit Testing
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { N8NMCPEngine, InstanceContext } from 'n8n-mcp';
|
||||||
|
|
||||||
|
describe('MCP Engine', () => {
|
||||||
|
let engine: N8NMCPEngine;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
engine = new N8NMCPEngine({ logLevel: 'error' });
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(async () => {
|
||||||
|
await engine.shutdown();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should process request with context', async () => {
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiUrl: 'https://test.n8n.io',
|
||||||
|
n8nApiKey: 'test-key',
|
||||||
|
instanceId: 'test-instance'
|
||||||
|
};
|
||||||
|
|
||||||
|
const mockReq = createMockRequest();
|
||||||
|
const mockRes = createMockResponse();
|
||||||
|
|
||||||
|
await engine.processRequest(mockReq, mockRes, context);
|
||||||
|
|
||||||
|
expect(mockRes.status).toBe(200);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Integration Testing
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import request from 'supertest';
|
||||||
|
import { createApp } from './app';
|
||||||
|
|
||||||
|
describe('Multi-tenant MCP API', () => {
|
||||||
|
let app;
|
||||||
|
let authToken;
|
||||||
|
|
||||||
|
beforeAll(async () => {
|
||||||
|
app = await createApp();
|
||||||
|
authToken = await getTestAuthToken();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle MCP request for instance', async () => {
|
||||||
|
const response = await request(app)
|
||||||
|
.post('/api/instances/test-instance/mcp')
|
||||||
|
.set('Authorization', `Bearer ${authToken}`)
|
||||||
|
.send({
|
||||||
|
jsonrpc: '2.0',
|
||||||
|
method: 'initialize',
|
||||||
|
params: {
|
||||||
|
protocolVersion: '2024-11-05',
|
||||||
|
capabilities: {}
|
||||||
|
},
|
||||||
|
id: 1
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(response.status).toBe(200);
|
||||||
|
expect(response.body.result).toBeDefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deployment Considerations
|
||||||
|
|
||||||
|
### Environment Variables
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Required for multi-tenant mode
|
||||||
|
ENABLE_MULTI_TENANT=true
|
||||||
|
MULTI_TENANT_SESSION_STRATEGY=instance
|
||||||
|
|
||||||
|
# Optional: Logging
|
||||||
|
LOG_LEVEL=info
|
||||||
|
DISABLE_CONSOLE_OUTPUT=false
|
||||||
|
|
||||||
|
# Optional: Session configuration
|
||||||
|
SESSION_TIMEOUT=1800000 # 30 minutes in milliseconds
|
||||||
|
MAX_SESSIONS=100
|
||||||
|
|
||||||
|
# Optional: Performance
|
||||||
|
NODE_ENV=production
|
||||||
|
```
|
||||||
|
|
||||||
|
### Docker Deployment
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
FROM node:20-alpine
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
COPY package*.json ./
|
||||||
|
RUN npm ci --only=production
|
||||||
|
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
ENV NODE_ENV=production
|
||||||
|
ENV ENABLE_MULTI_TENANT=true
|
||||||
|
ENV LOG_LEVEL=info
|
||||||
|
|
||||||
|
EXPOSE 3000
|
||||||
|
|
||||||
|
CMD ["node", "dist/server.js"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Kubernetes Deployment
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: n8n-mcp-backend
|
||||||
|
spec:
|
||||||
|
replicas: 3
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: n8n-mcp-backend
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: n8n-mcp-backend
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: backend
|
||||||
|
image: your-registry/n8n-mcp-backend:latest
|
||||||
|
ports:
|
||||||
|
- containerPort: 3000
|
||||||
|
env:
|
||||||
|
- name: ENABLE_MULTI_TENANT
|
||||||
|
value: "true"
|
||||||
|
- name: LOG_LEVEL
|
||||||
|
value: "info"
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
memory: "256Mi"
|
||||||
|
cpu: "250m"
|
||||||
|
limits:
|
||||||
|
memory: "512Mi"
|
||||||
|
cpu: "500m"
|
||||||
|
livenessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /health
|
||||||
|
port: 3000
|
||||||
|
initialDelaySeconds: 10
|
||||||
|
periodSeconds: 30
|
||||||
|
readinessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /health
|
||||||
|
port: 3000
|
||||||
|
initialDelaySeconds: 5
|
||||||
|
periodSeconds: 10
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Complete Multi-Tenant SaaS Example
|
||||||
|
|
||||||
|
For a complete implementation example, see:
|
||||||
|
- [n8n-mcp-backend](https://github.com/czlonkowski/n8n-mcp-backend) - Full hosted service implementation
|
||||||
|
|
||||||
|
### Migration from Single-Player
|
||||||
|
|
||||||
|
If you're migrating from single-player (CLI/Docker) to multi-tenant:
|
||||||
|
|
||||||
|
1. **Keep backward compatibility** - Use environment fallback:
|
||||||
|
```typescript
|
||||||
|
const context: InstanceContext = {
|
||||||
|
n8nApiUrl: instanceUrl || process.env.N8N_API_URL,
|
||||||
|
n8nApiKey: instanceKey || process.env.N8N_API_KEY,
|
||||||
|
instanceId: instanceId || 'default'
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Gradual rollout** - Start with a feature flag:
|
||||||
|
```typescript
|
||||||
|
const isMultiTenant = process.env.ENABLE_MULTI_TENANT === 'true';
|
||||||
|
|
||||||
|
if (isMultiTenant) {
|
||||||
|
const context = await getInstanceConfig(req.params.instanceId);
|
||||||
|
await engine.processRequest(req, res, context);
|
||||||
|
} else {
|
||||||
|
// Legacy single-player mode
|
||||||
|
await engine.processRequest(req, res);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
#### Module Resolution Errors
|
||||||
|
|
||||||
|
If you see `Cannot find module 'n8n-mcp'`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clear node_modules and reinstall
|
||||||
|
rm -rf node_modules package-lock.json
|
||||||
|
npm install
|
||||||
|
|
||||||
|
# Verify package has types field
|
||||||
|
npm info n8n-mcp
|
||||||
|
|
||||||
|
# Check TypeScript can resolve it
|
||||||
|
npx tsc --noEmit
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Session ID Validation Errors
|
||||||
|
|
||||||
|
If you see `Invalid session ID format` errors:
|
||||||
|
|
||||||
|
- Ensure you're using n8n-mcp v2.18.9 or later
|
||||||
|
- Session IDs can be any non-empty string
|
||||||
|
- No need to generate UUIDs - use your own format
|
||||||
|
|
||||||
|
#### Memory Leaks
|
||||||
|
|
||||||
|
If memory usage grows over time:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Ensure proper cleanup
|
||||||
|
process.on('SIGTERM', async () => {
|
||||||
|
await engine.shutdown();
|
||||||
|
process.exit(0);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Monitor session count
|
||||||
|
const sessionInfo = engine.getSessionInfo();
|
||||||
|
console.log('Active sessions:', sessionInfo.sessions?.active);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Further Reading
|
||||||
|
|
||||||
|
- [MCP Protocol Specification](https://modelcontextprotocol.io/docs)
|
||||||
|
- [n8n API Documentation](https://docs.n8n.io/api/)
|
||||||
|
- [Express.js Guide](https://expressjs.com/en/guide/routing.html)
|
||||||
|
- [n8n-mcp Main README](../README.md)
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
- **Issues**: [GitHub Issues](https://github.com/czlonkowski/n8n-mcp/issues)
|
||||||
|
- **Discussions**: [GitHub Discussions](https://github.com/czlonkowski/n8n-mcp/discussions)
|
||||||
|
- **Security**: For security issues, see [SECURITY.md](../SECURITY.md)
|
||||||
83
docs/MULTI_APP_INTEGRATION.md
Normal file
83
docs/MULTI_APP_INTEGRATION.md
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
# Multi-App Integration Guide
|
||||||
|
|
||||||
|
This guide explains how session restoration works in n8n-mcp for multi-tenant deployments.
|
||||||
|
|
||||||
|
## Session Restoration: Warm Start Pattern
|
||||||
|
|
||||||
|
When a container restarts, existing client sessions are lost. The warm start pattern allows clients to seamlessly restore sessions without manual intervention.
|
||||||
|
|
||||||
|
### How It Works
|
||||||
|
|
||||||
|
1. **Client sends request** with existing session ID after restart
|
||||||
|
2. **Server detects** unknown session ID
|
||||||
|
3. **Restoration hook** is called to load session context from your database
|
||||||
|
4. **New session created** using restored context
|
||||||
|
5. **Current request handled** immediately through new transport
|
||||||
|
6. **Client receives** standard MCP error `-32000` (Server not initialized)
|
||||||
|
7. **Client auto-retries** with initialize request on same connection
|
||||||
|
8. **Session fully restored** and client continues normally
|
||||||
|
|
||||||
|
### Key Features
|
||||||
|
|
||||||
|
- **Zero client changes**: Standard MCP clients auto-retry on -32000
|
||||||
|
- **Single HTTP round-trip**: No extra network requests needed
|
||||||
|
- **Concurrent-safe**: Idempotency guards prevent duplicate restoration
|
||||||
|
- **Automatic cleanup**: Failed restorations clean up resources automatically
|
||||||
|
|
||||||
|
### Implementation
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { SingleSessionHTTPServer } from 'n8n-mcp';
|
||||||
|
|
||||||
|
const server = new SingleSessionHTTPServer({
|
||||||
|
// Hook to load session context from your storage
|
||||||
|
onSessionNotFound: async (sessionId) => {
|
||||||
|
const session = await database.loadSession(sessionId);
|
||||||
|
if (!session || session.expired) {
|
||||||
|
return null; // Reject restoration
|
||||||
|
}
|
||||||
|
return session.instanceContext; // Restore session
|
||||||
|
},
|
||||||
|
|
||||||
|
// Optional: Configure timeouts and retries
|
||||||
|
sessionRestorationTimeout: 5000, // 5 seconds (default)
|
||||||
|
sessionRestorationRetries: 2, // Retry on transient failures
|
||||||
|
sessionRestorationRetryDelay: 100 // Delay between retries
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Session Lifecycle Events
|
||||||
|
|
||||||
|
Track session restoration for metrics and debugging:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
const server = new SingleSessionHTTPServer({
|
||||||
|
sessionEvents: {
|
||||||
|
onSessionRestored: (sessionId, context) => {
|
||||||
|
console.log(`Session ${sessionId} restored`);
|
||||||
|
metrics.increment('session.restored');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Error Handling
|
||||||
|
|
||||||
|
The restoration hook can return three outcomes:
|
||||||
|
|
||||||
|
- **Return context**: Session is restored successfully
|
||||||
|
- **Return null/undefined**: Session is rejected (client gets 400 Bad Request)
|
||||||
|
- **Throw error**: Restoration failed (client gets 500 Internal Server Error)
|
||||||
|
|
||||||
|
Timeout errors are never retried, since the restoration attempt has already exceeded its time budget.
|
||||||
|
|
||||||
|
### Concurrency Safety
|
||||||
|
|
||||||
|
Multiple concurrent requests for the same session ID are handled safely:
|
||||||
|
|
||||||
|
- First request triggers restoration
|
||||||
|
- Subsequent requests reuse the restored session
|
||||||
|
- No duplicate session creation
|
||||||
|
- No race conditions
|
||||||
|
|
||||||
|
This ensures correct behavior even under high load or network retries.
|
||||||
@@ -1,62 +0,0 @@
|
|||||||
# PR #104 Test Suite Improvements Summary
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
Based on comprehensive review feedback from PR #104, we've significantly improved the test suite quality, organization, and coverage.
|
|
||||||
|
|
||||||
## Test Results
|
|
||||||
- **Before:** 78 failing tests
|
|
||||||
- **After:** 0 failing tests (1,356 passed, 19 skipped)
|
|
||||||
- **Coverage:** 85.34% statements, 85.3% branches
|
|
||||||
|
|
||||||
## Key Improvements
|
|
||||||
|
|
||||||
### 1. Fixed All Test Failures
|
|
||||||
- Fixed logger test spy issues by properly handling DEBUG environment variable
|
|
||||||
- Fixed MSW configuration test by restoring environment variables
|
|
||||||
- Fixed workflow validator tests by adding proper node connections
|
|
||||||
- Fixed mock setup issues in edge case tests
|
|
||||||
|
|
||||||
### 2. Improved Test Organization
|
|
||||||
- Split large config-validator.test.ts (1,075 lines) into 4 focused files:
|
|
||||||
- config-validator-basic.test.ts
|
|
||||||
- config-validator-node-specific.test.ts
|
|
||||||
- config-validator-security.test.ts
|
|
||||||
- config-validator-edge-cases.test.ts
|
|
||||||
|
|
||||||
### 3. Enhanced Test Coverage
|
|
||||||
- Added comprehensive edge case tests for all major validators
|
|
||||||
- Added null/undefined handling tests
|
|
||||||
- Added boundary value tests
|
|
||||||
- Added performance tests with CI-aware timeouts
|
|
||||||
- Added security validation tests
|
|
||||||
|
|
||||||
### 4. Improved Test Quality
|
|
||||||
- Fixed test naming conventions (100% compliance with "should X when Y" pattern)
|
|
||||||
- Added JSDoc comments to test utilities and factories
|
|
||||||
- Created comprehensive test documentation (tests/README.md)
|
|
||||||
- Improved test isolation to prevent cross-test pollution
|
|
||||||
|
|
||||||
### 5. New Features
|
|
||||||
- Implemented validateBatch method for ConfigValidator
|
|
||||||
- Added test factories for better test data management
|
|
||||||
- Created test utilities for common scenarios
|
|
||||||
|
|
||||||
## Files Modified
|
|
||||||
- 7 existing test files fixed
|
|
||||||
- 8 new test files created
|
|
||||||
- 1 source file enhanced (ConfigValidator)
|
|
||||||
- 4 debug files removed before commit
|
|
||||||
|
|
||||||
## Skipped Tests
|
|
||||||
19 tests remain skipped with documented reasons:
|
|
||||||
- FTS5 search sync test (database corruption in CI)
|
|
||||||
- Template clearing (not implemented)
|
|
||||||
- Mock API configuration tests
|
|
||||||
- Duplicate edge case tests with mocking issues (working versions exist)
|
|
||||||
|
|
||||||
## Next Steps
|
|
||||||
The only remaining task from the improvement plan is:
|
|
||||||
- Add performance regression tests and boundaries (low priority, future sprint)
|
|
||||||
|
|
||||||
## Conclusion
|
|
||||||
The test suite is now robust, well-organized, and provides excellent coverage. All critical issues have been resolved, and the codebase is ready for merge.
|
|
||||||
@@ -105,6 +105,9 @@ These are automatically set by the Railway template:
|
|||||||
| `CORS_ORIGIN` | `*` | Allow any origin |
|
| `CORS_ORIGIN` | `*` | Allow any origin |
|
||||||
| `HOST` | `0.0.0.0` | Listen on all interfaces |
|
| `HOST` | `0.0.0.0` | Listen on all interfaces |
|
||||||
| `PORT` | (Railway provides) | Don't set manually |
|
| `PORT` | (Railway provides) | Don't set manually |
|
||||||
|
| `AUTH_RATE_LIMIT_WINDOW` | `900000` (15 min) | Rate limit window (v2.16.3+) |
|
||||||
|
| `AUTH_RATE_LIMIT_MAX` | `20` | Max auth attempts (v2.16.3+) |
|
||||||
|
| `WEBHOOK_SECURITY_MODE` | `strict` | SSRF protection mode (v2.16.3+) |
|
||||||
|
|
||||||
### Optional Variables
|
### Optional Variables
|
||||||
|
|
||||||
@@ -180,6 +183,46 @@ Claude Desktop → mcp-remote → Railway (HTTPS) → n8n-MCP Server
|
|||||||
- Ensure the URL is correct and includes `https://`
|
- Ensure the URL is correct and includes `https://`
|
||||||
- Check Railway logs for any errors
|
- Check Railway logs for any errors
|
||||||
|
|
||||||
|
**Windows: "The filename, directory name, or volume label syntax is incorrect" or npx command not found:**
|
||||||
|
|
||||||
|
This is a common Windows issue with spaces in Node.js installation paths. The error occurs because Claude Desktop can't properly execute npx.
|
||||||
|
|
||||||
|
**Solution 1: Use node directly (Recommended)**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"n8n-railway": {
|
||||||
|
"command": "node",
|
||||||
|
"args": [
|
||||||
|
"C:\\Program Files\\nodejs\\node_modules\\npm\\bin\\npx-cli.js",
|
||||||
|
"-y",
|
||||||
|
"mcp-remote",
|
||||||
|
"https://your-app-name.up.railway.app/mcp",
|
||||||
|
"--header",
|
||||||
|
"Authorization: Bearer YOUR_SECURE_TOKEN_HERE"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution 2: Use cmd wrapper**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"n8n-railway": {
|
||||||
|
"command": "cmd",
|
||||||
|
"args": [
|
||||||
|
"/C",
|
||||||
|
"\"C:\\Program Files\\nodejs\\npx\" -y mcp-remote https://your-app-name.up.railway.app/mcp --header \"Authorization: Bearer YOUR_SECURE_TOKEN_HERE\""
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
To find your exact npx path, open Command Prompt and run: `where npx`
|
||||||
|
|
||||||
### Railway-Specific Issues
|
### Railway-Specific Issues
|
||||||
|
|
||||||
**Build failures:**
|
**Build failures:**
|
||||||
@@ -244,6 +287,32 @@ Since the Railway template uses a specific Docker image tag, updates are manual:
|
|||||||
|
|
||||||
You could use the `latest` tag, but this may cause unexpected breaking changes.
|
You could use the `latest` tag, but this may cause unexpected breaking changes.
|
||||||
|
|
||||||
|
## 🔒 Security Features (v2.16.3+)
|
||||||
|
|
||||||
|
Railway deployments include enhanced security features:
|
||||||
|
|
||||||
|
### Rate Limiting
|
||||||
|
- **Automatic brute force protection** - 20 attempts per 15 minutes per IP
|
||||||
|
- **Configurable limits** via `AUTH_RATE_LIMIT_WINDOW` and `AUTH_RATE_LIMIT_MAX`
|
||||||
|
- **Standard rate limit headers** for client awareness
|
||||||
|
|
||||||
|
### SSRF Protection
|
||||||
|
- **Default strict mode** blocks localhost, private IPs, and cloud metadata
|
||||||
|
- **Cloud metadata always blocked** (169.254.169.254, metadata.google.internal, etc.)
|
||||||
|
- **Use `moderate` mode only if** connecting to local n8n instance
|
||||||
|
|
||||||
|
**Security Configuration:**
|
||||||
|
```bash
|
||||||
|
# In Railway Variables tab:
|
||||||
|
WEBHOOK_SECURITY_MODE=strict # Production (recommended)
|
||||||
|
# or
|
||||||
|
WEBHOOK_SECURITY_MODE=moderate # If using local n8n with port forwarding
|
||||||
|
|
||||||
|
# Rate limiting (defaults are good for most use cases)
|
||||||
|
AUTH_RATE_LIMIT_WINDOW=900000 # 15 minutes
|
||||||
|
AUTH_RATE_LIMIT_MAX=20 # 20 attempts per IP
|
||||||
|
```
|
||||||
|
|
||||||
## 📝 Best Practices
|
## 📝 Best Practices
|
||||||
|
|
||||||
1. **Always change the default AUTH_TOKEN immediately**
|
1. **Always change the default AUTH_TOKEN immediately**
|
||||||
|
|||||||
180
docs/bugfix-onSessionCreated-event.md
Normal file
180
docs/bugfix-onSessionCreated-event.md
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
# Bug Fix: onSessionCreated Event Not Firing (v2.19.0)
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Fixed critical bug where `onSessionCreated` lifecycle event was never emitted for sessions created during the standard MCP initialize flow, completely breaking session persistence functionality.
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
|
||||||
|
- **Severity**: Critical
|
||||||
|
- **Affected Version**: v2.19.0
|
||||||
|
- **Component**: Session Persistence (Phase 3)
|
||||||
|
- **Status**: ✅ Fixed
|
||||||
|
|
||||||
|
## Root Cause
|
||||||
|
|
||||||
|
The `handleRequest()` method in `http-server-single-session.ts` had two different paths for session creation:
|
||||||
|
|
||||||
|
1. **Standard initialize flow** (lines 868-943): Created session inline but **did not emit** `onSessionCreated` event
|
||||||
|
2. **Manual restoration flow** (line 1048): Called `createSession()` which **correctly emitted** the event
|
||||||
|
|
||||||
|
This inconsistency meant that:
|
||||||
|
- New sessions during normal operation were **never saved to database**
|
||||||
|
- Only manually restored sessions triggered the save event
|
||||||
|
- Session persistence was completely broken for new sessions
|
||||||
|
- Container restarts caused all sessions to be lost
|
||||||
|
|
||||||
|
## The Fix
|
||||||
|
|
||||||
|
### Location
|
||||||
|
- **File**: `src/http-server-single-session.ts`
|
||||||
|
- **Method**: `handleRequest()`
|
||||||
|
- **Line**: After line 943 (`await server.connect(transport);`)
|
||||||
|
|
||||||
|
### Code Change
|
||||||
|
|
||||||
|
Added event emission after successfully connecting server to transport during initialize flow:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Connect the server to the transport BEFORE handling the request
|
||||||
|
logger.info('handleRequest: Connecting server to new transport');
|
||||||
|
await server.connect(transport);
|
||||||
|
|
||||||
|
// Phase 3: Emit onSessionCreated event (REQ-4)
|
||||||
|
// Fire-and-forget: don't await or block session creation
|
||||||
|
this.emitEvent('onSessionCreated', sessionIdToUse, instanceContext).catch(eventErr => {
|
||||||
|
logger.error('Failed to emit onSessionCreated event (non-blocking)', {
|
||||||
|
sessionId: sessionIdToUse,
|
||||||
|
error: eventErr instanceof Error ? eventErr.message : String(eventErr)
|
||||||
|
});
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Why This Works
|
||||||
|
|
||||||
|
1. **Consistent with existing pattern**: Matches the `createSession()` method pattern (line 664)
|
||||||
|
2. **Non-blocking**: Uses `.catch()` to ensure event handler errors don't break session creation
|
||||||
|
3. **Correct timing**: Fires after `server.connect(transport)` succeeds, ensuring session is fully initialized
|
||||||
|
4. **Same parameters**: Passes `sessionId` and `instanceContext` just like the restoration flow
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
### Test Results
|
||||||
|
|
||||||
|
Created comprehensive test suite to verify the fix:
|
||||||
|
|
||||||
|
**Test File**: `tests/unit/session/onSessionCreated-event.test.ts`
|
||||||
|
|
||||||
|
**Test Results**:
|
||||||
|
```
|
||||||
|
✓ onSessionCreated Event - Initialize Flow
|
||||||
|
✓ should emit onSessionCreated event when session is created during initialize flow (1594ms)
|
||||||
|
|
||||||
|
Test Files 5 passed (5)
|
||||||
|
Tests 78 passed (78)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Manual Testing**:
|
||||||
|
```typescript
|
||||||
|
const server = new SingleSessionHTTPServer({
|
||||||
|
sessionEvents: {
|
||||||
|
onSessionCreated: async (sessionId, context) => {
|
||||||
|
console.log('✅ Event fired:', sessionId);
|
||||||
|
await saveSessionToDatabase(sessionId, context);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Result: Event fires successfully on initialize!
|
||||||
|
// ✅ Event fired: 40dcc123-46bd-4994-945e-f2dbe60e54c2
|
||||||
|
```
|
||||||
|
|
||||||
|
### Behavior After Fix
|
||||||
|
|
||||||
|
1. **Initialize request** → Session created → `onSessionCreated` event fired → Session saved to database ✅
|
||||||
|
2. **Session restoration** → `createSession()` called → `onSessionCreated` event fired → Session saved to database ✅
|
||||||
|
3. **Manual restoration** → `manuallyRestoreSession()` → Session created → Event fired ✅
|
||||||
|
|
||||||
|
All three paths now correctly emit the event!
|
||||||
|
|
||||||
|
## Backward Compatibility
|
||||||
|
|
||||||
|
✅ **Fully backward compatible**:
|
||||||
|
- No breaking changes to API
|
||||||
|
- Event handler is optional (defaults to no-op)
|
||||||
|
- Non-blocking implementation ensures session creation succeeds even if handler fails
|
||||||
|
- Matches existing behavior of `createSession()` method
|
||||||
|
- All existing tests pass
|
||||||
|
|
||||||
|
## Related Code
|
||||||
|
|
||||||
|
### Event Emission Points
|
||||||
|
|
||||||
|
1. ✅ **Standard initialize flow**: `handleRequest()` at line ~947 (NEW - fixed)
|
||||||
|
2. ✅ **Manual restoration**: `createSession()` at line 664 (EXISTING - working)
|
||||||
|
3. ✅ **Session restoration**: calls `createSession()` indirectly (EXISTING - working)
|
||||||
|
|
||||||
|
### Other Lifecycle Events
|
||||||
|
|
||||||
|
The following events are working correctly:
|
||||||
|
- `onSessionRestored`: Fires when session is restored from database
|
||||||
|
- `onSessionAccessed`: Fires on every request (with throttling recommended)
|
||||||
|
- `onSessionExpired`: Fires before expired session cleanup
|
||||||
|
- `onSessionDeleted`: Fires on manual session deletion
|
||||||
|
|
||||||
|
## Testing Recommendations
|
||||||
|
|
||||||
|
After applying this fix, verify session persistence works:
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// 1. Start server with session events
|
||||||
|
const engine = new N8NMCPEngine({
|
||||||
|
sessionEvents: {
|
||||||
|
onSessionCreated: async (sessionId, context) => {
|
||||||
|
await database.upsertSession({ sessionId, ...context });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// 2. Client connects and initializes
|
||||||
|
// 3. Verify session saved to database
|
||||||
|
const sessions = await database.query('SELECT * FROM mcp_sessions');
|
||||||
|
expect(sessions.length).toBeGreaterThan(0);
|
||||||
|
|
||||||
|
// 4. Restart server
|
||||||
|
await engine.shutdown();
|
||||||
|
await engine.start();
|
||||||
|
|
||||||
|
// 5. Client reconnects with old session ID
|
||||||
|
// 6. Verify session restored from database
|
||||||
|
```
|
||||||
|
|
||||||
|
## Impact on n8n-mcp-backend
|
||||||
|
|
||||||
|
This fix **unblocks** the multi-tenant n8n-mcp-backend service that depends on session persistence:
|
||||||
|
|
||||||
|
- ✅ Sessions now persist across container restarts
|
||||||
|
- ✅ Users no longer need to restart Claude Desktop after backend updates
|
||||||
|
- ✅ Session continuity maintained for all users
|
||||||
|
- ✅ Production deployment viable
|
||||||
|
|
||||||
|
## Lessons Learned
|
||||||
|
|
||||||
|
1. **Consistency is critical**: Session creation should follow the same pattern everywhere
|
||||||
|
2. **Event-driven architecture**: Events must fire at all creation points, not just some
|
||||||
|
3. **Testing lifecycle events**: Need integration tests that verify events fire, not just that code runs
|
||||||
|
4. **Documentation**: Clearly document when events should fire and where
|
||||||
|
|
||||||
|
## Files Changed
|
||||||
|
|
||||||
|
- `src/http-server-single-session.ts`: Added event emission (lines 945-952)
|
||||||
|
- `tests/unit/session/onSessionCreated-event.test.ts`: New test file
|
||||||
|
- `tests/integration/session/test-onSessionCreated-event.ts`: Manual verification test
|
||||||
|
|
||||||
|
## Build Status
|
||||||
|
|
||||||
|
- ✅ TypeScript compilation: Success
|
||||||
|
- ✅ Type checking: Success
|
||||||
|
- ✅ All unit tests: 78 passed
|
||||||
|
- ✅ Integration tests: Pass
|
||||||
|
- ✅ Backward compatibility: Verified
|
||||||
BIN
docs/img/codex_connected.png
Normal file
BIN
docs/img/codex_connected.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 125 KiB |
@@ -1,162 +0,0 @@
|
|||||||
# Issue #90: "propertyValues[itemName] is not iterable" Error - Research Findings
|
|
||||||
|
|
||||||
## Executive Summary
|
|
||||||
|
|
||||||
The error "propertyValues[itemName] is not iterable" occurs when AI agents create workflows with incorrect data structures for n8n nodes that use `fixedCollection` properties. This primarily affects Switch Node v2, If Node, and Filter Node. The error prevents workflows from loading in the n8n UI, resulting in empty canvases.
|
|
||||||
|
|
||||||
## Root Cause Analysis
|
|
||||||
|
|
||||||
### 1. Data Structure Mismatch
|
|
||||||
|
|
||||||
The error occurs when n8n's validation engine expects an iterable array but encounters a non-iterable object. This happens with nodes using `fixedCollection` type properties.
|
|
||||||
|
|
||||||
**Incorrect Structure (causes error):**
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"rules": {
|
|
||||||
"conditions": {
|
|
||||||
"values": [
|
|
||||||
{
|
|
||||||
"value1": "={{$json.status}}",
|
|
||||||
"operation": "equals",
|
|
||||||
"value2": "active"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**Correct Structure:**
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"rules": {
|
|
||||||
"conditions": [
|
|
||||||
{
|
|
||||||
"value1": "={{$json.status}}",
|
|
||||||
"operation": "equals",
|
|
||||||
"value2": "active"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Affected Nodes
|
|
||||||
|
|
||||||
Based on the research and issue comments, the following nodes are affected:
|
|
||||||
|
|
||||||
1. **Switch Node v2** (`n8n-nodes-base.switch` with typeVersion: 2)
|
|
||||||
- Uses `rules` parameter with `conditions` fixedCollection
|
|
||||||
- v3 doesn't have this issue due to restructured schema
|
|
||||||
|
|
||||||
2. **If Node** (`n8n-nodes-base.if` with typeVersion: 1)
|
|
||||||
- Uses `conditions` parameter with nested conditions array
|
|
||||||
- Similar structure to Switch v2
|
|
||||||
|
|
||||||
3. **Filter Node** (`n8n-nodes-base.filter`)
|
|
||||||
- Uses `conditions` parameter
|
|
||||||
- Same fixedCollection pattern
|
|
||||||
|
|
||||||
### 3. Why AI Agents Create Incorrect Structures
|
|
||||||
|
|
||||||
1. **Training Data Issues**: AI models may have been trained on outdated or incorrect n8n workflow examples
|
|
||||||
2. **Nested Object Inference**: AI tends to create unnecessarily nested structures when it sees collection-type parameters
|
|
||||||
3. **Legacy Format Confusion**: Mixing v2 and v3 Switch node formats
|
|
||||||
4. **Schema Misinterpretation**: The term "fixedCollection" may lead AI to create object wrappers
|
|
||||||
|
|
||||||
## Current Impact
|
|
||||||
|
|
||||||
From issue #90 comments:
|
|
||||||
- Multiple users experiencing the issue
|
|
||||||
- Workflows fail to load completely (empty canvas)
|
|
||||||
- Users resort to using Switch Node v3 or direct API calls
|
|
||||||
- The issue appears in "most MCPs" according to user feedback
|
|
||||||
|
|
||||||
## Recommended Actions
|
|
||||||
|
|
||||||
### 1. Immediate Validation Enhancement
|
|
||||||
|
|
||||||
Add specific validation for fixedCollection properties in the workflow validator:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// In workflow-validator.ts or enhanced-config-validator.ts
|
|
||||||
function validateFixedCollectionParameters(node, result) {
|
|
||||||
const problematicNodes = {
|
|
||||||
'n8n-nodes-base.switch': { version: 2, fields: ['rules'] },
|
|
||||||
'n8n-nodes-base.if': { version: 1, fields: ['conditions'] },
|
|
||||||
'n8n-nodes-base.filter': { version: 1, fields: ['conditions'] }
|
|
||||||
};
|
|
||||||
|
|
||||||
const nodeConfig = problematicNodes[node.type];
|
|
||||||
if (nodeConfig && node.typeVersion === nodeConfig.version) {
|
|
||||||
// Validate structure
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Enhanced MCP Tool Validation
|
|
||||||
|
|
||||||
Update the validation tools to detect and prevent this specific error pattern:
|
|
||||||
|
|
||||||
1. **In `validate_node_operation` tool**: Add checks for fixedCollection structures
|
|
||||||
2. **In `validate_workflow` tool**: Include specific validation for Switch/If nodes
|
|
||||||
3. **In `n8n_create_workflow` tool**: Pre-validate parameters before submission
|
|
||||||
|
|
||||||
### 3. AI-Friendly Examples
|
|
||||||
|
|
||||||
Update workflow examples to show correct structures:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// In workflow-examples.ts
|
|
||||||
export const SWITCH_NODE_EXAMPLE = {
|
|
||||||
name: "Switch",
|
|
||||||
type: "n8n-nodes-base.switch",
|
|
||||||
typeVersion: 3, // Prefer v3 over v2
|
|
||||||
parameters: {
|
|
||||||
// Correct v3 structure
|
|
||||||
}
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Migration Strategy
|
|
||||||
|
|
||||||
For existing workflows with Switch v2:
|
|
||||||
1. Detect Switch v2 nodes in validation
|
|
||||||
2. Suggest migration to v3
|
|
||||||
3. Provide automatic conversion utility
|
|
||||||
|
|
||||||
### 5. Documentation Updates
|
|
||||||
|
|
||||||
1. Add warnings about fixedCollection structures in tool documentation
|
|
||||||
2. Include specific examples of correct vs incorrect structures
|
|
||||||
3. Document the Switch v2 to v3 migration path
|
|
||||||
|
|
||||||
## Proposed Implementation Priority
|
|
||||||
|
|
||||||
1. **High Priority**: Add validation to prevent creation of invalid structures
|
|
||||||
2. **High Priority**: Update existing validation tools to catch this error
|
|
||||||
3. **Medium Priority**: Add auto-fix capabilities to correct structures
|
|
||||||
4. **Medium Priority**: Update examples and documentation
|
|
||||||
5. **Low Priority**: Create migration utilities for v2 to v3
|
|
||||||
|
|
||||||
## Testing Strategy
|
|
||||||
|
|
||||||
1. Create test cases for each affected node type
|
|
||||||
2. Test both correct and incorrect structures
|
|
||||||
3. Verify validation catches all variants of the error
|
|
||||||
4. Test auto-fix suggestions work correctly
|
|
||||||
|
|
||||||
## Success Metrics
|
|
||||||
|
|
||||||
- Zero instances of "propertyValues[itemName] is not iterable" in newly created workflows
|
|
||||||
- Clear error messages that guide users to correct structures
|
|
||||||
- Successful validation of all Switch/If node configurations before workflow creation
|
|
||||||
|
|
||||||
## Next Steps
|
|
||||||
|
|
||||||
1. Implement validation enhancements in the workflow validator
|
|
||||||
2. Update MCP tools to include these validations
|
|
||||||
3. Add comprehensive tests
|
|
||||||
4. Update documentation with clear examples
|
|
||||||
5. Consider adding a migration tool for existing workflows
|
|
||||||
1213
docs/local/DEEP_DIVE_ANALYSIS_2025-10-02.md
Normal file
1213
docs/local/DEEP_DIVE_ANALYSIS_2025-10-02.md
Normal file
File diff suppressed because it is too large
Load Diff
225
docs/local/DEEP_DIVE_ANALYSIS_README.md
Normal file
225
docs/local/DEEP_DIVE_ANALYSIS_README.md
Normal file
@@ -0,0 +1,225 @@
|
|||||||
|
# N8N-MCP Deep Dive Analysis - October 2, 2025
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This directory contains a comprehensive deep-dive analysis of n8n-mcp usage data from September 26 - October 2, 2025.
|
||||||
|
|
||||||
|
**Data Volume Analyzed:**
|
||||||
|
- 212,375 telemetry events
|
||||||
|
- 5,751 workflow creations
|
||||||
|
- 2,119 unique users
|
||||||
|
- 6 days of usage data
|
||||||
|
|
||||||
|
## Report Structure
|
||||||
|
|
||||||
|
|
||||||
|
### `DEEP_DIVE_ANALYSIS_2025-10-02.md` (Main Report)
|
||||||
|
|
||||||
|
**Sections Covered:**
|
||||||
|
1. **Executive Summary** - Key findings and recommendations
|
||||||
|
2. **Tool Performance Analysis** - Success rates, performance metrics, critical findings
|
||||||
|
3. **Validation Catastrophe** - The node type prefix disaster analysis
|
||||||
|
4. **Usage Patterns & User Segmentation** - User distribution, daily trends
|
||||||
|
5. **Tool Sequence Analysis** - How AI agents use tools together
|
||||||
|
6. **Workflow Creation Patterns** - Complexity distribution, popular nodes
|
||||||
|
7. **Platform & Version Distribution** - OS, architecture, version adoption
|
||||||
|
8. **Error Patterns & Root Causes** - TypeErrors, validation errors, discovery failures
|
||||||
|
9. **P0-P1 Refactoring Recommendations** - Detailed implementation guides
|
||||||
|
|
||||||
|
**Sections Covered (companion report, `Deep_dive_p1_p2.md`):**
|
||||||
|
- Remaining P1 and P2 recommendations
|
||||||
|
- Architectural refactoring suggestions
|
||||||
|
- Telemetry enhancements
|
||||||
|
- CHANGELOG integration
|
||||||
|
- Final recommendations summary
|
||||||
|
|
||||||
|
## Key Findings Summary
|
||||||
|
|
||||||
|
### Critical Issues (P0 - Fix Immediately)
|
||||||
|
|
||||||
|
1. **Node Type Prefix Validation Catastrophe**
|
||||||
|
- 5,000+ validation errors from single root cause
|
||||||
|
- `nodes-base.X` vs `n8n-nodes-base.X` confusion
|
||||||
|
- **Solution**: Auto-normalize prefixes (2-4 hours effort)
|
||||||
|
|
||||||
|
2. **TypeError in Node Information Tools**
|
||||||
|
- 10-18% failure rate in get_node_essentials/info
|
||||||
|
- 1,000+ failures affecting hundreds of users
|
||||||
|
- **Solution**: Complete null-safety audit (1 day effort)
|
||||||
|
|
||||||
|
3. **Task Discovery Failures**
|
||||||
|
- `get_node_for_task` failing 28% of the time
|
||||||
|
- Worst-performing tool in entire system
|
||||||
|
- **Solution**: Expand task library + fuzzy matching (3 days effort)
|
||||||
|
|
||||||
|
### Performance Metrics
|
||||||
|
|
||||||
|
**Excellent Reliability (96-100% success):**
|
||||||
|
- n8n_update_partial_workflow: 98.7%
|
||||||
|
- search_nodes: 99.8%
|
||||||
|
- n8n_create_workflow: 96.1%
|
||||||
|
- All workflow management tools: 100%
|
||||||
|
|
||||||
|
**User Distribution:**
|
||||||
|
- Power Users (12): 2,112 events/user, 33 workflows
|
||||||
|
- Heavy Users (47): 673 events/user, 18 workflows
|
||||||
|
- Regular Users (516): 199 events/user, 7 workflows (CORE AUDIENCE)
|
||||||
|
- Active Users (919): 52 events/user, 2 workflows
|
||||||
|
- Casual Users (625): 8 events/user, 1 workflow
|
||||||
|
|
||||||
|
### Usage Insights
|
||||||
|
|
||||||
|
**Most Used Tools:**
|
||||||
|
1. n8n_update_partial_workflow: 10,177 calls (iterative refinement)
|
||||||
|
2. search_nodes: 8,839 calls (node discovery)
|
||||||
|
3. n8n_create_workflow: 6,046 calls (workflow creation)
|
||||||
|
|
||||||
|
**Most Common Tool Sequences:**
|
||||||
|
1. update → update → update (549x) - Iterative refinement pattern
|
||||||
|
2. create → update (297x) - Create then refine
|
||||||
|
3. update → get_workflow (265x) - Update then verify
|
||||||
|
|
||||||
|
**Most Popular Nodes:**
|
||||||
|
1. code (53% of workflows) - AI agents love programmatic control
|
||||||
|
2. httpRequest (47%) - Integration-heavy usage
|
||||||
|
3. webhook (32%) - Event-driven automation
|
||||||
|
|
||||||
|
## SQL Analytical Views Created
|
||||||
|
|
||||||
|
15 comprehensive views were created in Supabase for ongoing analysis:
|
||||||
|
|
||||||
|
1. `vw_tool_performance` - Performance metrics per tool
|
||||||
|
2. `vw_error_analysis` - Error patterns and frequencies
|
||||||
|
3. `vw_validation_analysis` - Validation failure details
|
||||||
|
4. `vw_tool_sequences` - Tool-to-tool transition patterns
|
||||||
|
5. `vw_workflow_creation_patterns` - Workflow characteristics
|
||||||
|
6. `vw_node_usage_analysis` - Node popularity and complexity
|
||||||
|
7. `vw_node_cooccurrence` - Which nodes are used together
|
||||||
|
8. `vw_user_activity` - Per-user activity metrics
|
||||||
|
9. `vw_session_analysis` - Platform/version distribution
|
||||||
|
10. `vw_workflow_validation_failures` - Workflow validation issues
|
||||||
|
11. `vw_temporal_patterns` - Time-based usage patterns
|
||||||
|
12. `vw_tool_funnel` - User progression through tools
|
||||||
|
13. `vw_search_analysis` - Search behavior
|
||||||
|
14. `vw_tool_success_summary` - Success/failure rates
|
||||||
|
15. `vw_user_journeys` - Complete user session reconstruction
|
||||||
|
|
||||||
|
## Priority Recommendations
|
||||||
|
|
||||||
|
### Immediate Actions (This Week)
|
||||||
|
|
||||||
|
✅ **P0-R1**: Auto-normalize node type prefixes → Eliminate 4,800 errors
|
||||||
|
✅ **P0-R2**: Complete null-safety audit → Fix 10-18% TypeError failures
|
||||||
|
✅ **P0-R3**: Expand get_node_for_task library → 72% → 95% success rate
|
||||||
|
|
||||||
|
**Expected Impact**: Reduce error rate from 5-10% to <2% overall
|
||||||
|
|
||||||
|
### Next Release (2-3 Weeks)
|
||||||
|
|
||||||
|
✅ **P1-R4**: Batch workflow operations → Save 30-50% tokens
|
||||||
|
✅ **P1-R5**: Proactive node suggestions → Reduce search iterations
|
||||||
|
✅ **P1-R6**: Auto-fix suggestions in errors → Self-service recovery
|
||||||
|
|
||||||
|
**Expected Impact**: 40% faster workflow creation, better UX
|
||||||
|
|
||||||
|
### Future Roadmap (1-3 Months)
|
||||||
|
|
||||||
|
✅ **A1**: Service layer consolidation → Cleaner architecture
|
||||||
|
✅ **A2**: Repository caching → 50% faster node operations
|
||||||
|
✅ **R10**: Workflow template library from usage → 80% coverage
|
||||||
|
✅ **T1-T3**: Enhanced telemetry → Better observability
|
||||||
|
|
||||||
|
**Expected Impact**: Scalable foundation for 10x growth
|
||||||
|
|
||||||
|
## Methodology
|
||||||
|
|
||||||
|
### Data Sources
|
||||||
|
|
||||||
|
1. **Supabase Telemetry Database**
|
||||||
|
- `telemetry_events` table: 212,375 rows
|
||||||
|
- `telemetry_workflows` table: 5,751 rows
|
||||||
|
|
||||||
|
2. **Analytical Views**
|
||||||
|
- Created 15 SQL views for multi-dimensional analysis
|
||||||
|
- Enabled complex queries and pattern recognition
|
||||||
|
|
||||||
|
3. **CHANGELOG Review**
|
||||||
|
- Analyzed recent changes (v2.14.0 - v2.14.6)
|
||||||
|
- Correlated fixes with error patterns
|
||||||
|
|
||||||
|
### Analysis Approach
|
||||||
|
|
||||||
|
1. **Quantitative Analysis**
|
||||||
|
- Success/failure rates per tool
|
||||||
|
- Performance metrics (avg, median, p95, p99)
|
||||||
|
- User segmentation and cohort analysis
|
||||||
|
- Temporal trends and growth patterns
|
||||||
|
|
||||||
|
2. **Pattern Recognition**
|
||||||
|
- Tool sequence analysis (Markov chains)
|
||||||
|
- Node co-occurrence patterns
|
||||||
|
- Workflow complexity distribution
|
||||||
|
- Error clustering and root cause analysis
|
||||||
|
|
||||||
|
3. **Qualitative Insights**
|
||||||
|
- CHANGELOG integration
|
||||||
|
- Error message analysis
|
||||||
|
- User journey reconstruction
|
||||||
|
- Best practice identification
|
||||||
|
|
||||||
|
## How to Use This Analysis
|
||||||
|
|
||||||
|
### For Development Priorities
|
||||||
|
|
||||||
|
1. Review **P0 Critical Recommendations** (Section 8)
|
||||||
|
2. Check estimated effort and impact
|
||||||
|
3. Prioritize based on ROI (impact/effort ratio)
|
||||||
|
4. Follow implementation guides with code examples
|
||||||
|
|
||||||
|
### For Architecture Decisions
|
||||||
|
|
||||||
|
1. Review **Architectural Recommendations** (Section 9)
|
||||||
|
2. Consider service layer consolidation
|
||||||
|
3. Evaluate repository caching opportunities
|
||||||
|
4. Plan for 10x scale
|
||||||
|
|
||||||
|
### For Product Strategy
|
||||||
|
|
||||||
|
1. Review **Usage Patterns** (Section 3 & 5)
|
||||||
|
2. Understand user segments (power vs casual)
|
||||||
|
3. Identify high-value features (most-used tools)
|
||||||
|
4. Focus on reliability over features (96% success rate target)
|
||||||
|
|
||||||
|
### For Telemetry Enhancement
|
||||||
|
|
||||||
|
1. Review **Telemetry Enhancements** (Section 10)
|
||||||
|
2. Add fine-grained timing metrics
|
||||||
|
3. Track workflow creation funnels
|
||||||
|
4. Monitor node-level analytics
|
||||||
|
|
||||||
|
## Contact & Feedback
|
||||||
|
|
||||||
|
For questions about this analysis or to request additional insights:
|
||||||
|
- Data Analyst: Claude Code with Supabase MCP
|
||||||
|
- Analysis Date: October 2, 2025
|
||||||
|
- Data Period: September 26 - October 2, 2025
|
||||||
|
|
||||||
|
## Change Log
|
||||||
|
|
||||||
|
- **2025-10-02**: Initial comprehensive analysis completed
|
||||||
|
- 15 SQL analytical views created
|
||||||
|
- 13 sections of detailed findings
|
||||||
|
- P0/P1/P2 recommendations with implementation guides
|
||||||
|
- Code examples and effort estimates provided
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
1. ✅ Review findings with development team
|
||||||
|
2. ✅ Prioritize P0 recommendations for immediate implementation
|
||||||
|
3. ✅ Plan P1 features for next release cycle
|
||||||
|
4. ✅ Set up monitoring for key metrics
|
||||||
|
5. ✅ Schedule follow-up analysis (weekly recommended)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*This analysis represents a snapshot of n8n-mcp usage during early adoption phase. Patterns may evolve as the user base grows and matures.*
|
||||||
1328
docs/local/Deep_dive_p1_p2.md
Normal file
1328
docs/local/Deep_dive_p1_p2.md
Normal file
File diff suppressed because it is too large
Load Diff
3396
docs/local/N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
Normal file
3396
docs/local/N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
Normal file
File diff suppressed because it is too large
Load Diff
1489
docs/local/P0_IMPLEMENTATION_PLAN.md
Normal file
1489
docs/local/P0_IMPLEMENTATION_PLAN.md
Normal file
File diff suppressed because it is too large
Load Diff
369
docs/local/TEMPLATE_MINING_ANALYSIS.md
Normal file
369
docs/local/TEMPLATE_MINING_ANALYSIS.md
Normal file
@@ -0,0 +1,369 @@
|
|||||||
|
# Template Mining Analysis - Alternative to P0-R3
|
||||||
|
|
||||||
|
**Date**: 2025-10-02
|
||||||
|
**Context**: Analyzing whether to fix `get_node_for_task` (28% failure rate) or replace it with template-based configuration extraction
|
||||||
|
|
||||||
|
## Executive Summary
|
||||||
|
|
||||||
|
**RECOMMENDATION**: Replace `get_node_for_task` with template-based configuration extraction. The template database contains 2,646 real-world workflows with rich node configurations that far exceed the 31 hardcoded task templates.
|
||||||
|
|
||||||
|
## Key Findings
|
||||||
|
|
||||||
|
### 1. Template Database Coverage
|
||||||
|
|
||||||
|
- **Total Templates**: 2,646 production workflows from n8n.io
|
||||||
|
- **Unique Node Types**: 543 (covers 103% of our 525 core nodes)
|
||||||
|
- **Metadata Coverage**: 100% (AI-generated structured metadata)
|
||||||
|
|
||||||
|
### 2. Node Type Coverage in Templates
|
||||||
|
|
||||||
|
Top node types by occurrence count across templates (a node type can appear multiple times per workflow, so counts can exceed the 2,646 total templates):
|
||||||
|
```
|
||||||
|
3,820 templates: n8n-nodes-base.httpRequest (144% of total templates!)
|
||||||
|
3,678 templates: n8n-nodes-base.set
|
||||||
|
2,445 templates: n8n-nodes-base.code
|
||||||
|
1,700 templates: n8n-nodes-base.googleSheets
|
||||||
|
1,471 templates: @n8n/n8n-nodes-langchain.agent
|
||||||
|
1,269 templates: @n8n/n8n-nodes-langchain.lmChatOpenAi
|
||||||
|
792 templates: n8n-nodes-base.telegram
|
||||||
|
702 templates: n8n-nodes-base.httpRequestTool
|
||||||
|
596 templates: n8n-nodes-base.gmail
|
||||||
|
466 templates: n8n-nodes-base.webhook
|
||||||
|
```
|
||||||
|
|
||||||
|
**Comparison**:
|
||||||
|
- Hardcoded task templates: 31 tasks covering 5.9% of nodes
|
||||||
|
- Real templates: 2,646 templates with 2-3k examples for common nodes
|
||||||
|
|
||||||
|
### 3. Database Structure
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE templates (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
workflow_id INTEGER UNIQUE NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
-- Node information
|
||||||
|
nodes_used TEXT, -- JSON array: ["n8n-nodes-base.httpRequest", ...]
|
||||||
|
workflow_json_compressed TEXT, -- Base64 encoded gzip of full workflow
|
||||||
|
-- Metadata (100% coverage)
|
||||||
|
metadata_json TEXT, -- AI-generated structured metadata
|
||||||
|
-- Stats
|
||||||
|
views INTEGER DEFAULT 0,
|
||||||
|
created_at DATETIME,
|
||||||
|
-- ...
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Real Configuration Examples
|
||||||
|
|
||||||
|
#### HTTP Request Node Configurations
|
||||||
|
|
||||||
|
**Simple URL fetch**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"url": "https://api.example.com/data",
|
||||||
|
"options": {}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**With authentication**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"url": "=https://api.wavespeed.ai/api/v3/predictions/{{ $json.data.id }}/result",
|
||||||
|
"options": {},
|
||||||
|
"authentication": "genericCredentialType",
|
||||||
|
"genericAuthType": "httpHeaderAuth"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Complex expressions**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"url": "=https://image.pollinations.ai/prompt/{{$('Social Media Content Factory').item.json.output.description.replaceAll(' ','-').replaceAll(',','').replaceAll('.','') }}",
|
||||||
|
"options": {}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Webhook Node Configurations
|
||||||
|
|
||||||
|
**Basic webhook**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"path": "ytube",
|
||||||
|
"options": {},
|
||||||
|
"httpMethod": "POST",
|
||||||
|
"responseMode": "responseNode"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**With binary data**:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"path": "your-endpoint",
|
||||||
|
"options": {
|
||||||
|
"binaryPropertyName": "data"
|
||||||
|
},
|
||||||
|
"httpMethod": "POST"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. AI-Generated Metadata
|
||||||
|
|
||||||
|
Each template has structured metadata including:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"categories": ["automation", "integration", "data processing"],
|
||||||
|
"complexity": "medium",
|
||||||
|
"use_cases": [
|
||||||
|
"Extract transaction data from Gmail",
|
||||||
|
"Automate bookkeeping",
|
||||||
|
"Expense tracking"
|
||||||
|
],
|
||||||
|
"estimated_setup_minutes": 30,
|
||||||
|
"required_services": ["Gmail", "Google Sheets", "Google Gemini"],
|
||||||
|
"key_features": [
|
||||||
|
"Fetch emails by label",
|
||||||
|
"Extract transaction data",
|
||||||
|
"Use LLM for structured output"
|
||||||
|
],
|
||||||
|
"target_audience": ["Accountants", "Small business owners"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Comparison: Task Templates vs Real Templates
|
||||||
|
|
||||||
|
### Current Approach (get_node_for_task)
|
||||||
|
|
||||||
|
**Pros**:
|
||||||
|
- Curated configurations with best practices
|
||||||
|
- Predictable, stable responses
|
||||||
|
- Fast lookup (no decompression needed)
|
||||||
|
|
||||||
|
**Cons**:
|
||||||
|
- Only 31 tasks (5.9% node coverage)
|
||||||
|
- 28% failure rate (users can't find what they need)
|
||||||
|
- Requires manual maintenance
|
||||||
|
- Static configurations without real-world context
|
||||||
|
- Usage ratio 22.5:1 (search_nodes is preferred)
|
||||||
|
|
||||||
|
### Template-Based Approach
|
||||||
|
|
||||||
|
**Pros**:
|
||||||
|
- 2,646 real workflows with 2-3k examples for common nodes
|
||||||
|
- 100% metadata coverage for semantic matching
|
||||||
|
- Real-world patterns and best practices
|
||||||
|
- Covers 543 node types (103% coverage)
|
||||||
|
- Self-updating (templates fetched from n8n.io)
|
||||||
|
- Rich context (use cases, complexity, setup time)
|
||||||
|
|
||||||
|
**Cons**:
|
||||||
|
- Requires decompression for full workflow access
|
||||||
|
- May contain template-specific context (but can be filtered)
|
||||||
|
- Need ranking/filtering logic for best matches
|
||||||
|
|
||||||
|
## Proposed Implementation Strategy
|
||||||
|
|
||||||
|
### Phase 1: Extract Node Configurations from Templates
|
||||||
|
|
||||||
|
Create a new service: `TemplateConfigExtractor`
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
interface ExtractedNodeConfig {
|
||||||
|
nodeType: string;
|
||||||
|
configuration: Record<string, any>;
|
||||||
|
source: {
|
||||||
|
templateId: number;
|
||||||
|
templateName: string;
|
||||||
|
templateViews: number;
|
||||||
|
useCases: string[];
|
||||||
|
complexity: 'simple' | 'medium' | 'complex';
|
||||||
|
};
|
||||||
|
patterns: {
|
||||||
|
hasAuthentication: boolean;
|
||||||
|
hasExpressions: boolean;
|
||||||
|
hasOptionalFields: boolean;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
class TemplateConfigExtractor {
|
||||||
|
async extractConfigsForNode(
|
||||||
|
nodeType: string,
|
||||||
|
options?: {
|
||||||
|
complexity?: 'simple' | 'medium' | 'complex';
|
||||||
|
requiresAuth?: boolean;
|
||||||
|
limit?: number;
|
||||||
|
}
|
||||||
|
): Promise<ExtractedNodeConfig[]> {
|
||||||
|
// 1. Query templates containing nodeType
|
||||||
|
// 2. Decompress workflow_json_compressed
|
||||||
|
// 3. Extract node configurations
|
||||||
|
// 4. Rank by popularity + complexity match
|
||||||
|
// 5. Return top N configurations
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: Integrate with Existing Tools
|
||||||
|
|
||||||
|
**Option A**: Enhance `get_node_essentials`
|
||||||
|
- Add `includeExamples: boolean` parameter
|
||||||
|
- Return 2-3 real configurations from templates
|
||||||
|
- Preserve existing compact format
|
||||||
|
|
||||||
|
**Option B**: Enhance `get_node_info`
|
||||||
|
- Add `examples` section with template-sourced configs
|
||||||
|
- Include source attribution (template name, views)
|
||||||
|
|
||||||
|
**Option C**: New tool `get_node_examples`
|
||||||
|
- Dedicated tool for retrieving configuration examples
|
||||||
|
- Query by node type, complexity, use case
|
||||||
|
- Returns ranked list of real configurations
|
||||||
|
|
||||||
|
### Phase 3: Deprecate get_node_for_task
|
||||||
|
|
||||||
|
- Mark as deprecated in tool documentation
|
||||||
|
- Redirect to enhanced tools
|
||||||
|
- Remove after 2-3 version cycles
|
||||||
|
|
||||||
|
## Performance Considerations
|
||||||
|
|
||||||
|
### Decompression Cost
|
||||||
|
|
||||||
|
- Average compressed size: 6-12 KB
|
||||||
|
- Decompression time: ~5-10ms per template
|
||||||
|
- Caching strategy needed for frequently accessed templates
|
||||||
|
|
||||||
|
### Query Strategy
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Fast: Get templates for a node type (no decompression)
|
||||||
|
SELECT id, name, views, metadata_json
|
||||||
|
FROM templates
|
||||||
|
WHERE nodes_used LIKE '%n8n-nodes-base.httpRequest%'
|
||||||
|
ORDER BY views DESC
|
||||||
|
LIMIT 10;
|
||||||
|
|
||||||
|
-- Then decompress only top matches
|
||||||
|
```
|
||||||
|
|
||||||
|
### Caching
|
||||||
|
|
||||||
|
- Cache decompressed workflows for popular templates (top 100)
|
||||||
|
- TTL: 1 hour
|
||||||
|
- Estimated memory: 100 * 50KB = 5MB
|
||||||
|
|
||||||
|
## Impact on P0-R3
|
||||||
|
|
||||||
|
**Original P0-R3 Plan**: Expand task library from 31 to 100+ tasks using fuzzy matching
|
||||||
|
|
||||||
|
**New Approach**: Mine 2,646 templates for real configurations
|
||||||
|
|
||||||
|
**Impact Assessment**:
|
||||||
|
|
||||||
|
| Metric | Original Plan | Template Mining |
|
||||||
|
|--------|--------------|-----------------|
|
||||||
|
| Configuration examples | 100 (estimated) | 2,646+ actual |
|
||||||
|
| Node coverage | ~20% | 103% |
|
||||||
|
| Maintenance | High (manual) | Low (auto-fetch) |
|
||||||
|
| Accuracy | Curated | Production-tested |
|
||||||
|
| Context richness | Limited | Rich metadata |
|
||||||
|
| Development time | 2-3 weeks | 1 week |
|
||||||
|
|
||||||
|
**Recommendation**: PIVOT to template mining approach for P0-R3
|
||||||
|
|
||||||
|
## Implementation Estimate
|
||||||
|
|
||||||
|
### Week 1: Core Infrastructure
|
||||||
|
- Day 1-2: Create `TemplateConfigExtractor` service
|
||||||
|
- Day 3: Implement caching layer
|
||||||
|
- Day 4-5: Testing and optimization
|
||||||
|
|
||||||
|
### Week 2: Integration
|
||||||
|
- Day 1-2: Enhance `get_node_essentials` with examples
|
||||||
|
- Day 3: Update tool documentation
|
||||||
|
- Day 4-5: Integration testing
|
||||||
|
|
||||||
|
**Total**: 2 weeks vs 3 weeks for original plan
|
||||||
|
|
||||||
|
## Validation Tests
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
// Test: Extract HTTP Request configs
|
||||||
|
const configs = await extractor.extractConfigsForNode(
|
||||||
|
'n8n-nodes-base.httpRequest',
|
||||||
|
{ complexity: 'simple', limit: 5 }
|
||||||
|
);
|
||||||
|
|
||||||
|
// Expected: 5 configs from top templates
|
||||||
|
// - Simple URL fetch
|
||||||
|
// - With authentication
|
||||||
|
// - With custom headers
|
||||||
|
// - With expressions
|
||||||
|
// - With error handling
|
||||||
|
|
||||||
|
// Test: Extract webhook configs
|
||||||
|
const webhookConfigs = await extractor.extractConfigsForNode(
|
||||||
|
'n8n-nodes-base.webhook',
|
||||||
|
{ limit: 3 }
|
||||||
|
);
|
||||||
|
|
||||||
|
// Expected: 3 configs showing different patterns
|
||||||
|
// - Basic POST webhook
|
||||||
|
// - With response node
|
||||||
|
// - With binary data handling
|
||||||
|
```
|
||||||
|
|
||||||
|
## Risks and Mitigation
|
||||||
|
|
||||||
|
### Risk 1: Template Quality Varies
|
||||||
|
- **Mitigation**: Filter by views (popularity) and metadata complexity rating
|
||||||
|
- Only use templates with >1000 views for examples
|
||||||
|
|
||||||
|
### Risk 2: Decompression Performance
|
||||||
|
- **Mitigation**: Cache decompressed popular templates
|
||||||
|
- Implement lazy loading (decompress on demand)
|
||||||
|
|
||||||
|
### Risk 3: Template-Specific Context
|
||||||
|
- **Mitigation**: Extract only node configuration, strip workflow-specific context
|
||||||
|
- Provide source attribution for context
|
||||||
|
|
||||||
|
### Risk 4: Breaking Changes in Template Structure
|
||||||
|
- **Mitigation**: Robust error handling in decompression
|
||||||
|
- Fallback to cached configs if template fetch fails
|
||||||
|
|
||||||
|
## Success Metrics
|
||||||
|
|
||||||
|
**Before** (get_node_for_task):
|
||||||
|
- 392 calls, 72% success rate
|
||||||
|
- 28% failure rate
|
||||||
|
- 31 task templates
|
||||||
|
- 5.9% node coverage
|
||||||
|
|
||||||
|
**Target** (template-based):
|
||||||
|
- 90%+ success rate for configuration discovery
|
||||||
|
- 100%+ node coverage
|
||||||
|
- 2,646+ real-world examples
|
||||||
|
- Self-updating from n8n.io
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
1. ✅ Complete template database analysis
|
||||||
|
2. ⏳ Create `TemplateConfigExtractor` service
|
||||||
|
3. ⏳ Implement caching layer
|
||||||
|
4. ⏳ Enhance `get_node_essentials` with examples
|
||||||
|
5. ⏳ Update P0 implementation plan
|
||||||
|
6. ⏳ Begin implementation
|
||||||
|
|
||||||
|
## Conclusion
|
||||||
|
|
||||||
|
The template database provides a vastly superior alternative to hardcoded task templates:
|
||||||
|
|
||||||
|
- **2,646 templates** vs 31 tasks (85x more examples)
|
||||||
|
- **103% node coverage** vs 5.9% coverage (17x improvement)
|
||||||
|
- **Real-world configurations** vs synthetic examples
|
||||||
|
- **Self-updating** vs manual maintenance
|
||||||
|
- **Rich metadata** for semantic matching
|
||||||
|
|
||||||
|
**Recommendation**: Pivot P0-R3 from "expand task library" to "mine template configurations"
|
||||||
1306
docs/local/integration-testing-plan.md
Normal file
1306
docs/local/integration-testing-plan.md
Normal file
File diff suppressed because it is too large
Load Diff
260
docs/local/integration-tests-phase1-summary.md
Normal file
260
docs/local/integration-tests-phase1-summary.md
Normal file
@@ -0,0 +1,260 @@
|
|||||||
|
# Integration Tests Phase 1: Foundation - COMPLETED
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
Phase 1 establishes the foundation for n8n API integration testing. All core utilities, fixtures, and infrastructure are now in place.
|
||||||
|
|
||||||
|
## Branch
|
||||||
|
`feat/integration-tests-foundation`
|
||||||
|
|
||||||
|
## Completed Tasks
|
||||||
|
|
||||||
|
### 1. Environment Configuration
|
||||||
|
- ✅ Updated `.env.example` with integration testing configuration
|
||||||
|
- ✅ Added environment variables for:
|
||||||
|
- n8n API credentials (`N8N_API_URL`, `N8N_API_KEY`)
|
||||||
|
- Webhook workflow IDs (4 workflows for GET/POST/PUT/DELETE)
|
||||||
|
- Test configuration (cleanup, tags, naming)
|
||||||
|
- ✅ Included detailed setup instructions in comments
|
||||||
|
|
||||||
|
### 2. Directory Structure
|
||||||
|
```
|
||||||
|
tests/integration/n8n-api/
|
||||||
|
├── workflows/ (empty - for Phase 2+)
|
||||||
|
├── executions/ (empty - for Phase 2+)
|
||||||
|
├── system/ (empty - for Phase 2+)
|
||||||
|
├── scripts/
|
||||||
|
│ └── cleanup-orphans.ts
|
||||||
|
└── utils/
|
||||||
|
├── credentials.ts
|
||||||
|
├── n8n-client.ts
|
||||||
|
├── test-context.ts
|
||||||
|
├── cleanup-helpers.ts
|
||||||
|
├── fixtures.ts
|
||||||
|
├── factories.ts
|
||||||
|
└── webhook-workflows.ts
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Core Utilities
|
||||||
|
|
||||||
|
#### `credentials.ts` (200 lines)
|
||||||
|
- Environment-aware credential loading
|
||||||
|
- Detects CI vs local environment automatically
|
||||||
|
- Validation functions with helpful error messages
|
||||||
|
- Non-throwing credential check functions
|
||||||
|
|
||||||
|
**Key Functions:**
|
||||||
|
- `getN8nCredentials()` - Load credentials from .env or GitHub secrets
|
||||||
|
- `validateCredentials()` - Ensure required credentials are present
|
||||||
|
- `validateWebhookWorkflows()` - Check webhook workflow IDs with setup instructions
|
||||||
|
- `hasCredentials()` - Non-throwing credential check
|
||||||
|
- `hasWebhookWorkflows()` - Non-throwing webhook check
|
||||||
|
|
||||||
|
#### `n8n-client.ts` (45 lines)
|
||||||
|
- Singleton n8n API client wrapper
|
||||||
|
- Pre-configured with test credentials
|
||||||
|
- Health check functionality
|
||||||
|
|
||||||
|
**Key Functions:**
|
||||||
|
- `getTestN8nClient()` - Get/create configured API client
|
||||||
|
- `resetTestN8nClient()` - Reset client instance
|
||||||
|
- `isN8nApiAccessible()` - Check API connectivity
|
||||||
|
|
||||||
|
#### `test-context.ts` (120 lines)
|
||||||
|
- Resource tracking for automatic cleanup
|
||||||
|
- Test workflow naming utilities
|
||||||
|
- Tag management
|
||||||
|
|
||||||
|
**Key Functions:**
|
||||||
|
- `createTestContext()` - Create context for tracking resources
|
||||||
|
- `TestContext.trackWorkflow()` - Track workflow for cleanup
|
||||||
|
- `TestContext.trackExecution()` - Track execution for cleanup
|
||||||
|
- `TestContext.cleanup()` - Delete all tracked resources
|
||||||
|
- `createTestWorkflowName()` - Generate unique workflow names
|
||||||
|
- `getTestTag()` - Get configured test tag
|
||||||
|
|
||||||
|
#### `cleanup-helpers.ts` (275 lines)
|
||||||
|
- Multi-level cleanup strategies
|
||||||
|
- Orphaned resource detection
|
||||||
|
- Age-based execution cleanup
|
||||||
|
- Tag-based workflow cleanup
|
||||||
|
|
||||||
|
**Key Functions:**
|
||||||
|
- `cleanupOrphanedWorkflows()` - Find and delete test workflows
|
||||||
|
- `cleanupOldExecutions()` - Delete executions older than X hours
|
||||||
|
- `cleanupAllTestResources()` - Comprehensive cleanup
|
||||||
|
- `cleanupWorkflowsByTag()` - Delete workflows by tag
|
||||||
|
- `cleanupExecutionsByWorkflow()` - Delete workflow's executions
|
||||||
|
|
||||||
|
#### `fixtures.ts` (310 lines)
|
||||||
|
- Pre-built workflow templates
|
||||||
|
- All using FULL node type format (n8n-nodes-base.*)
|
||||||
|
|
||||||
|
**Available Fixtures:**
|
||||||
|
- `SIMPLE_WEBHOOK_WORKFLOW` - Single webhook node
|
||||||
|
- `SIMPLE_HTTP_WORKFLOW` - Webhook + HTTP Request
|
||||||
|
- `MULTI_NODE_WORKFLOW` - Complex branching workflow
|
||||||
|
- `ERROR_HANDLING_WORKFLOW` - Error output configuration
|
||||||
|
- `AI_AGENT_WORKFLOW` - Langchain agent node
|
||||||
|
- `EXPRESSION_WORKFLOW` - n8n expressions testing
|
||||||
|
|
||||||
|
**Helper Functions:**
|
||||||
|
- `getFixture()` - Get fixture by name (with deep clone)
|
||||||
|
- `createCustomWorkflow()` - Build custom workflow from nodes
|
||||||
|
|
||||||
|
#### `factories.ts` (315 lines)
|
||||||
|
- Dynamic test data generation
|
||||||
|
- Node builders with sensible defaults
|
||||||
|
- Workflow composition helpers
|
||||||
|
|
||||||
|
**Node Factories:**
|
||||||
|
- `createWebhookNode()` - Webhook node with customization
|
||||||
|
- `createHttpRequestNode()` - HTTP Request node
|
||||||
|
- `createSetNode()` - Set node with assignments
|
||||||
|
- `createManualTriggerNode()` - Manual trigger node
|
||||||
|
|
||||||
|
**Connection Factories:**
|
||||||
|
- `createConnection()` - Simple node connection
|
||||||
|
- `createSequentialWorkflow()` - Auto-connected sequential nodes
|
||||||
|
- `createParallelWorkflow()` - Trigger with parallel branches
|
||||||
|
- `createErrorHandlingWorkflow()` - Workflow with error handling
|
||||||
|
|
||||||
|
**Utilities:**
|
||||||
|
- `randomString()` - Generate random test data
|
||||||
|
- `uniqueId()` - Unique IDs for testing
|
||||||
|
- `createTestTags()` - Test workflow tags
|
||||||
|
- `createWorkflowSettings()` - Common settings
|
||||||
|
|
||||||
|
#### `webhook-workflows.ts` (215 lines)
|
||||||
|
- Webhook workflow configuration templates
|
||||||
|
- Setup instructions generator
|
||||||
|
- URL generation utilities
|
||||||
|
|
||||||
|
**Key Features:**
|
||||||
|
- `WEBHOOK_WORKFLOW_CONFIGS` - Configurations for all 4 HTTP methods
|
||||||
|
- `printSetupInstructions()` - Print detailed setup guide
|
||||||
|
- `generateWebhookWorkflowJson()` - Generate workflow JSON
|
||||||
|
- `exportAllWebhookWorkflows()` - Export all 4 configs
|
||||||
|
- `getWebhookUrl()` - Get webhook URL for testing
|
||||||
|
- `isValidWebhookWorkflow()` - Validate workflow structure
|
||||||
|
|
||||||
|
### 4. Scripts
|
||||||
|
|
||||||
|
#### `cleanup-orphans.ts` (40 lines)
|
||||||
|
- Standalone cleanup script
|
||||||
|
- Can be run manually or in CI
|
||||||
|
- Comprehensive output logging
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
```bash
|
||||||
|
npm run test:cleanup:orphans
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. npm Scripts
|
||||||
|
Added to `package.json`:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"test:integration:n8n": "vitest run tests/integration/n8n-api",
|
||||||
|
"test:cleanup:orphans": "tsx tests/integration/n8n-api/scripts/cleanup-orphans.ts"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Code Quality
|
||||||
|
|
||||||
|
### TypeScript
|
||||||
|
- ✅ All code passes `npm run typecheck`
|
||||||
|
- ✅ All code compiles with `npm run build`
|
||||||
|
- ✅ No TypeScript errors
|
||||||
|
- ✅ Proper type annotations throughout
|
||||||
|
|
||||||
|
### Error Handling
|
||||||
|
- ✅ Comprehensive error messages
|
||||||
|
- ✅ Helpful setup instructions in error messages
|
||||||
|
- ✅ Non-throwing validation functions where appropriate
|
||||||
|
- ✅ Graceful handling of missing credentials
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
- ✅ All functions have JSDoc comments
|
||||||
|
- ✅ Usage examples in comments
|
||||||
|
- ✅ Clear parameter descriptions
|
||||||
|
- ✅ Return type documentation
|
||||||
|
|
||||||
|
## Files Created
|
||||||
|
|
||||||
|
### Documentation
|
||||||
|
1. `docs/local/integration-testing-plan.md` (550 lines)
|
||||||
|
2. `docs/local/integration-tests-phase1-summary.md` (this file)
|
||||||
|
|
||||||
|
### Code
|
||||||
|
1. `.env.example` - Updated with test configuration (32 new lines)
|
||||||
|
2. `package.json` - Added 2 npm scripts
|
||||||
|
3. `tests/integration/n8n-api/utils/credentials.ts` (200 lines)
|
||||||
|
4. `tests/integration/n8n-api/utils/n8n-client.ts` (45 lines)
|
||||||
|
5. `tests/integration/n8n-api/utils/test-context.ts` (120 lines)
|
||||||
|
6. `tests/integration/n8n-api/utils/cleanup-helpers.ts` (275 lines)
|
||||||
|
7. `tests/integration/n8n-api/utils/fixtures.ts` (310 lines)
|
||||||
|
8. `tests/integration/n8n-api/utils/factories.ts` (315 lines)
|
||||||
|
9. `tests/integration/n8n-api/utils/webhook-workflows.ts` (215 lines)
|
||||||
|
10. `tests/integration/n8n-api/scripts/cleanup-orphans.ts` (40 lines)
|
||||||
|
|
||||||
|
**Total New Code:** ~1,520 lines of production-ready TypeScript
|
||||||
|
|
||||||
|
## Next Steps (Phase 2)
|
||||||
|
|
||||||
|
Phase 2 will implement the first actual integration tests:
|
||||||
|
- Create workflow creation tests (10+ scenarios)
|
||||||
|
- Test P0 bug fix (SHORT vs FULL node types)
|
||||||
|
- Test workflow retrieval
|
||||||
|
- Test workflow deletion
|
||||||
|
|
||||||
|
**Branch:** `feat/integration-tests-workflow-creation`
|
||||||
|
|
||||||
|
## Prerequisites for Running Tests
|
||||||
|
|
||||||
|
Before running integration tests, you need to:
|
||||||
|
|
||||||
|
1. **Set up n8n instance:**
|
||||||
|
- Local: `npx n8n start`
|
||||||
|
- Or use cloud/self-hosted n8n
|
||||||
|
|
||||||
|
2. **Configure credentials in `.env`:**
|
||||||
|
```bash
|
||||||
|
N8N_API_URL=http://localhost:5678
|
||||||
|
N8N_API_KEY=<your-api-key>
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Create 4 webhook workflows manually:**
|
||||||
|
- One for each HTTP method (GET, POST, PUT, DELETE)
|
||||||
|
- Activate each workflow in n8n UI
|
||||||
|
- Set workflow IDs in `.env`:
|
||||||
|
```bash
|
||||||
|
N8N_TEST_WEBHOOK_GET_ID=<workflow-id>
|
||||||
|
N8N_TEST_WEBHOOK_POST_ID=<workflow-id>
|
||||||
|
N8N_TEST_WEBHOOK_PUT_ID=<workflow-id>
|
||||||
|
N8N_TEST_WEBHOOK_DELETE_ID=<workflow-id>
|
||||||
|
```
|
||||||
|
|
||||||
|
See `docs/local/integration-testing-plan.md` for detailed setup instructions.
|
||||||
|
|
||||||
|
## Success Metrics
|
||||||
|
|
||||||
|
Phase 1 Success Criteria - ALL MET:
|
||||||
|
- ✅ All utilities implemented and tested
|
||||||
|
- ✅ TypeScript compiles without errors
|
||||||
|
- ✅ Code follows project conventions
|
||||||
|
- ✅ Comprehensive documentation
|
||||||
|
- ✅ Environment configuration complete
|
||||||
|
- ✅ Cleanup infrastructure in place
|
||||||
|
- ✅ Ready for Phase 2 test implementation
|
||||||
|
|
||||||
|
## Lessons Learned
|
||||||
|
|
||||||
|
1. **N8nApiClient Constructor:** Uses config object, not separate parameters
|
||||||
|
2. **Cursor Handling:** The n8n API returns `null` when there are no more pages; this must be converted to `undefined`
|
||||||
|
3. **Workflow ID Validation:** Some workflows might have undefined IDs, so null checks are required
|
||||||
|
4. **Connection Types:** Error connections need explicit typing to avoid TypeScript errors
|
||||||
|
5. **Webhook Activation:** Cannot be done via the API — it must be done manually in the n8n UI, hence the requirement for pre-activated webhook workflows
|
||||||
|
|
||||||
|
## Time Invested
|
||||||
|
|
||||||
|
Phase 1 actual time: ~2 hours (estimated 2-3 days in plan)
|
||||||
|
- Faster than expected due to clear architecture and reusable patterns
|
||||||
@@ -1,712 +0,0 @@
|
|||||||
# MCP Tools Documentation for LLMs
|
|
||||||
|
|
||||||
This document provides comprehensive documentation for the most commonly used MCP tools in the n8n-mcp server. Each tool includes parameters, return formats, examples, and best practices.
|
|
||||||
|
|
||||||
## Table of Contents
|
|
||||||
1. [search_nodes](#search_nodes)
|
|
||||||
2. [get_node_essentials](#get_node_essentials)
|
|
||||||
3. [list_nodes](#list_nodes)
|
|
||||||
4. [validate_node_minimal](#validate_node_minimal)
|
|
||||||
5. [validate_node_operation](#validate_node_operation)
|
|
||||||
6. [get_node_for_task](#get_node_for_task)
|
|
||||||
7. [n8n_create_workflow](#n8n_create_workflow)
|
|
||||||
8. [n8n_update_partial_workflow](#n8n_update_partial_workflow)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## search_nodes
|
|
||||||
|
|
||||||
**Brief Description**: Search for n8n nodes by keywords in names and descriptions.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `query` (string, required): Search term - single word recommended for best results
|
|
||||||
- `limit` (number, optional): Maximum results to return (default: 20)
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"nodes": [
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.slack",
|
|
||||||
"displayName": "Slack",
|
|
||||||
"description": "Send messages to Slack channels"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"totalFound": 5
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Finding integration nodes**: `search_nodes("slack")` to find Slack integration
|
|
||||||
2. **Finding HTTP nodes**: `search_nodes("http")` for HTTP/webhook nodes
|
|
||||||
3. **Finding database nodes**: `search_nodes("postgres")` for PostgreSQL nodes
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// Search for Slack-related nodes
|
|
||||||
{
|
|
||||||
"query": "slack",
|
|
||||||
"limit": 10
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search for webhook nodes
|
|
||||||
{
|
|
||||||
"query": "webhook",
|
|
||||||
"limit": 20
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
|
|
||||||
- Fast operation (cached results)
|
|
||||||
- Single-word queries are more precise
|
|
||||||
- Returns results with OR logic (any word matches)
|
|
||||||
|
|
||||||
### Best Practices
|
|
||||||
- Use single words for precise results: "slack" not "send slack message"
|
|
||||||
- Try shorter terms if no results: "sheet" instead of "spreadsheet"
|
|
||||||
- Search is case-insensitive
|
|
||||||
- Common searches: "http", "webhook", "email", "database", "slack"
|
|
||||||
|
|
||||||
### Common Pitfalls
|
|
||||||
- Multi-word searches return too many results (OR logic)
|
|
||||||
- Searching for exact phrases doesn't work
|
|
||||||
- Node types aren't searchable here (use exact type with get_node_info)
|
|
||||||
|
|
||||||
### Related Tools
|
|
||||||
- `list_nodes` - Browse nodes by category
|
|
||||||
- `get_node_essentials` - Get node configuration after finding it
|
|
||||||
- `list_ai_tools` - Find AI-capable nodes specifically
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## get_node_essentials
|
|
||||||
|
|
||||||
**Brief Description**: Get only the 10-20 most important properties for a node with working examples.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `nodeType` (string, required): Full node type with prefix (e.g., "nodes-base.httpRequest")
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.httpRequest",
|
|
||||||
"displayName": "HTTP Request",
|
|
||||||
"essentialProperties": [
|
|
||||||
{
|
|
||||||
"name": "method",
|
|
||||||
"type": "options",
|
|
||||||
"default": "GET",
|
|
||||||
"options": ["GET", "POST", "PUT", "DELETE"],
|
|
||||||
"required": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "url",
|
|
||||||
"type": "string",
|
|
||||||
"required": true,
|
|
||||||
"placeholder": "https://api.example.com/endpoint"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"examples": [
|
|
||||||
{
|
|
||||||
"name": "Simple GET Request",
|
|
||||||
"configuration": {
|
|
||||||
"method": "GET",
|
|
||||||
"url": "https://api.example.com/users"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"tips": [
|
|
||||||
"Use expressions like {{$json.url}} to make URLs dynamic",
|
|
||||||
"Enable 'Split Into Items' for array responses"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Quick node configuration**: Get just what you need without parsing 100KB+ of data
|
|
||||||
2. **Learning node basics**: Understand essential properties with examples
|
|
||||||
3. **Building workflows efficiently**: 95% smaller responses than get_node_info
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// Get essentials for HTTP Request node
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.httpRequest"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get essentials for Slack node
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.slack"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get essentials for OpenAI node
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-langchain.openAi"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
|
|
||||||
- Very fast (<5KB responses vs 100KB+ for full info)
|
|
||||||
- Curated for 20+ common nodes
|
|
||||||
- Automatic fallback for unconfigured nodes
|
|
||||||
|
|
||||||
### Best Practices
|
|
||||||
- Always use this before get_node_info
|
|
||||||
- Node type must include prefix: "nodes-base.slack" not "slack"
|
|
||||||
- Check examples section for working configurations
|
|
||||||
- Use tips section for common patterns
|
|
||||||
|
|
||||||
### Common Pitfalls
|
|
||||||
- Forgetting the prefix in node type
|
|
||||||
- Using wrong package name (n8n-nodes-base vs @n8n/n8n-nodes-langchain)
|
|
||||||
- Case sensitivity in node types
|
|
||||||
|
|
||||||
### Related Tools
|
|
||||||
- `get_node_info` - Full schema when essentials aren't enough
|
|
||||||
- `search_node_properties` - Find specific properties
|
|
||||||
- `get_node_for_task` - Pre-configured for common tasks
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## list_nodes
|
|
||||||
|
|
||||||
**Brief Description**: List available n8n nodes with optional filtering by package, category, or capabilities.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `package` (string, optional): Filter by exact package name
|
|
||||||
- `category` (string, optional): Filter by category (trigger, transform, output, input)
|
|
||||||
- `developmentStyle` (string, optional): Filter by implementation style
|
|
||||||
- `isAITool` (boolean, optional): Filter for AI-capable nodes
|
|
||||||
- `limit` (number, optional): Maximum results (default: 50, max: 500)
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"nodes": [
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.webhook",
|
|
||||||
"displayName": "Webhook",
|
|
||||||
"description": "Receive HTTP requests",
|
|
||||||
"categories": ["trigger"],
|
|
||||||
"version": 2
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"total": 104,
|
|
||||||
"hasMore": false
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Browse all triggers**: `list_nodes({category: "trigger", limit: 200})`
|
|
||||||
2. **List all nodes**: `list_nodes({limit: 500})`
|
|
||||||
3. **Find AI nodes**: `list_nodes({isAITool: true})`
|
|
||||||
4. **Browse core nodes**: `list_nodes({package: "n8n-nodes-base"})`
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// List all trigger nodes
|
|
||||||
{
|
|
||||||
"category": "trigger",
|
|
||||||
"limit": 200
|
|
||||||
}
|
|
||||||
|
|
||||||
// List all AI-capable nodes
|
|
||||||
{
|
|
||||||
"isAITool": true,
|
|
||||||
"limit": 100
|
|
||||||
}
|
|
||||||
|
|
||||||
// List nodes from core package
|
|
||||||
{
|
|
||||||
"package": "n8n-nodes-base",
|
|
||||||
"limit": 200
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
|
|
||||||
- Fast operation (cached results)
|
|
||||||
- Default limit of 50 may miss nodes - use 200+
|
|
||||||
- Returns metadata only, not full schemas
|
|
||||||
|
|
||||||
### Best Practices
|
|
||||||
- Always set limit to 200+ for complete results
|
|
||||||
- Use exact package names: "n8n-nodes-base" not "@n8n/n8n-nodes-base"
|
|
||||||
- Categories are singular: "trigger" not "triggers"
|
|
||||||
- Common categories: trigger (104), transform, output, input
|
|
||||||
|
|
||||||
### Common Pitfalls
|
|
||||||
- Default limit (50) misses many nodes
|
|
||||||
- Using wrong package name format
|
|
||||||
- Multiple filters may return empty results
|
|
||||||
|
|
||||||
### Related Tools
|
|
||||||
- `search_nodes` - Search by keywords
|
|
||||||
- `list_ai_tools` - Specifically for AI nodes
|
|
||||||
- `get_database_statistics` - Overview of all nodes
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## validate_node_minimal
|
|
||||||
|
|
||||||
**Brief Description**: Quick validation checking only for missing required fields.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `nodeType` (string, required): Node type to validate (e.g., "nodes-base.slack")
|
|
||||||
- `config` (object, required): Node configuration to check
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"valid": false,
|
|
||||||
"missingRequired": ["channel", "messageType"],
|
|
||||||
"message": "Missing 2 required fields"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Quick validation**: Check if all required fields are present
|
|
||||||
2. **Pre-flight check**: Validate before creating workflow
|
|
||||||
3. **Minimal overhead**: Fastest validation option
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// Validate Slack message configuration
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.slack",
|
|
||||||
"config": {
|
|
||||||
"resource": "message",
|
|
||||||
"operation": "send",
|
|
||||||
"text": "Hello World"
|
|
||||||
// Missing: channel
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate HTTP Request
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.httpRequest",
|
|
||||||
"config": {
|
|
||||||
"method": "POST"
|
|
||||||
// Missing: url
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
|
|
||||||
- Fastest validation option
|
|
||||||
- No schema loading overhead
|
|
||||||
- Returns only missing fields
|
|
||||||
|
|
||||||
### Best Practices
|
|
||||||
- Use for quick checks during workflow building
|
|
||||||
- Follow up with validate_node_operation for complex nodes
|
|
||||||
- Check operation-specific requirements
|
|
||||||
|
|
||||||
### Common Pitfalls
|
|
||||||
- Doesn't validate field values or types
|
|
||||||
- Doesn't check operation-specific requirements
|
|
||||||
- Won't catch configuration errors beyond missing fields
|
|
||||||
|
|
||||||
### Related Tools
|
|
||||||
- `validate_node_operation` - Comprehensive validation
|
|
||||||
- `validate_workflow` - Full workflow validation
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## validate_node_operation
|
|
||||||
|
|
||||||
**Brief Description**: Comprehensive node configuration validation with operation awareness and helpful error messages.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `nodeType` (string, required): Node type to validate
|
|
||||||
- `config` (object, required): Complete node configuration including operation fields
|
|
||||||
- `profile` (string, optional): Validation profile (minimal, runtime, ai-friendly, strict)
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"valid": false,
|
|
||||||
"errors": [
|
|
||||||
{
|
|
||||||
"field": "channel",
|
|
||||||
"message": "Channel is required to send Slack message",
|
|
||||||
"suggestion": "Add channel: '#general' or '@username'"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"warnings": [
|
|
||||||
{
|
|
||||||
"field": "unfurl_links",
|
|
||||||
"message": "Consider setting unfurl_links: false for better performance"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"examples": {
|
|
||||||
"minimal": {
|
|
||||||
"resource": "message",
|
|
||||||
"operation": "send",
|
|
||||||
"channel": "#general",
|
|
||||||
"text": "Hello World"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Complex node validation**: Slack, Google Sheets, databases
|
|
||||||
2. **Operation-specific checks**: Different rules per operation
|
|
||||||
3. **Getting fix suggestions**: Helpful error messages with solutions
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// Validate Slack configuration
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.slack",
|
|
||||||
"config": {
|
|
||||||
"resource": "message",
|
|
||||||
"operation": "send",
|
|
||||||
"text": "Hello team!"
|
|
||||||
},
|
|
||||||
"profile": "ai-friendly"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate Google Sheets operation
|
|
||||||
{
|
|
||||||
"nodeType": "nodes-base.googleSheets",
|
|
||||||
"config": {
|
|
||||||
"operation": "append",
|
|
||||||
"sheetId": "1234567890",
|
|
||||||
"range": "Sheet1!A:Z"
|
|
||||||
},
|
|
||||||
"profile": "runtime"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
|
|
||||||
- Slower than minimal validation
|
|
||||||
- Loads full node schema
|
|
||||||
- Operation-aware validation rules
|
|
||||||
|
|
||||||
### Best Practices
|
|
||||||
- Use "ai-friendly" profile for balanced validation
|
|
||||||
- Check examples in response for working configurations
|
|
||||||
- Follow suggestions to fix errors
|
|
||||||
- Essential for complex nodes (Slack, databases, APIs)
|
|
||||||
|
|
||||||
### Common Pitfalls
|
|
||||||
- Forgetting operation fields (resource, operation, action)
|
|
||||||
- Using wrong profile (too strict or too lenient)
|
|
||||||
- Ignoring warnings that could cause runtime issues
|
|
||||||
|
|
||||||
### Related Tools
|
|
||||||
- `validate_node_minimal` - Quick required field check
|
|
||||||
- `get_property_dependencies` - Understand field relationships
|
|
||||||
- `validate_workflow` - Validate entire workflow
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## get_node_for_task
|
|
||||||
|
|
||||||
**Brief Description**: Get pre-configured node settings for common automation tasks.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `task` (string, required): Task identifier (e.g., "post_json_request", "receive_webhook")
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"task": "post_json_request",
|
|
||||||
"nodeType": "nodes-base.httpRequest",
|
|
||||||
"displayName": "HTTP Request",
|
|
||||||
"configuration": {
|
|
||||||
"method": "POST",
|
|
||||||
"url": "={{ $json.api_endpoint }}",
|
|
||||||
"responseFormat": "json",
|
|
||||||
"options": {
|
|
||||||
"bodyContentType": "json"
|
|
||||||
},
|
|
||||||
"bodyParametersJson": "={{ JSON.stringify($json) }}"
|
|
||||||
},
|
|
||||||
"userMustProvide": [
|
|
||||||
"url - The API endpoint URL",
|
|
||||||
"bodyParametersJson - The JSON data to send"
|
|
||||||
],
|
|
||||||
"tips": [
|
|
||||||
"Use expressions to make values dynamic",
|
|
||||||
"Enable 'Split Into Items' for batch processing"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Quick task setup**: Configure nodes for specific tasks instantly
|
|
||||||
2. **Learning patterns**: See how to configure nodes properly
|
|
||||||
3. **Common workflows**: Standard patterns like webhooks, API calls, database queries
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// Get configuration for JSON POST request
|
|
||||||
{
|
|
||||||
"task": "post_json_request"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get webhook receiver configuration
|
|
||||||
{
|
|
||||||
"task": "receive_webhook"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get AI chat configuration
|
|
||||||
{
|
|
||||||
"task": "chat_with_ai"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
|
|
||||||
- Instant response (pre-configured templates)
|
|
||||||
- No database lookups required
|
|
||||||
- Includes working examples
|
|
||||||
|
|
||||||
### Best Practices
|
|
||||||
- Use list_tasks first to see available options
|
|
||||||
- Check userMustProvide section
|
|
||||||
- Follow tips for best results
|
|
||||||
- Common tasks: API calls, webhooks, database queries, AI chat
|
|
||||||
|
|
||||||
### Common Pitfalls
|
|
||||||
- Not all tasks available (use list_tasks)
|
|
||||||
- Configuration needs customization
|
|
||||||
- Some fields still need user input
|
|
||||||
|
|
||||||
### Related Tools
|
|
||||||
- `list_tasks` - See all available tasks
|
|
||||||
- `get_node_essentials` - Alternative approach
|
|
||||||
- `search_templates` - Find complete workflow templates
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## n8n_create_workflow
|
|
||||||
|
|
||||||
**Brief Description**: Create a new workflow in n8n with nodes and connections.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `name` (string, required): Workflow name
|
|
||||||
- `nodes` (array, required): Array of node definitions
|
|
||||||
- `connections` (object, required): Node connections mapping
|
|
||||||
- `settings` (object, optional): Workflow settings
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"id": "workflow-uuid",
|
|
||||||
"name": "My Workflow",
|
|
||||||
"active": false,
|
|
||||||
"createdAt": "2024-01-15T10:30:00Z",
|
|
||||||
"updatedAt": "2024-01-15T10:30:00Z",
|
|
||||||
"nodes": [...],
|
|
||||||
"connections": {...}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Automated workflow creation**: Build workflows programmatically
|
|
||||||
2. **Template deployment**: Deploy pre-built workflow patterns
|
|
||||||
3. **Multi-workflow systems**: Create interconnected workflows
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// Create simple webhook → HTTP request workflow
|
|
||||||
{
|
|
||||||
"name": "Webhook to API",
|
|
||||||
"nodes": [
|
|
||||||
{
|
|
||||||
"id": "webhook-1",
|
|
||||||
"name": "Webhook",
|
|
||||||
"type": "n8n-nodes-base.webhook",
|
|
||||||
"typeVersion": 2,
|
|
||||||
"position": [250, 300],
|
|
||||||
"parameters": {
|
|
||||||
"path": "/my-webhook",
|
|
||||||
"httpMethod": "POST"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "http-1",
|
|
||||||
"name": "HTTP Request",
|
|
||||||
"type": "n8n-nodes-base.httpRequest",
|
|
||||||
"typeVersion": 4.2,
|
|
||||||
"position": [450, 300],
|
|
||||||
"parameters": {
|
|
||||||
"method": "POST",
|
|
||||||
"url": "https://api.example.com/process",
|
|
||||||
"responseFormat": "json"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"connections": {
|
|
||||||
"Webhook": {
|
|
||||||
"main": [[{"node": "HTTP Request", "type": "main", "index": 0}]]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
|
|
||||||
- API call to n8n instance required
|
|
||||||
- Workflow created in inactive state
|
|
||||||
- Must be manually activated in UI
|
|
||||||
|
|
||||||
### Best Practices
|
|
||||||
- Always include typeVersion for nodes
|
|
||||||
- Use node names (not IDs) in connections
|
|
||||||
- Position nodes logically ([x, y] coordinates)
|
|
||||||
- Test with validate_workflow first
|
|
||||||
- Start simple, add complexity gradually
|
|
||||||
|
|
||||||
### Common Pitfalls
|
|
||||||
- Missing typeVersion causes errors
|
|
||||||
- Using node IDs instead of names in connections
|
|
||||||
- Forgetting required node properties
|
|
||||||
- Creating cycles in connections
|
|
||||||
- Workflow can't be activated via API
|
|
||||||
|
|
||||||
### Related Tools
|
|
||||||
- `validate_workflow` - Validate before creating
|
|
||||||
- `n8n_update_partial_workflow` - Modify existing workflows
|
|
||||||
- `n8n_trigger_webhook_workflow` - Execute workflows
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## n8n_update_partial_workflow
|
|
||||||
|
|
||||||
**Brief Description**: Update workflows using diff operations for precise, incremental changes without sending the entire workflow.
|
|
||||||
|
|
||||||
### Parameters
|
|
||||||
- `id` (string, required): Workflow ID to update
|
|
||||||
- `operations` (array, required): Array of diff operations (max 5)
|
|
||||||
- `validateOnly` (boolean, optional): Test without applying changes
|
|
||||||
|
|
||||||
### Return Format
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"success": true,
|
|
||||||
"workflow": {
|
|
||||||
"id": "workflow-uuid",
|
|
||||||
"name": "Updated Workflow",
|
|
||||||
"nodes": [...],
|
|
||||||
"connections": {...}
|
|
||||||
},
|
|
||||||
"appliedOperations": 3
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Common Use Cases
|
|
||||||
1. **Add nodes to existing workflows**: Insert new functionality
|
|
||||||
2. **Update node configurations**: Change parameters without full replacement
|
|
||||||
3. **Manage connections**: Add/remove node connections
|
|
||||||
4. **Quick edits**: Rename, enable/disable nodes, update settings
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
```json
|
|
||||||
// Add a new node and connect it
|
|
||||||
{
|
|
||||||
"id": "workflow-123",
|
|
||||||
"operations": [
|
|
||||||
{
|
|
||||||
"type": "addNode",
|
|
||||||
"node": {
|
|
||||||
"id": "set-1",
|
|
||||||
"name": "Set Data",
|
|
||||||
"type": "n8n-nodes-base.set",
|
|
||||||
"typeVersion": 3,
|
|
||||||
"position": [600, 300],
|
|
||||||
"parameters": {
|
|
||||||
"values": {
|
|
||||||
"string": [{
|
|
||||||
"name": "status",
|
|
||||||
"value": "processed"
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "addConnection",
|
|
||||||
"source": "HTTP Request",
|
|
||||||
"target": "Set Data"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update multiple properties
|
|
||||||
{
|
|
||||||
"id": "workflow-123",
|
|
||||||
"operations": [
|
|
||||||
{
|
|
||||||
"type": "updateName",
|
|
||||||
"name": "Production Workflow v2"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "updateNode",
|
|
||||||
"nodeName": "Webhook",
|
|
||||||
"changes": {
|
|
||||||
"parameters.path": "/v2/webhook"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "addTag",
|
|
||||||
"tag": "production"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Performance Notes
- 80-90% token savings vs full updates
- Maximum 5 operations per request
- Two-pass processing handles dependencies
- Transactional: all or nothing

### Best Practices
- Use validateOnly: true to test first
- Keep operations under 5 for reliability
- Operations can be in any order (v2.7.0+)
- Use node names, not IDs, in operations
- For updateNode, use dot notation for nested paths

### Common Pitfalls
- Exceeding the 5-operation limit
- Using node IDs instead of names
- Forgetting required node properties in addNode
- Not testing with validateOnly first

### Related Tools
- `n8n_update_full_workflow` - Complete workflow replacement
- `n8n_get_workflow` - Fetch current workflow state
- `validate_workflow` - Validate changes before applying
|
|
||||||
---
|
|
||||||
|
|
||||||
## Quick Reference

### Workflow Building Process
1. **Discovery**: `search_nodes` → `list_nodes`
2. **Configuration**: `get_node_essentials` → `get_node_for_task`
3. **Validation**: `validate_node_minimal` → `validate_node_operation`
4. **Creation**: `validate_workflow` → `n8n_create_workflow`
5. **Updates**: `n8n_update_partial_workflow`

### Performance Tips
- Use `get_node_essentials` instead of `get_node_info` (95% smaller)
- Set high limits on `list_nodes` (200+)
- Use single words in `search_nodes`
- Validate incrementally while building

### Common Node Types
- **Triggers**: webhook, schedule, emailReadImap, slackTrigger
- **Core**: httpRequest, code, set, if, merge, splitInBatches
- **Integrations**: slack, gmail, googleSheets, postgres, mongodb
- **AI**: agent, openAi, chainLlm, documentLoader

### Error Prevention
- Always include node type prefixes: "nodes-base.slack"
- Use node names (not IDs) in connections
- Include typeVersion in all nodes
- Test with validateOnly before applying changes
- Check userMustProvide sections in templates
|
|
||||||
@@ -1,514 +0,0 @@
|
|||||||
# n8n MCP Client Tool Integration - Implementation Plan (Simplified)
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
This document provides a **simplified** implementation plan for making n8n-mcp compatible with n8n's MCP Client Tool (v1.1). Based on expert review, we're taking a minimal approach that extends the existing single-session server rather than creating new architecture.
|
|
||||||
|
|
||||||
## Key Design Principles
|
|
||||||
|
|
||||||
1. **Minimal Changes**: Extend existing single-session server with n8n compatibility mode
|
|
||||||
2. **No Overengineering**: No complex session management or multi-session architecture
|
|
||||||
3. **Docker-Native**: Separate Docker image for n8n deployment
|
|
||||||
4. **Remote Deployment**: Designed to run alongside n8n in production
|
|
||||||
5. **Backward Compatible**: Existing functionality remains unchanged
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
- Docker and Docker Compose
|
|
||||||
- n8n version 1.104.2 or higher (with MCP Client Tool v1.1)
|
|
||||||
- Basic understanding of Docker networking
|
|
||||||
|
|
||||||
## Implementation Approach
|
|
||||||
|
|
||||||
Instead of creating new multi-session architecture, we'll extend the existing single-session server with an n8n compatibility mode. This approach was recommended by all three expert reviewers as simpler and more maintainable.
|
|
||||||
|
|
||||||
## Architecture Changes
|
|
||||||
|
|
||||||
```
|
|
||||||
src/
|
|
||||||
├── http-server-single-session.ts # MODIFY: Add n8n mode flag
|
|
||||||
└── mcp/
|
|
||||||
└── server.ts # NO CHANGES NEEDED
|
|
||||||
|
|
||||||
Docker/
|
|
||||||
├── Dockerfile.n8n # NEW: n8n-specific image
|
|
||||||
├── docker-compose.n8n.yml # NEW: Simplified stack
|
|
||||||
└── .github/workflows/
|
|
||||||
└── docker-build-n8n.yml # NEW: Build workflow
|
|
||||||
```
|
|
||||||
|
|
||||||
## Implementation Steps
|
|
||||||
|
|
||||||
### Step 1: Modify Existing Single-Session Server
|
|
||||||
|
|
||||||
#### 1.1 Update `src/http-server-single-session.ts`
|
|
||||||
|
|
||||||
Add n8n compatibility mode to the existing server with minimal changes:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// Add these constants at the top (after imports)
|
|
||||||
const PROTOCOL_VERSION = "2024-11-05";
|
|
||||||
const N8N_MODE = process.env.N8N_MODE === 'true';
|
|
||||||
|
|
||||||
// In the constructor or start method, add logging
|
|
||||||
if (N8N_MODE) {
|
|
||||||
logger.info('Running in n8n compatibility mode');
|
|
||||||
}
|
|
||||||
|
|
||||||
// In setupRoutes method, add the protocol version endpoint
|
|
||||||
if (N8N_MODE) {
|
|
||||||
app.get('/mcp', (req, res) => {
|
|
||||||
res.json({
|
|
||||||
protocolVersion: PROTOCOL_VERSION,
|
|
||||||
serverInfo: {
|
|
||||||
name: "n8n-mcp",
|
|
||||||
version: PROJECT_VERSION,
|
|
||||||
capabilities: {
|
|
||||||
tools: true,
|
|
||||||
resources: false,
|
|
||||||
prompts: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// In handleMCPRequest method, add session header
|
|
||||||
if (N8N_MODE && this.session) {
|
|
||||||
res.setHeader('Mcp-Session-Id', this.session.sessionId);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update error handling to use JSON-RPC format
|
|
||||||
catch (error) {
|
|
||||||
logger.error('MCP request error:', error);
|
|
||||||
|
|
||||||
if (N8N_MODE) {
|
|
||||||
res.status(500).json({
|
|
||||||
jsonrpc: '2.0',
|
|
||||||
error: {
|
|
||||||
code: -32603,
|
|
||||||
message: 'Internal error',
|
|
||||||
data: error instanceof Error ? error.message : 'Unknown error',
|
|
||||||
},
|
|
||||||
id: null,
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
// Keep existing error handling for backward compatibility
|
|
||||||
res.status(500).json({
|
|
||||||
error: 'Internal server error',
|
|
||||||
details: error instanceof Error ? error.message : 'Unknown error'
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
That's it! No new files, no complex session management. Just a few lines of code.
|
|
||||||
|
|
||||||
### Step 2: Update Package Scripts
|
|
||||||
|
|
||||||
#### 2.1 Update `package.json`
|
|
||||||
|
|
||||||
Add a simple script for n8n mode:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"scripts": {
|
|
||||||
"start:n8n": "N8N_MODE=true MCP_MODE=http node dist/mcp/index.js"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 3: Create Docker Infrastructure for n8n
|
|
||||||
|
|
||||||
#### 3.1 Create `Dockerfile.n8n`
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# Dockerfile.n8n - Optimized for n8n integration
|
|
||||||
FROM node:22-alpine AS builder
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Install build dependencies
|
|
||||||
RUN apk add --no-cache python3 make g++
|
|
||||||
|
|
||||||
# Copy package files
|
|
||||||
COPY package*.json tsconfig*.json ./
|
|
||||||
|
|
||||||
# Install ALL dependencies
|
|
||||||
RUN npm ci --no-audit --no-fund
|
|
||||||
|
|
||||||
# Copy source and build
|
|
||||||
COPY src ./src
|
|
||||||
RUN npm run build && npm run rebuild
|
|
||||||
|
|
||||||
# Runtime stage
|
|
||||||
FROM node:22-alpine
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# Install runtime dependencies
|
|
||||||
RUN apk add --no-cache curl dumb-init
|
|
||||||
|
|
||||||
# Create non-root user
|
|
||||||
RUN addgroup -g 1001 -S nodejs && adduser -S nodejs -u 1001
|
|
||||||
|
|
||||||
# Copy application from builder
|
|
||||||
COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist
|
|
||||||
COPY --from=builder --chown=nodejs:nodejs /app/data ./data
|
|
||||||
COPY --from=builder --chown=nodejs:nodejs /app/node_modules ./node_modules
|
|
||||||
COPY --chown=nodejs:nodejs package.json ./
|
|
||||||
|
|
||||||
USER nodejs
|
|
||||||
|
|
||||||
EXPOSE 3001
|
|
||||||
|
|
||||||
HEALTHCHECK CMD curl -f http://localhost:3001/health || exit 1
|
|
||||||
|
|
||||||
ENTRYPOINT ["dumb-init", "--"]
|
|
||||||
CMD ["node", "dist/mcp/index.js"]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 3.2 Create `docker-compose.n8n.yml`
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# docker-compose.n8n.yml - Simple stack for n8n + n8n-mcp
|
|
||||||
version: '3.8'
|
|
||||||
|
|
||||||
services:
|
|
||||||
n8n:
|
|
||||||
image: n8nio/n8n:latest
|
|
||||||
container_name: n8n
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- "5678:5678"
|
|
||||||
environment:
|
|
||||||
- N8N_BASIC_AUTH_ACTIVE=${N8N_BASIC_AUTH_ACTIVE:-true}
|
|
||||||
- N8N_BASIC_AUTH_USER=${N8N_USER:-admin}
|
|
||||||
- N8N_BASIC_AUTH_PASSWORD=${N8N_PASSWORD:-changeme}
|
|
||||||
- N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
|
|
||||||
volumes:
|
|
||||||
- n8n_data:/home/node/.n8n
|
|
||||||
networks:
|
|
||||||
- n8n-net
|
|
||||||
depends_on:
|
|
||||||
n8n-mcp:
|
|
||||||
condition: service_healthy
|
|
||||||
|
|
||||||
n8n-mcp:
|
|
||||||
image: ghcr.io/${GITHUB_USER:-czlonkowski}/n8n-mcp-n8n:latest
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
dockerfile: Dockerfile.n8n
|
|
||||||
container_name: n8n-mcp
|
|
||||||
restart: unless-stopped
|
|
||||||
environment:
|
|
||||||
- MCP_MODE=http
|
|
||||||
- N8N_MODE=true
|
|
||||||
- AUTH_TOKEN=${MCP_AUTH_TOKEN}
|
|
||||||
- NODE_ENV=production
|
|
||||||
- HTTP_PORT=3001
|
|
||||||
networks:
|
|
||||||
- n8n-net
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "curl", "-f", "http://localhost:3001/health"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 10s
|
|
||||||
retries: 3
|
|
||||||
|
|
||||||
networks:
|
|
||||||
n8n-net:
|
|
||||||
driver: bridge
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
n8n_data:
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 3.3 Create `.env.n8n.example`
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# .env.n8n.example - Copy to .env and configure
|
|
||||||
|
|
||||||
# n8n Configuration
|
|
||||||
N8N_USER=admin
|
|
||||||
N8N_PASSWORD=changeme
|
|
||||||
N8N_BASIC_AUTH_ACTIVE=true
|
|
||||||
|
|
||||||
# MCP Configuration
|
|
||||||
# Generate with: openssl rand -base64 32
|
|
||||||
MCP_AUTH_TOKEN=your-secure-token-minimum-32-characters
|
|
||||||
|
|
||||||
# GitHub username for image registry
|
|
||||||
GITHUB_USER=czlonkowski
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 4: Create GitHub Actions Workflow
|
|
||||||
|
|
||||||
#### 4.1 Create `.github/workflows/docker-build-n8n.yml`
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
name: Build n8n Docker Image
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [main]
|
|
||||||
tags: ['v*']
|
|
||||||
paths:
|
|
||||||
- 'src/**'
|
|
||||||
- 'package*.json'
|
|
||||||
- 'Dockerfile.n8n'
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: ghcr.io
|
|
||||||
IMAGE_NAME: ${{ github.repository }}-n8n
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ${{ env.REGISTRY }}
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- uses: docker/metadata-action@v5
|
|
||||||
id: meta
|
|
||||||
with:
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
|
||||||
tags: |
|
|
||||||
type=ref,event=branch
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=raw,value=latest,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
- uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: ./Dockerfile.n8n
|
|
||||||
push: true
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
cache-from: type=gha
|
|
||||||
cache-to: type=gha,mode=max
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 5: Testing
|
|
||||||
|
|
||||||
#### 5.1 Unit Tests for n8n Mode
|
|
||||||
|
|
||||||
Create `tests/unit/http-server-n8n-mode.test.ts`:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
import { describe, it, expect, vi } from 'vitest';
|
|
||||||
import request from 'supertest';
|
|
||||||
|
|
||||||
describe('n8n Mode', () => {
|
|
||||||
it('should return protocol version on GET /mcp', async () => {
|
|
||||||
process.env.N8N_MODE = 'true';
|
|
||||||
const app = await createTestApp();
|
|
||||||
|
|
||||||
const response = await request(app)
|
|
||||||
.get('/mcp')
|
|
||||||
.expect(200);
|
|
||||||
|
|
||||||
expect(response.body.protocolVersion).toBe('2024-11-05');
|
|
||||||
expect(response.body.serverInfo.capabilities.tools).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should include session ID in response headers', async () => {
|
|
||||||
process.env.N8N_MODE = 'true';
|
|
||||||
const app = await createTestApp();
|
|
||||||
|
|
||||||
const response = await request(app)
|
|
||||||
.post('/mcp')
|
|
||||||
.set('Authorization', 'Bearer test-token')
|
|
||||||
.send({ jsonrpc: '2.0', method: 'initialize', id: 1 });
|
|
||||||
|
|
||||||
expect(response.headers['mcp-session-id']).toBeDefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should format errors as JSON-RPC', async () => {
|
|
||||||
process.env.N8N_MODE = 'true';
|
|
||||||
const app = await createTestApp();
|
|
||||||
|
|
||||||
const response = await request(app)
|
|
||||||
.post('/mcp')
|
|
||||||
.send({ invalid: 'request' })
|
|
||||||
.expect(500);
|
|
||||||
|
|
||||||
expect(response.body.jsonrpc).toBe('2.0');
|
|
||||||
expect(response.body.error.code).toBe(-32603);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 5.2 Quick Deployment Script
|
|
||||||
|
|
||||||
Create `deploy/quick-deploy-n8n.sh`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
#!/bin/bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
echo "🚀 Quick Deploy n8n + n8n-mcp"
|
|
||||||
|
|
||||||
# Check prerequisites
|
|
||||||
command -v docker >/dev/null 2>&1 || { echo "Docker required"; exit 1; }
|
|
||||||
command -v docker-compose >/dev/null 2>&1 || { echo "Docker Compose required"; exit 1; }
|
|
||||||
|
|
||||||
# Generate auth token if not exists
|
|
||||||
if [ ! -f .env ]; then
|
|
||||||
cp .env.n8n.example .env
|
|
||||||
TOKEN=$(openssl rand -base64 32)
|
|
||||||
sed -i "s/your-secure-token-minimum-32-characters/$TOKEN/" .env
|
|
||||||
echo "Generated MCP_AUTH_TOKEN: $TOKEN"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Deploy
|
|
||||||
docker-compose -f docker-compose.n8n.yml up -d
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "✅ Deployment complete!"
|
|
||||||
echo ""
|
|
||||||
echo "📋 Next steps:"
|
|
||||||
echo "1. Access n8n at http://localhost:5678"
|
|
||||||
echo " Username: admin (or check .env)"
|
|
||||||
echo " Password: changeme (or check .env)"
|
|
||||||
echo ""
|
|
||||||
echo "2. Create a workflow with MCP Client Tool:"
|
|
||||||
echo " - Server URL: http://n8n-mcp:3001/mcp"
|
|
||||||
echo " - Authentication: Bearer Token"
|
|
||||||
echo " - Token: Check .env file for MCP_AUTH_TOKEN"
|
|
||||||
echo ""
|
|
||||||
echo "📊 View logs: docker-compose -f docker-compose.n8n.yml logs -f"
|
|
||||||
echo "🛑 Stop: docker-compose -f docker-compose.n8n.yml down"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Implementation Checklist (Simplified)
|
|
||||||
|
|
||||||
### Code Changes
|
|
||||||
- [ ] Add N8N_MODE flag to `http-server-single-session.ts`
|
|
||||||
- [ ] Add protocol version endpoint (GET /mcp) when N8N_MODE=true
|
|
||||||
- [ ] Add Mcp-Session-Id header to responses
|
|
||||||
- [ ] Update error responses to JSON-RPC format when N8N_MODE=true
|
|
||||||
- [ ] Add npm script `start:n8n` to package.json
|
|
||||||
|
|
||||||
### Docker Infrastructure
|
|
||||||
- [ ] Create `Dockerfile.n8n` for n8n-specific image
|
|
||||||
- [ ] Create `docker-compose.n8n.yml` for simple deployment
|
|
||||||
- [ ] Create `.env.n8n.example` template
|
|
||||||
- [ ] Create GitHub Actions workflow `docker-build-n8n.yml`
|
|
||||||
- [ ] Create `deploy/quick-deploy-n8n.sh` script
|
|
||||||
|
|
||||||
### Testing
|
|
||||||
- [ ] Write unit tests for n8n mode functionality
|
|
||||||
- [ ] Test with actual n8n MCP Client Tool
|
|
||||||
- [ ] Verify protocol version endpoint
|
|
||||||
- [ ] Test authentication flow
|
|
||||||
- [ ] Validate error formatting
|
|
||||||
|
|
||||||
### Documentation
|
|
||||||
- [ ] Update README with n8n deployment section
|
|
||||||
- [ ] Document N8N_MODE environment variable
|
|
||||||
- [ ] Add troubleshooting guide for common issues
|
|
||||||
|
|
||||||
## Quick Start Guide
|
|
||||||
|
|
||||||
### 1. One-Command Deployment
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Clone and deploy
|
|
||||||
git clone https://github.com/czlonkowski/n8n-mcp.git
|
|
||||||
cd n8n-mcp
|
|
||||||
./deploy/quick-deploy-n8n.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Manual Configuration in n8n
|
|
||||||
|
|
||||||
After deployment, configure the MCP Client Tool in n8n:
|
|
||||||
|
|
||||||
1. Open n8n at `http://localhost:5678`
|
|
||||||
2. Create a new workflow
|
|
||||||
3. Add "MCP Client Tool" node (under AI category)
|
|
||||||
4. Configure:
|
|
||||||
- **Server URL**: `http://n8n-mcp:3001/mcp`
|
|
||||||
- **Authentication**: Bearer Token
|
|
||||||
- **Token**: Check your `.env` file for MCP_AUTH_TOKEN
|
|
||||||
5. Select a tool (e.g., `list_nodes`)
|
|
||||||
6. Execute the workflow
|
|
||||||
|
|
||||||
### 3. Production Deployment
|
|
||||||
|
|
||||||
For production with SSL, use a reverse proxy:
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
# nginx configuration
|
|
||||||
server {
|
|
||||||
listen 443 ssl;
|
|
||||||
server_name n8n.yourdomain.com;
|
|
||||||
|
|
||||||
location / {
|
|
||||||
proxy_pass http://localhost:5678;
|
|
||||||
proxy_http_version 1.1;
|
|
||||||
proxy_set_header Upgrade $http_upgrade;
|
|
||||||
proxy_set_header Connection "upgrade";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The MCP server should remain internal only - n8n connects via Docker network.
|
|
||||||
|
|
||||||
## Success Criteria
|
|
||||||
|
|
||||||
The implementation is successful when:
|
|
||||||
|
|
||||||
1. **Minimal Code Changes**: Only ~20 lines added to existing server
|
|
||||||
2. **Protocol Compliance**: GET /mcp returns correct protocol version
|
|
||||||
3. **n8n Connection**: MCP Client Tool connects successfully
|
|
||||||
4. **Tool Execution**: Tools work without modification
|
|
||||||
5. **Backward Compatible**: Existing Claude Desktop usage unaffected
|
|
||||||
|
|
||||||
## Troubleshooting

### Common Issues

1. **"Protocol version mismatch"**
   - Ensure N8N_MODE=true is set
   - Check that GET /mcp returns "2024-11-05"

2. **"Authentication failed"**
   - Verify AUTH_TOKEN matches in .env and n8n
   - Token must be 32+ characters
   - Use the "Bearer Token" auth type in n8n

3. **"Connection refused"**
   - Check that the containers are on the same network
   - Use the internal hostname: `http://n8n-mcp:3001/mcp`
   - Verify the health check passes
|
|
||||||
|
|
||||||
4. **Testing the Setup**
|
|
||||||
```bash
|
|
||||||
# Check protocol version
|
|
||||||
docker exec n8n-mcp curl http://localhost:3001/mcp
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
docker-compose -f docker-compose.n8n.yml logs -f n8n-mcp
|
|
||||||
```
|
|
||||||
|
|
||||||
## Summary
|
|
||||||
|
|
||||||
This simplified approach:
|
|
||||||
- **Extends existing code** rather than creating new architecture
|
|
||||||
- **Adds n8n compatibility** with minimal changes
|
|
||||||
- **Uses separate Docker image** for clean deployment
|
|
||||||
- **Maintains backward compatibility** for existing users
|
|
||||||
- **Avoids overengineering** with simple, practical solutions
|
|
||||||
|
|
||||||
Total implementation effort: ~2-3 hours (vs. 2-3 days for multi-session approach)
|
|
||||||
@@ -1,146 +0,0 @@
|
|||||||
# Test Artifacts Documentation
|
|
||||||
|
|
||||||
This document describes the comprehensive test result artifact storage system implemented in the n8n-mcp project.
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
The test artifact system captures, stores, and presents test results in multiple formats to facilitate debugging, analysis, and historical tracking of test performance.
|
|
||||||
|
|
||||||
## Artifact Types
|
|
||||||
|
|
||||||
### 1. Test Results
|
|
||||||
- **JUnit XML** (`test-results/junit.xml`): Standard format for CI integration
|
|
||||||
- **JSON Results** (`test-results/results.json`): Detailed test data for analysis
|
|
||||||
- **HTML Report** (`test-results/html/index.html`): Interactive test report
|
|
||||||
- **Test Summary** (`test-summary.md`): Markdown summary for PR comments
|
|
||||||
|
|
||||||
### 2. Coverage Reports
|
|
||||||
- **LCOV** (`coverage/lcov.info`): Standard coverage format
|
|
||||||
- **HTML Coverage** (`coverage/html/index.html`): Interactive coverage browser
|
|
||||||
- **Coverage Summary** (`coverage/coverage-summary.json`): JSON coverage data
|
|
||||||
|
|
||||||
### 3. Benchmark Results
|
|
||||||
- **Benchmark JSON** (`benchmark-results.json`): Raw benchmark data
|
|
||||||
- **Comparison Reports** (`benchmark-comparison.md`): PR benchmark comparisons
|
|
||||||
|
|
||||||
### 4. Detailed Reports
|
|
||||||
- **HTML Report** (`test-reports/report.html`): Comprehensive styled report
|
|
||||||
- **Markdown Report** (`test-reports/report.md`): Full markdown report
|
|
||||||
- **JSON Report** (`test-reports/report.json`): Complete test data
|
|
||||||
|
|
||||||
## GitHub Actions Integration
|
|
||||||
|
|
||||||
### Test Workflow (`test.yml`)
|
|
||||||
|
|
||||||
The main test workflow:
|
|
||||||
1. Runs tests with coverage using multiple reporters
|
|
||||||
2. Generates test summaries and detailed reports
|
|
||||||
3. Uploads artifacts with metadata
|
|
||||||
4. Posts summaries to PRs
|
|
||||||
5. Creates a combined artifact index
|
|
||||||
|
|
||||||
### Benchmark PR Workflow (`benchmark-pr.yml`)
|
|
||||||
|
|
||||||
For pull requests:
|
|
||||||
1. Runs benchmarks on PR branch
|
|
||||||
2. Runs benchmarks on base branch
|
|
||||||
3. Compares results
|
|
||||||
4. Posts comparison to PR
|
|
||||||
5. Sets status checks for regressions
|
|
||||||
|
|
||||||
## Artifact Retention
|
|
||||||
|
|
||||||
- **Test Results**: 30 days
|
|
||||||
- **Coverage Reports**: 30 days
|
|
||||||
- **Benchmark Results**: 30 days
|
|
||||||
- **Combined Results**: 90 days
|
|
||||||
- **Test Metadata**: 30 days
|
|
||||||
|
|
||||||
## PR Comment Integration
|
|
||||||
|
|
||||||
The system automatically:
|
|
||||||
- Posts test summaries to PR comments
|
|
||||||
- Updates existing comments instead of creating duplicates
|
|
||||||
- Includes links to full artifacts
|
|
||||||
- Shows coverage and benchmark changes
|
|
||||||
|
|
||||||
## Job Summary
|
|
||||||
|
|
||||||
Each workflow run includes a job summary with:
|
|
||||||
- Test results overview
|
|
||||||
- Coverage summary
|
|
||||||
- Benchmark results
|
|
||||||
- Direct links to download artifacts
|
|
||||||
|
|
||||||
## Local Development
|
|
||||||
|
|
||||||
### Running Tests with Reports
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Run tests with all reporters
|
|
||||||
CI=true npm run test:coverage
|
|
||||||
|
|
||||||
# Generate detailed reports
|
|
||||||
node scripts/generate-detailed-reports.js
|
|
||||||
|
|
||||||
# Generate test summary
|
|
||||||
node scripts/generate-test-summary.js
|
|
||||||
|
|
||||||
# Compare benchmarks
|
|
||||||
node scripts/compare-benchmarks.js benchmark-results.json benchmark-baseline.json
|
|
||||||
```
|
|
||||||
|
|
||||||
### Report Locations
|
|
||||||
|
|
||||||
When running locally, reports are generated in:
|
|
||||||
- `test-results/` - Vitest outputs
|
|
||||||
- `test-reports/` - Detailed reports
|
|
||||||
- `coverage/` - Coverage reports
|
|
||||||
- Root directory - Summary files
|
|
||||||
|
|
||||||
## Report Formats
|
|
||||||
|
|
||||||
### HTML Report Features
|
|
||||||
- Responsive design
|
|
||||||
- Test suite breakdown
|
|
||||||
- Failed test details with error messages
|
|
||||||
- Coverage visualization with progress bars
|
|
||||||
- Benchmark performance metrics
|
|
||||||
- Sortable tables
|
|
||||||
|
|
||||||
### Markdown Report Features
|
|
||||||
- GitHub-compatible formatting
|
|
||||||
- Summary statistics
|
|
||||||
- Failed test listings
|
|
||||||
- Coverage breakdown
|
|
||||||
- Benchmark comparisons
|
|
||||||
|
|
||||||
### JSON Report Features
|
|
||||||
- Complete test data
|
|
||||||
- Programmatic access
|
|
||||||
- Historical comparison
|
|
||||||
- CI/CD integration
|
|
||||||
|
|
||||||
## Best Practices
|
|
||||||
|
|
||||||
1. **Always Check Artifacts**: When tests fail in CI, download and review the HTML report
|
|
||||||
2. **Monitor Coverage**: Use the coverage reports to identify untested code
|
|
||||||
3. **Track Benchmarks**: Review benchmark comparisons on performance-critical PRs
|
|
||||||
4. **Archive Important Runs**: Download artifacts from significant releases
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Missing Artifacts
|
|
||||||
- Check if tests ran to completion
|
|
||||||
- Verify artifact upload steps executed
|
|
||||||
- Check retention period hasn't expired
|
|
||||||
|
|
||||||
### Report Generation Failures
|
|
||||||
- Ensure all dependencies are installed
|
|
||||||
- Check for valid test/coverage output files
|
|
||||||
- Review workflow logs for errors
|
|
||||||
|
|
||||||
### PR Comment Issues
|
|
||||||
- Verify GitHub Actions permissions
|
|
||||||
- Check bot authentication
|
|
||||||
- Review comment posting logs
|
|
||||||
@@ -1,802 +0,0 @@
|
|||||||
# n8n-MCP Testing Architecture
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
This document describes the comprehensive testing infrastructure implemented for the n8n-MCP project. The testing suite includes over 1,100 tests split between unit and integration tests, benchmarks, and a complete CI/CD pipeline ensuring code quality and reliability.
|
|
||||||
|
|
||||||
### Test Suite Statistics (from CI Run #41)
|
|
||||||
|
|
||||||
- **Total Tests**: 1,182 tests
|
|
||||||
- **Unit Tests**: 933 tests (932 passed, 1 skipped)
|
|
||||||
- **Integration Tests**: 249 tests (245 passed, 4 skipped)
|
|
||||||
- **Test Files**:
|
|
||||||
- 30 unit test files
|
|
||||||
- 14 integration test files
|
|
||||||
- **Test Execution Time**:
|
|
||||||
- Unit tests: ~2 minutes with coverage
|
|
||||||
- Integration tests: ~23 seconds
|
|
||||||
- Total CI time: ~2.5 minutes
|
|
||||||
- **Success Rate**: 99.5% (only 5 tests skipped, 0 failures)
|
|
||||||
- **CI/CD Pipeline**: Fully automated with GitHub Actions
|
|
||||||
- **Test Artifacts**: JUnit XML, coverage reports, benchmark results
|
|
||||||
- **Parallel Execution**: Configurable with thread pool
|
|
||||||
|
|
||||||
## Testing Framework: Vitest
|
|
||||||
|
|
||||||
We use **Vitest** as our primary testing framework, chosen for its:
|
|
||||||
- **Speed**: Native ESM support and fast execution
|
|
||||||
- **TypeScript Integration**: First-class TypeScript support
|
|
||||||
- **Watch Mode**: Instant feedback during development
|
|
||||||
- **Jest Compatibility**: Easy migration from Jest
|
|
||||||
- **Built-in Mocking**: Powerful mocking capabilities
|
|
||||||
- **Coverage**: Integrated code coverage with v8
|
|
||||||
|
|
||||||
### Configuration
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// vitest.config.ts
|
|
||||||
export default defineConfig({
|
|
||||||
test: {
|
|
||||||
globals: true,
|
|
||||||
environment: 'node',
|
|
||||||
setupFiles: ['./tests/setup/global-setup.ts'],
|
|
||||||
pool: 'threads',
|
|
||||||
poolOptions: {
|
|
||||||
threads: {
|
|
||||||
singleThread: process.env.TEST_PARALLEL !== 'true',
|
|
||||||
maxThreads: parseInt(process.env.TEST_MAX_WORKERS || '4', 10)
|
|
||||||
}
|
|
||||||
},
|
|
||||||
coverage: {
|
|
||||||
provider: 'v8',
|
|
||||||
reporter: ['lcov', 'html', 'text-summary'],
|
|
||||||
exclude: ['node_modules/', 'tests/', '**/*.test.ts', 'scripts/']
|
|
||||||
}
|
|
||||||
},
|
|
||||||
resolve: {
|
|
||||||
alias: {
|
|
||||||
'@': path.resolve(__dirname, './src'),
|
|
||||||
'@tests': path.resolve(__dirname, './tests')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Directory Structure
|
|
||||||
|
|
||||||
```
|
|
||||||
tests/
|
|
||||||
├── unit/ # Unit tests with mocks (933 tests, 30 files)
|
|
||||||
│ ├── __mocks__/ # Mock implementations
|
|
||||||
│ │ └── n8n-nodes-base.test.ts
|
|
||||||
│ ├── database/ # Database layer tests
|
|
||||||
│ │ ├── database-adapter-unit.test.ts
|
|
||||||
│ │ ├── node-repository-core.test.ts
|
|
||||||
│ │ └── template-repository-core.test.ts
|
|
||||||
│ ├── loaders/ # Node loader tests
|
|
||||||
│ │ └── node-loader.test.ts
|
|
||||||
│ ├── mappers/ # Data mapper tests
|
|
||||||
│ │ └── docs-mapper.test.ts
|
|
||||||
│ ├── mcp/ # MCP server and tools tests
|
|
||||||
│ │ ├── handlers-n8n-manager.test.ts
|
|
||||||
│ │ ├── handlers-workflow-diff.test.ts
|
|
||||||
│ │ ├── tools-documentation.test.ts
|
|
||||||
│ │ └── tools.test.ts
|
|
||||||
│ ├── parsers/ # Parser tests
|
|
||||||
│ │ ├── node-parser.test.ts
|
|
||||||
│ │ ├── property-extractor.test.ts
|
|
||||||
│ │ └── simple-parser.test.ts
|
|
||||||
│ ├── services/ # Service layer tests (largest test suite)
|
|
||||||
│ │ ├── config-validator.test.ts
|
|
||||||
│ │ ├── enhanced-config-validator.test.ts
|
|
||||||
│ │ ├── example-generator.test.ts
|
|
||||||
│ │ ├── expression-validator.test.ts
|
|
||||||
│ │ ├── n8n-api-client.test.ts
|
|
||||||
│ │ ├── n8n-validation.test.ts
|
|
||||||
│ │ ├── node-specific-validators.test.ts
|
|
||||||
│ │ ├── property-dependencies.test.ts
|
|
||||||
│ │ ├── property-filter.test.ts
|
|
||||||
│ │ ├── task-templates.test.ts
|
|
||||||
│ │ ├── workflow-diff-engine.test.ts
|
|
||||||
│ │ ├── workflow-validator-comprehensive.test.ts
|
|
||||||
│ │ └── workflow-validator.test.ts
|
|
||||||
│ └── utils/ # Utility function tests
|
|
||||||
│ └── database-utils.test.ts
|
|
||||||
├── integration/ # Integration tests (249 tests, 14 files)
|
|
||||||
│ ├── database/ # Database integration tests
|
|
||||||
│ │ ├── connection-management.test.ts
|
|
||||||
│ │ ├── fts5-search.test.ts
|
|
||||||
│ │ ├── node-repository.test.ts
|
|
||||||
│ │ ├── performance.test.ts
|
|
||||||
│ │ └── transactions.test.ts
|
|
||||||
│ ├── mcp-protocol/ # MCP protocol tests
|
|
||||||
│ │ ├── basic-connection.test.ts
|
|
||||||
│ │ ├── error-handling.test.ts
|
|
||||||
│ │ ├── performance.test.ts
|
|
||||||
│ │ ├── protocol-compliance.test.ts
|
|
||||||
│ │ ├── session-management.test.ts
|
|
||||||
│ │ └── tool-invocation.test.ts
|
|
||||||
│ └── setup/ # Integration test setup
|
|
||||||
│ ├── integration-setup.ts
|
|
||||||
│ └── msw-test-server.ts
|
|
||||||
├── benchmarks/ # Performance benchmarks
|
|
||||||
│ ├── database-queries.bench.ts
|
|
||||||
│ └── sample.bench.ts
|
|
||||||
├── setup/ # Global test configuration
|
|
||||||
│ ├── global-setup.ts # Global test setup
|
|
||||||
│ ├── msw-setup.ts # Mock Service Worker setup
|
|
||||||
│ └── test-env.ts # Test environment configuration
|
|
||||||
├── utils/ # Test utilities
|
|
||||||
│ ├── assertions.ts # Custom assertions
|
|
||||||
│ ├── builders/ # Test data builders
|
|
||||||
│ │ └── workflow.builder.ts
|
|
||||||
│ ├── data-generators.ts # Test data generators
|
|
||||||
│ ├── database-utils.ts # Database test utilities
|
|
||||||
│ └── test-helpers.ts # General test helpers
|
|
||||||
├── mocks/ # Mock implementations
|
|
||||||
│ └── n8n-api/ # n8n API mocks
|
|
||||||
│ ├── handlers.ts # MSW request handlers
|
|
||||||
│ └── data/ # Mock data
|
|
||||||
└── fixtures/ # Test fixtures
|
|
||||||
├── database/ # Database fixtures
|
|
||||||
├── factories/ # Data factories
|
|
||||||
└── workflows/ # Workflow fixtures
|
|
||||||
```
|
|
||||||
|
|
||||||
## Mock Strategy
|
|
||||||
|
|
||||||
### 1. Mock Service Worker (MSW) for API Mocking
|
|
||||||
|
|
||||||
We use MSW for intercepting and mocking HTTP requests:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/mocks/n8n-api/handlers.ts
|
|
||||||
import { http, HttpResponse } from 'msw';
|
|
||||||
|
|
||||||
export const handlers = [
|
|
||||||
// Workflow endpoints
|
|
||||||
http.get('*/workflows/:id', ({ params }) => {
|
|
||||||
const workflow = mockWorkflows.find(w => w.id === params.id);
|
|
||||||
if (!workflow) {
|
|
||||||
return new HttpResponse(null, { status: 404 });
|
|
||||||
}
|
|
||||||
return HttpResponse.json(workflow);
|
|
||||||
}),
|
|
||||||
|
|
||||||
// Execution endpoints
|
|
||||||
http.post('*/workflows/:id/run', async ({ params, request }) => {
|
|
||||||
const body = await request.json();
|
|
||||||
return HttpResponse.json({
|
|
||||||
executionId: generateExecutionId(),
|
|
||||||
status: 'running'
|
|
||||||
});
|
|
||||||
})
|
|
||||||
];
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Database Mocking
|
|
||||||
|
|
||||||
For unit tests, we mock the database layer:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/unit/__mocks__/better-sqlite3.ts
|
|
||||||
import { vi } from 'vitest';
|
|
||||||
|
|
||||||
export default vi.fn(() => ({
|
|
||||||
prepare: vi.fn(() => ({
|
|
||||||
all: vi.fn().mockReturnValue([]),
|
|
||||||
get: vi.fn().mockReturnValue(undefined),
|
|
||||||
run: vi.fn().mockReturnValue({ changes: 1 }),
|
|
||||||
finalize: vi.fn()
|
|
||||||
})),
|
|
||||||
exec: vi.fn(),
|
|
||||||
close: vi.fn(),
|
|
||||||
pragma: vi.fn()
|
|
||||||
}));
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. MCP SDK Mocking
|
|
||||||
|
|
||||||
For testing MCP protocol interactions:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/integration/mcp-protocol/test-helpers.ts
|
|
||||||
export class TestableN8NMCPServer extends N8NMCPServer {
|
|
||||||
private transports = new Set<Transport>();
|
|
||||||
|
|
||||||
async connectToTransport(transport: Transport): Promise<void> {
|
|
||||||
this.transports.add(transport);
|
|
||||||
await this.connect(transport);
|
|
||||||
}
|
|
||||||
|
|
||||||
async close(): Promise<void> {
|
|
||||||
for (const transport of this.transports) {
|
|
||||||
await transport.close();
|
|
||||||
}
|
|
||||||
this.transports.clear();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Test Patterns and Utilities
|
|
||||||
|
|
||||||
### 1. Database Test Utilities
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/utils/database-utils.ts
|
|
||||||
export class TestDatabase {
|
|
||||||
constructor(options: TestDatabaseOptions = {}) {
|
|
||||||
this.options = {
|
|
||||||
mode: 'memory',
|
|
||||||
enableFTS5: true,
|
|
||||||
...options
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
async initialize(): Promise<Database.Database> {
|
|
||||||
const db = this.options.mode === 'memory'
|
|
||||||
? new Database(':memory:')
|
|
||||||
: new Database(this.dbPath);
|
|
||||||
|
|
||||||
if (this.options.enableFTS5) {
|
|
||||||
await this.enableFTS5(db);
|
|
||||||
}
|
|
||||||
|
|
||||||
return db;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Data Generators
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/utils/data-generators.ts
|
|
||||||
export class TestDataGenerator {
|
|
||||||
static generateNode(overrides: Partial<ParsedNode> = {}): ParsedNode {
|
|
||||||
return {
|
|
||||||
nodeType: `test.node${faker.number.int()}`,
|
|
||||||
displayName: faker.commerce.productName(),
|
|
||||||
description: faker.lorem.sentence(),
|
|
||||||
properties: this.generateProperties(5),
|
|
||||||
...overrides
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
static generateWorkflow(nodeCount = 3): any {
|
|
||||||
const nodes = Array.from({ length: nodeCount }, (_, i) => ({
|
|
||||||
id: `node_${i}`,
|
|
||||||
type: 'test.node',
|
|
||||||
position: [i * 100, 0],
|
|
||||||
parameters: {}
|
|
||||||
}));
|
|
||||||
|
|
||||||
return { nodes, connections: {} };
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Custom Assertions
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/utils/assertions.ts
|
|
||||||
export function expectValidMCPResponse(response: any): void {
|
|
||||||
expect(response).toBeDefined();
|
|
||||||
expect(response.content).toBeDefined();
|
|
||||||
expect(Array.isArray(response.content)).toBe(true);
|
|
||||||
expect(response.content[0]).toHaveProperty('type', 'text');
|
|
||||||
expect(response.content[0]).toHaveProperty('text');
|
|
||||||
}
|
|
||||||
|
|
||||||
export function expectNodeStructure(node: any): void {
|
|
||||||
expect(node).toHaveProperty('nodeType');
|
|
||||||
expect(node).toHaveProperty('displayName');
|
|
||||||
expect(node).toHaveProperty('properties');
|
|
||||||
expect(Array.isArray(node.properties)).toBe(true);
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Unit Testing
|
|
||||||
|
|
||||||
Our unit tests focus on testing individual components in isolation with mocked dependencies:
|
|
||||||
|
|
||||||
### Service Layer Tests
|
|
||||||
|
|
||||||
The bulk of our unit tests (~450 tests) are in the services layer:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/unit/services/workflow-validator-comprehensive.test.ts
|
|
||||||
describe('WorkflowValidator Comprehensive Tests', () => {
|
|
||||||
it('should validate complex workflow with AI nodes', () => {
|
|
||||||
const workflow = {
|
|
||||||
nodes: [
|
|
||||||
{
|
|
||||||
id: 'ai_agent',
|
|
||||||
type: '@n8n/n8n-nodes-langchain.agent',
|
|
||||||
parameters: { prompt: 'Analyze data' }
|
|
||||||
}
|
|
||||||
],
|
|
||||||
connections: {}
|
|
||||||
};
|
|
||||||
|
|
||||||
const result = validator.validateWorkflow(workflow);
|
|
||||||
expect(result.valid).toBe(true);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### Parser Tests
|
|
||||||
|
|
||||||
Testing the node parsing logic:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/unit/parsers/property-extractor.test.ts
|
|
||||||
describe('PropertyExtractor', () => {
|
|
||||||
it('should extract nested properties correctly', () => {
|
|
||||||
const node = {
|
|
||||||
properties: [
|
|
||||||
{
|
|
||||||
displayName: 'Options',
|
|
||||||
name: 'options',
|
|
||||||
type: 'collection',
|
|
||||||
options: [
|
|
||||||
{ name: 'timeout', type: 'number' }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
};
|
|
||||||
|
|
||||||
const extracted = extractor.extractProperties(node);
|
|
||||||
expect(extracted).toHaveProperty('options.timeout');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### Mock Testing
|
|
||||||
|
|
||||||
Testing our mock implementations:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/unit/__mocks__/n8n-nodes-base.test.ts
|
|
||||||
describe('n8n-nodes-base mock', () => {
|
|
||||||
it('should provide mocked node definitions', () => {
|
|
||||||
const httpNode = mockNodes['n8n-nodes-base.httpRequest'];
|
|
||||||
expect(httpNode).toBeDefined();
|
|
||||||
expect(httpNode.description.displayName).toBe('HTTP Request');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Integration Testing
|
|
||||||
|
|
||||||
Our integration tests verify the complete system behavior:
|
|
||||||
|
|
||||||
### MCP Protocol Testing
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/integration/mcp-protocol/tool-invocation.test.ts
|
|
||||||
describe('MCP Tool Invocation', () => {
|
|
||||||
let mcpServer: TestableN8NMCPServer;
|
|
||||||
let client: Client;
|
|
||||||
|
|
||||||
beforeEach(async () => {
|
|
||||||
mcpServer = new TestableN8NMCPServer();
|
|
||||||
await mcpServer.initialize();
|
|
||||||
|
|
||||||
const [serverTransport, clientTransport] = InMemoryTransport.createLinkedPair();
|
|
||||||
await mcpServer.connectToTransport(serverTransport);
|
|
||||||
|
|
||||||
client = new Client({ name: 'test-client', version: '1.0.0' }, {});
|
|
||||||
await client.connect(clientTransport);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should list nodes with filtering', async () => {
|
|
||||||
const response = await client.callTool({
|
|
||||||
name: 'list_nodes',
|
|
||||||
arguments: { category: 'trigger', limit: 10 }
|
|
||||||
});
|
|
||||||
|
|
||||||
expectValidMCPResponse(response);
|
|
||||||
const result = JSON.parse(response.content[0].text);
|
|
||||||
expect(result.nodes).toHaveLength(10);
|
|
||||||
expect(result.nodes.every(n => n.category === 'trigger')).toBe(true);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### Database Integration Testing
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/integration/database/fts5-search.test.ts
|
|
||||||
describe('FTS5 Search Integration', () => {
|
|
||||||
it('should perform fuzzy search', async () => {
|
|
||||||
const results = await nodeRepo.searchNodes('HTT', 'FUZZY');
|
|
||||||
|
|
||||||
expect(results.some(n => n.nodeType.includes('httpRequest'))).toBe(true);
|
|
||||||
expect(results.some(n => n.displayName.includes('HTTP'))).toBe(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle complex boolean queries', async () => {
|
|
||||||
const results = await nodeRepo.searchNodes('webhook OR http', 'OR');
|
|
||||||
|
|
||||||
expect(results.length).toBeGreaterThan(0);
|
|
||||||
expect(results.some(n =>
|
|
||||||
n.description?.includes('webhook') ||
|
|
||||||
n.description?.includes('http')
|
|
||||||
)).toBe(true);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Test Distribution and Coverage
|
|
||||||
|
|
||||||
### Test Distribution by Component
|
|
||||||
|
|
||||||
Based on our 1,182 tests:
|
|
||||||
|
|
||||||
1. **Services Layer** (~450 tests)
|
|
||||||
- `workflow-validator-comprehensive.test.ts`: 150+ tests
|
|
||||||
- `node-specific-validators.test.ts`: 120+ tests
|
|
||||||
- `n8n-validation.test.ts`: 80+ tests
|
|
||||||
- `n8n-api-client.test.ts`: 60+ tests
|
|
||||||
|
|
||||||
2. **Parsers** (~200 tests)
|
|
||||||
- `simple-parser.test.ts`: 80+ tests
|
|
||||||
- `property-extractor.test.ts`: 70+ tests
|
|
||||||
- `node-parser.test.ts`: 50+ tests
|
|
||||||
|
|
||||||
3. **MCP Integration** (~150 tests)
|
|
||||||
- `tool-invocation.test.ts`: 50+ tests
|
|
||||||
- `error-handling.test.ts`: 40+ tests
|
|
||||||
- `session-management.test.ts`: 30+ tests
|
|
||||||
|
|
||||||
4. **Database** (~300 tests)
|
|
||||||
- Unit tests for repositories: 100+ tests
|
|
||||||
- Integration tests for FTS5 search: 80+ tests
|
|
||||||
- Transaction tests: 60+ tests
|
|
||||||
- Performance tests: 60+ tests
|
|
||||||
|
|
||||||
### Test Execution Performance
|
|
||||||
|
|
||||||
From our CI runs:
|
|
||||||
- **Fastest tests**: Unit tests with mocks (<1ms each)
|
|
||||||
- **Slowest tests**: Integration tests with real database (100-5000ms)
|
|
||||||
- **Average test time**: ~20ms per test
|
|
||||||
- **Total suite execution**: Under 3 minutes in CI
|
|
||||||
|
|
||||||
## CI/CD Pipeline
|
|
||||||
|
|
||||||
Our GitHub Actions workflow runs all tests automatically:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# .github/workflows/test.yml
|
|
||||||
name: Test Suite
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [main]
|
|
||||||
pull_request:
|
|
||||||
branches: [main]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
- uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: 20
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: npm ci
|
|
||||||
|
|
||||||
- name: Run unit tests with coverage
|
|
||||||
run: npm run test:unit -- --coverage
|
|
||||||
|
|
||||||
- name: Run integration tests
|
|
||||||
run: npm run test:integration
|
|
||||||
|
|
||||||
- name: Upload coverage to Codecov
|
|
||||||
uses: codecov/codecov-action@v4
|
|
||||||
```
|
|
||||||
|
|
||||||
### Test Execution Scripts
|
|
||||||
|
|
||||||
```json
|
|
||||||
// package.json
|
|
||||||
{
|
|
||||||
"scripts": {
|
|
||||||
"test": "vitest",
|
|
||||||
"test:unit": "vitest run tests/unit",
|
|
||||||
"test:integration": "vitest run tests/integration --config vitest.config.integration.ts",
|
|
||||||
"test:coverage": "vitest run --coverage",
|
|
||||||
"test:watch": "vitest watch",
|
|
||||||
"test:bench": "vitest bench --config vitest.config.benchmark.ts",
|
|
||||||
"benchmark:ci": "CI=true node scripts/run-benchmarks-ci.js"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### CI Test Results Summary
|
|
||||||
|
|
||||||
From our latest CI run (#41):
|
|
||||||
|
|
||||||
```
|
|
||||||
UNIT TESTS:
|
|
||||||
Test Files 30 passed (30)
|
|
||||||
Tests 932 passed | 1 skipped (933)
|
|
||||||
|
|
||||||
INTEGRATION TESTS:
|
|
||||||
Test Files 14 passed (14)
|
|
||||||
Tests 245 passed | 4 skipped (249)
|
|
||||||
|
|
||||||
TOTAL: 1,177 passed | 5 skipped | 0 failed
|
|
||||||
```
|
|
||||||
|
|
||||||
## Performance Testing
|
|
||||||
|
|
||||||
We use Vitest's built-in benchmark functionality:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/benchmarks/database-queries.bench.ts
|
|
||||||
import { bench, describe } from 'vitest';
|
|
||||||
|
|
||||||
describe('Database Query Performance', () => {
|
|
||||||
bench('search nodes by category', async () => {
|
|
||||||
await nodeRepo.getNodesByCategory('trigger');
|
|
||||||
});
|
|
||||||
|
|
||||||
bench('FTS5 search performance', async () => {
|
|
||||||
await nodeRepo.searchNodes('webhook http request', 'AND');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Environment Configuration
|
|
||||||
|
|
||||||
Test environment is configured via `.env.test`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Test Environment Configuration
|
|
||||||
NODE_ENV=test
|
|
||||||
TEST_DB_PATH=:memory:
|
|
||||||
TEST_PARALLEL=false
|
|
||||||
TEST_MAX_WORKERS=4
|
|
||||||
FEATURE_TEST_COVERAGE=true
|
|
||||||
MSW_ENABLED=true
|
|
||||||
```
|
|
||||||
|
|
||||||
## Key Patterns and Lessons Learned
|
|
||||||
|
|
||||||
### 1. Response Structure Consistency
|
|
||||||
|
|
||||||
All MCP responses follow a specific structure that must be handled correctly:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// Common pattern for handling MCP responses
|
|
||||||
const response = await client.callTool({ name: 'list_nodes', arguments: {} });
|
|
||||||
|
|
||||||
// MCP responses have content array with text objects
|
|
||||||
expect(response.content).toBeDefined();
|
|
||||||
expect(response.content[0].type).toBe('text');
|
|
||||||
|
|
||||||
// Parse the actual data
|
|
||||||
const data = JSON.parse(response.content[0].text);
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. MSW Integration Setup
|
|
||||||
|
|
||||||
Proper MSW setup is crucial for integration tests:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/integration/setup/integration-setup.ts
|
|
||||||
import { setupServer } from 'msw/node';
|
|
||||||
import { handlers } from '@tests/mocks/n8n-api/handlers';
|
|
||||||
|
|
||||||
// Create server but don't start it globally
|
|
||||||
const server = setupServer(...handlers);
|
|
||||||
|
|
||||||
beforeAll(async () => {
|
|
||||||
// Only start MSW for integration tests
|
|
||||||
if (process.env.MSW_ENABLED === 'true') {
|
|
||||||
server.listen({ onUnhandledRequest: 'bypass' });
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
afterAll(async () => {
|
|
||||||
server.close();
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Database Isolation for Parallel Tests
|
|
||||||
|
|
||||||
Each test gets its own database to enable parallel execution:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/utils/database-utils.ts
|
|
||||||
export function createTestDatabaseAdapter(
|
|
||||||
db?: Database.Database,
|
|
||||||
options: TestDatabaseOptions = {}
|
|
||||||
): DatabaseAdapter {
|
|
||||||
const database = db || new Database(':memory:');
|
|
||||||
|
|
||||||
// Enable FTS5 if needed
|
|
||||||
if (options.enableFTS5) {
|
|
||||||
database.exec('PRAGMA main.compile_options;');
|
|
||||||
}
|
|
||||||
|
|
||||||
return new DatabaseAdapter(database);
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Environment-Aware Performance Thresholds
|
|
||||||
|
|
||||||
CI environments are slower, so we adjust expectations:
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// Environment-aware thresholds
|
|
||||||
const getThreshold = (local: number, ci: number) =>
|
|
||||||
process.env.CI ? ci : local;
|
|
||||||
|
|
||||||
it('should respond quickly', async () => {
|
|
||||||
const start = performance.now();
|
|
||||||
await someOperation();
|
|
||||||
const duration = performance.now() - start;
|
|
||||||
|
|
||||||
expect(duration).toBeLessThan(getThreshold(50, 200));
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Best Practices
|
|
||||||
|
|
||||||
### 1. Test Isolation
|
|
||||||
- Each test creates its own database instance
|
|
||||||
- Tests clean up after themselves
|
|
||||||
- No shared state between tests
|
|
||||||
|
|
||||||
### 2. Proper Cleanup Order
|
|
||||||
```typescript
|
|
||||||
afterEach(async () => {
|
|
||||||
// Close client first to ensure no pending requests
|
|
||||||
await client.close();
|
|
||||||
|
|
||||||
// Give time for client to fully close
|
|
||||||
await new Promise(resolve => setTimeout(resolve, 50));
|
|
||||||
|
|
||||||
// Then close server
|
|
||||||
await mcpServer.close();
|
|
||||||
|
|
||||||
// Finally cleanup database
|
|
||||||
await testDb.cleanup();
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Handle Async Operations Carefully
|
|
||||||
```typescript
|
|
||||||
// Avoid race conditions in cleanup
|
|
||||||
it('should handle disconnection', async () => {
|
|
||||||
// ... test code ...
|
|
||||||
|
|
||||||
// Ensure operations complete before cleanup
|
|
||||||
await transport.close();
|
|
||||||
await new Promise(resolve => setTimeout(resolve, 100));
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Meaningful Test Organization
|
|
||||||
- Group related tests using `describe` blocks
|
|
||||||
- Use descriptive test names that explain the behavior
|
|
||||||
- Follow AAA pattern: Arrange, Act, Assert
|
|
||||||
- Keep tests focused on single behaviors
|
|
||||||
|
|
||||||
## Debugging Tests
|
|
||||||
|
|
||||||
### Running Specific Tests
|
|
||||||
```bash
|
|
||||||
# Run a single test file
|
|
||||||
npm test tests/integration/mcp-protocol/tool-invocation.test.ts
|
|
||||||
|
|
||||||
# Run tests matching a pattern
|
|
||||||
npm test -- --grep "should list nodes"
|
|
||||||
|
|
||||||
# Run with debugging output
|
|
||||||
DEBUG=* npm test
|
|
||||||
```
|
|
||||||
|
|
||||||
### VSCode Integration
|
|
||||||
```json
|
|
||||||
// .vscode/launch.json
|
|
||||||
{
|
|
||||||
"configurations": [
|
|
||||||
{
|
|
||||||
"type": "node",
|
|
||||||
"request": "launch",
|
|
||||||
"name": "Debug Tests",
|
|
||||||
"program": "${workspaceFolder}/node_modules/vitest/vitest.mjs",
|
|
||||||
"args": ["run", "${file}"],
|
|
||||||
"console": "integratedTerminal"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Test Coverage
|
|
||||||
|
|
||||||
While we don't enforce strict coverage thresholds yet, the infrastructure is in place:
|
|
||||||
- Coverage reports are generated in `lcov`, `html`, and `text-summary` formats
|
|
||||||
- Integration with Codecov for tracking coverage over time
|
|
||||||
- Per-file coverage visible in VSCode with extensions
|
|
||||||
|
|
||||||
## Future Improvements
|
|
||||||
|
|
||||||
1. **E2E Testing**: Add Playwright for testing the full MCP server interaction
|
|
||||||
2. **Load Testing**: Implement k6 or Artillery for stress testing
|
|
||||||
3. **Contract Testing**: Add Pact for ensuring API compatibility
|
|
||||||
4. **Visual Regression**: For any UI components that may be added
|
|
||||||
5. **Mutation Testing**: Use Stryker to ensure test quality
|
|
||||||
|
|
||||||
## Common Issues and Solutions
|
|
||||||
|
|
||||||
### 1. Tests Hanging in CI
|
|
||||||
|
|
||||||
**Problem**: Tests would hang indefinitely in CI due to `process.exit()` calls.
|
|
||||||
|
|
||||||
**Solution**: Remove all `process.exit()` calls from test code and use proper cleanup:
|
|
||||||
```typescript
|
|
||||||
// Bad
|
|
||||||
afterAll(() => {
|
|
||||||
process.exit(0); // This causes Vitest to hang
|
|
||||||
});
|
|
||||||
|
|
||||||
// Good
|
|
||||||
afterAll(async () => {
|
|
||||||
await cleanup();
|
|
||||||
// Let Vitest handle process termination
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. MCP Response Structure
|
|
||||||
|
|
||||||
**Problem**: Tests expected the wrong response format from MCP tools.
|
|
||||||
|
|
||||||
**Solution**: Always access responses through `content[0].text`:
|
|
||||||
```typescript
|
|
||||||
// Wrong
|
|
||||||
const data = response[0].text;
|
|
||||||
|
|
||||||
// Correct
|
|
||||||
const data = JSON.parse(response.content[0].text);
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Database Not Found Errors
|
|
||||||
|
|
||||||
**Problem**: Tests failing with "node not found" when database is empty.
|
|
||||||
|
|
||||||
**Solution**: Check for empty databases before assertions:
|
|
||||||
```typescript
|
|
||||||
const stats = await server.executeTool('get_database_statistics', {});
|
|
||||||
if (stats.totalNodes > 0) {
|
|
||||||
expect(result.nodes.length).toBeGreaterThan(0);
|
|
||||||
} else {
|
|
||||||
expect(result.nodes).toHaveLength(0);
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. MSW Loading Globally
|
|
||||||
|
|
||||||
**Problem**: MSW interfering with unit tests when loaded globally.
|
|
||||||
|
|
||||||
**Solution**: Only load MSW in integration test setup:
|
|
||||||
```typescript
|
|
||||||
// vitest.config.integration.ts
|
|
||||||
setupFiles: [
|
|
||||||
'./tests/setup/global-setup.ts',
|
|
||||||
'./tests/integration/setup/integration-setup.ts' // MSW only here
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
## Resources
|
|
||||||
|
|
||||||
- [Vitest Documentation](https://vitest.dev/)
|
|
||||||
- [MSW Documentation](https://mswjs.io/)
|
|
||||||
- [Testing Best Practices](https://github.com/goldbergyoni/javascript-testing-best-practices)
|
|
||||||
- [MCP SDK Documentation](https://modelcontextprotocol.io/)
|
|
||||||
@@ -1,276 +0,0 @@
|
|||||||
# n8n-MCP Testing Implementation Checklist
|
|
||||||
|
|
||||||
## Test Suite Development Status
|
|
||||||
|
|
||||||
### Context
|
|
||||||
- **Situation**: Building comprehensive test suite from scratch
|
|
||||||
- **Branch**: feat/comprehensive-testing-suite (separate from main)
|
|
||||||
- **Main Branch Status**: Working in production without tests
|
|
||||||
- **Goal**: Add test coverage without disrupting development
|
|
||||||
|
|
||||||
## Immediate Actions (Day 1)
|
|
||||||
|
|
||||||
- [x] ~~Fix failing tests (Phase 0)~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Create GitHub Actions workflow file~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Install Vitest and remove Jest~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Create vitest.config.ts~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Setup global test configuration~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Migrate existing tests to Vitest syntax~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Setup coverage reporting with Codecov~~ ✅ COMPLETED
|
|
||||||
|
|
||||||
## Phase 1: Vitest Migration ✅ COMPLETED
|
|
||||||
|
|
||||||
All tests have been successfully migrated from Jest to Vitest:
|
|
||||||
- ✅ Removed Jest and installed Vitest
|
|
||||||
- ✅ Created vitest.config.ts with path aliases
|
|
||||||
- ✅ Set up global test configuration
|
|
||||||
- ✅ Migrated all 6 test files (68 tests passing)
|
|
||||||
- ✅ Updated TypeScript configuration
|
|
||||||
- ✅ Cleaned up Jest configuration files
|
|
||||||
|
|
||||||
## Week 1: Foundation
|
|
||||||
|
|
||||||
### Testing Infrastructure ✅ COMPLETED (Phase 2)
|
|
||||||
- [x] ~~Create test directory structure~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Setup mock infrastructure for better-sqlite3~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Create mock for n8n-nodes-base package~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Setup test database utilities~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Create factory pattern for nodes~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Create builder pattern for workflows~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Setup global test utilities~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Configure test environment variables~~ ✅ COMPLETED
|
|
||||||
|
|
||||||
### CI/CD Pipeline ✅ COMPLETED (Phase 3.8)
|
|
||||||
- [x] ~~GitHub Actions for test execution~~ ✅ COMPLETED & VERIFIED
|
|
||||||
- Successfully running with Vitest
|
|
||||||
- 1021 tests passing in CI
|
|
||||||
- Build time: ~2 minutes
|
|
||||||
- [x] ~~Coverage reporting integration~~ ✅ COMPLETED (Codecov setup)
|
|
||||||
- [x] ~~Performance benchmark tracking~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Test result artifacts~~ ✅ COMPLETED
|
|
||||||
- [ ] Branch protection rules
|
|
||||||
- [ ] Required status checks
|
|
||||||
|
|
||||||
## Week 2: Mock Infrastructure
|
|
||||||
|
|
||||||
### Database Mocking
|
|
||||||
- [ ] Complete better-sqlite3 mock implementation
|
|
||||||
- [ ] Mock prepared statements
|
|
||||||
- [ ] Mock transactions
|
|
||||||
- [ ] Mock FTS5 search functionality
|
|
||||||
- [ ] Test data seeding utilities
|
|
||||||
|
|
||||||
### External Dependencies
|
|
||||||
- [ ] Mock axios for API calls
|
|
||||||
- [ ] Mock file system operations
|
|
||||||
- [ ] Mock MCP SDK
|
|
||||||
- [ ] Mock Express server
|
|
||||||
- [ ] Mock WebSocket connections
|
|
||||||
|
|
||||||
## Week 3-4: Unit Tests ✅ COMPLETED (Phase 3)
|
|
||||||
|
|
||||||
### Core Services (Priority 1) ✅ COMPLETED
|
|
||||||
- [x] ~~`config-validator.ts` - 95% coverage~~ ✅ 96.9%
|
|
||||||
- [x] ~~`enhanced-config-validator.ts` - 95% coverage~~ ✅ 94.55%
|
|
||||||
- [x] ~~`workflow-validator.ts` - 90% coverage~~ ✅ 97.59%
|
|
||||||
- [x] ~~`expression-validator.ts` - 90% coverage~~ ✅ 97.22%
|
|
||||||
- [x] ~~`property-filter.ts` - 90% coverage~~ ✅ 95.25%
|
|
||||||
- [x] ~~`example-generator.ts` - 85% coverage~~ ✅ 94.34%
|
|
||||||
|
|
||||||
### Parsers (Priority 2) ✅ COMPLETED
|
|
||||||
- [x] ~~`node-parser.ts` - 90% coverage~~ ✅ 97.42%
|
|
||||||
- [x] ~~`property-extractor.ts` - 90% coverage~~ ✅ 95.49%
|
|
||||||
|
|
||||||
### MCP Layer (Priority 3) ✅ COMPLETED
|
|
||||||
- [x] ~~`tools.ts` - 90% coverage~~ ✅ 94.11%
|
|
||||||
- [x] ~~`handlers-n8n-manager.ts` - 85% coverage~~ ✅ 92.71%
|
|
||||||
- [x] ~~`handlers-workflow-diff.ts` - 85% coverage~~ ✅ 96.34%
|
|
||||||
- [x] ~~`tools-documentation.ts` - 80% coverage~~ ✅ 94.12%
|
|
||||||
|
|
||||||
### Database Layer (Priority 4) ✅ COMPLETED
|
|
||||||
- [x] ~~`node-repository.ts` - 85% coverage~~ ✅ 91.48%
|
|
||||||
- [x] ~~`database-adapter.ts` - 85% coverage~~ ✅ 89.29%
|
|
||||||
- [x] ~~`template-repository.ts` - 80% coverage~~ ✅ 86.78%
|
|
||||||
|
|
||||||
### Loaders and Mappers (Priority 5) ✅ COMPLETED
|
|
||||||
- [x] ~~`node-loader.ts` - 85% coverage~~ ✅ 91.89%
|
|
||||||
- [x] ~~`docs-mapper.ts` - 80% coverage~~ ✅ 95.45%
|
|
||||||
|
|
||||||
### Additional Critical Services Tested ✅ COMPLETED (Phase 3.5)
|
|
||||||
- [x] ~~`n8n-api-client.ts`~~ ✅ 83.87%
|
|
||||||
- [x] ~~`workflow-diff-engine.ts`~~ ✅ 90.06%
|
|
||||||
- [x] ~~`n8n-validation.ts`~~ ✅ 97.14%
|
|
||||||
- [x] ~~`node-specific-validators.ts`~~ ✅ 98.7%
|
|
||||||
|
|
||||||
## Week 5-6: Integration Tests 🚧 IN PROGRESS
|
|
||||||
|
|
||||||
### Real Status (July 29, 2025)
|
|
||||||
**Context**: Building test suite from scratch on testing branch. Main branch has no tests.
|
|
||||||
|
|
||||||
**Overall Status**: 187/246 tests passing (76% pass rate)
|
|
||||||
**Critical Issue**: CI shows green despite 58 failing tests due to `|| true` in workflow
|
|
||||||
|
|
||||||
### MCP Protocol Tests 🔄 MIXED STATUS
|
|
||||||
- [x] ~~Full MCP server initialization~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Tool invocation flow~~ ✅ FIXED (30 tests in tool-invocation.test.ts)
|
|
||||||
- [ ] Error handling and recovery ⚠️ 16 FAILING (error-handling.test.ts)
|
|
||||||
- [x] ~~Concurrent request handling~~ ✅ COMPLETED
|
|
||||||
- [ ] Session management ⚠️ 5 FAILING (timeout issues)
|
|
||||||
|
|
||||||
### n8n API Integration 🔄 PENDING
|
|
||||||
- [ ] Workflow CRUD operations (MSW mocks ready)
|
|
||||||
- [ ] Webhook triggering
|
|
||||||
- [ ] Execution monitoring
|
|
||||||
- [ ] Authentication handling
|
|
||||||
- [ ] Error scenarios
|
|
||||||
|
|
||||||
### Database Integration ⚠️ ISSUES FOUND
|
|
||||||
- [x] ~~SQLite operations with real DB~~ ✅ BASIC TESTS PASS
|
|
||||||
- [ ] FTS5 search functionality ⚠️ 7 FAILING (syntax errors)
|
|
||||||
- [ ] Transaction handling ⚠️ 1 FAILING (isolation issues)
|
|
||||||
- [ ] Migration testing 🔄 NOT STARTED
|
|
||||||
- [ ] Performance under load ⚠️ 4 FAILING (slower than thresholds)
|
|
||||||
|
|
||||||
## Week 7-8: E2E & Performance
|
|
||||||
|
|
||||||
### End-to-End Scenarios
|
|
||||||
- [ ] Complete workflow creation flow
|
|
||||||
- [ ] AI agent workflow setup
|
|
||||||
- [ ] Template import and validation
|
|
||||||
- [ ] Workflow execution monitoring
|
|
||||||
- [ ] Error recovery scenarios
|
|
||||||
|
|
||||||
### Performance Benchmarks
|
|
||||||
- [ ] Node loading speed (< 50ms per node)
|
|
||||||
- [ ] Search performance (< 100ms for 1000 nodes)
|
|
||||||
- [ ] Validation speed (< 10ms simple, < 100ms complex)
|
|
||||||
- [ ] Database query performance
|
|
||||||
- [ ] Memory usage profiling
|
|
||||||
- [ ] Concurrent request handling
|
|
||||||
|
|
||||||
### Load Testing
|
|
||||||
- [ ] 100 concurrent MCP requests
|
|
||||||
- [ ] 10,000 nodes in database
|
|
||||||
- [ ] 1,000 workflow validations/minute
|
|
||||||
- [ ] Memory leak detection
|
|
||||||
- [ ] Resource cleanup verification
|
|
||||||
|
|
||||||
## Testing Quality Gates
|
|
||||||
|
|
||||||
### Coverage Requirements
|
|
||||||
- [ ] Overall: 80%+ (Currently: 62.67%)
|
|
||||||
- [x] ~~Core services: 90%+~~ ✅ COMPLETED
|
|
||||||
- [x] ~~MCP tools: 90%+~~ ✅ COMPLETED
|
|
||||||
- [x] ~~Critical paths: 95%+~~ ✅ COMPLETED
|
|
||||||
- [x] ~~New code: 90%+~~ ✅ COMPLETED
|
|
||||||
|
|
||||||
### Performance Requirements
|
|
||||||
- [x] ~~All unit tests < 10ms~~ ✅ COMPLETED
|
|
||||||
- [ ] Integration tests < 1s
|
|
||||||
- [ ] E2E tests < 10s
|
|
||||||
- [x] ~~Full suite < 5 minutes~~ ✅ COMPLETED (~2 minutes)
|
|
||||||
- [x] ~~No memory leaks~~ ✅ COMPLETED
|
|
||||||
|
|
||||||
### Code Quality
|
|
||||||
- [x] ~~No ESLint errors~~ ✅ COMPLETED
|
|
||||||
- [x] ~~No TypeScript errors~~ ✅ COMPLETED
|
|
||||||
- [x] ~~No console.log in tests~~ ✅ COMPLETED
|
|
||||||
- [x] ~~All tests have descriptions~~ ✅ COMPLETED
|
|
||||||
- [x] ~~No hardcoded values~~ ✅ COMPLETED
|
|
||||||
|
|
||||||
## Monitoring & Maintenance
|
|
||||||
|
|
||||||
### Daily
|
|
||||||
- [ ] Check CI pipeline status
|
|
||||||
- [ ] Review failed tests
|
|
||||||
- [ ] Monitor flaky tests
|
|
||||||
|
|
||||||
### Weekly
|
|
||||||
- [ ] Review coverage reports
|
|
||||||
- [ ] Update test documentation
|
|
||||||
- [ ] Performance benchmark review
|
|
||||||
- [ ] Team sync on testing progress
|
|
||||||
|
|
||||||
### Monthly
|
|
||||||
- [ ] Update baseline benchmarks
|
|
||||||
- [ ] Review and refactor tests
|
|
||||||
- [ ] Update testing strategy
|
|
||||||
- [ ] Training/knowledge sharing
|
|
||||||
|
|
||||||
## Risk Mitigation
|
|
||||||
|
|
||||||
### Technical Risks
|
|
||||||
- [ ] Mock complexity - Use simple, maintainable mocks
|
|
||||||
- [ ] Test brittleness - Focus on behavior, not implementation
|
|
||||||
- [ ] Performance impact - Run heavy tests in parallel
|
|
||||||
- [ ] Flaky tests - Proper async handling and isolation
|
|
||||||
|
|
||||||
### Process Risks
|
|
||||||
- [ ] Slow adoption - Provide training and examples
|
|
||||||
- [ ] Coverage gaming - Review test quality, not just numbers
|
|
||||||
- [ ] Maintenance burden - Automate what's possible
|
|
||||||
- [ ] Integration complexity - Use test containers
|
|
||||||
|
|
||||||
## Success Criteria
|
|
||||||
|
|
||||||
### Current Reality Check
|
|
||||||
- **Unit Tests**: ✅ SOLID (932 passing, 87.8% coverage)
|
|
||||||
- **Integration Tests**: ⚠️ NEEDS WORK (58 failing, 76% pass rate)
|
|
||||||
- **E2E Tests**: 🔄 NOT STARTED
|
|
||||||
- **CI/CD**: ⚠️ BROKEN (hiding failures with || true)
|
|
||||||
|
|
||||||
### Revised Technical Metrics
|
|
||||||
- Coverage: Currently 87.8% for unit tests ✅
|
|
||||||
- Integration test pass rate: Target 100% (currently 76%)
|
|
||||||
- Performance: Adjust thresholds based on reality
|
|
||||||
- Reliability: Fix flaky tests during repair
|
|
||||||
- Speed: CI pipeline < 5 minutes ✅ (~2 minutes)
|
|
||||||
|
|
||||||
### Team Metrics
|
|
||||||
- All developers writing tests ✅
|
|
||||||
- Tests reviewed in PRs ✅
|
|
||||||
- No production bugs from tested code
|
|
||||||
- Improved development velocity ✅
|
|
||||||
|
|
||||||
## Phases Completed
|
|
||||||
|
|
||||||
- **Phase 0**: Immediate Fixes ✅ COMPLETED
|
|
||||||
- **Phase 1**: Vitest Migration ✅ COMPLETED
|
|
||||||
- **Phase 2**: Test Infrastructure ✅ COMPLETED
|
|
||||||
- **Phase 3**: Unit Tests (All 943 tests) ✅ COMPLETED
|
|
||||||
- **Phase 3.5**: Critical Service Testing ✅ COMPLETED
|
|
||||||
- **Phase 3.8**: CI/CD & Infrastructure ✅ COMPLETED
|
|
||||||
- **Phase 4**: Integration Tests 🚧 IN PROGRESS
|
|
||||||
- **Status**: 58 out of 246 tests failing (23.6% failure rate)
|
|
||||||
- **CI Issue**: Tests appear green due to `|| true` error suppression
|
|
||||||
- **Categories of Failures**:
|
|
||||||
- Database: 9 tests (state isolation, FTS5 syntax)
|
|
||||||
- MCP Protocol: 16 tests (response structure in error-handling.test.ts)
|
|
||||||
- MSW: 6 tests (not initialized properly)
|
|
||||||
- FTS5 Search: 7 tests (query syntax issues)
|
|
||||||
- Session Management: 5 tests (async cleanup)
|
|
||||||
- Performance: 15 tests (threshold mismatches)
|
|
||||||
- **Next Steps**:
|
|
||||||
1. Get team buy-in for "red" CI
|
|
||||||
2. Remove `|| true` from workflow
|
|
||||||
3. Fix tests systematically by category
|
|
||||||
- **Phase 5**: E2E Tests 🔄 PENDING
|
|
||||||
|
|
||||||
## Resources & Tools
|
|
||||||
|
|
||||||
### Documentation
|
|
||||||
- Vitest: https://vitest.dev/
|
|
||||||
- Testing Library: https://testing-library.com/
|
|
||||||
- MSW: https://mswjs.io/
|
|
||||||
- Testcontainers: https://www.testcontainers.com/
|
|
||||||
|
|
||||||
### Monitoring
|
|
||||||
- Codecov: https://codecov.io/
|
|
||||||
- GitHub Actions: https://github.com/features/actions
|
|
||||||
- Benchmark Action: https://github.com/benchmark-action/github-action-benchmark
|
|
||||||
|
|
||||||
### Team Resources
|
|
||||||
- Testing best practices guide
|
|
||||||
- Example test implementations
|
|
||||||
- Mock usage patterns
|
|
||||||
- Performance optimization tips
|
|
||||||
@@ -1,472 +0,0 @@
|
|||||||
# n8n-MCP Testing Implementation Guide
|
|
||||||
|
|
||||||
## Phase 1: Foundation Setup (Week 1-2)
|
|
||||||
|
|
||||||
### 1.1 Install Vitest and Dependencies
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Remove Jest
|
|
||||||
npm uninstall jest ts-jest @types/jest
|
|
||||||
|
|
||||||
# Install Vitest and related packages
|
|
||||||
npm install -D vitest @vitest/ui @vitest/coverage-v8
|
|
||||||
npm install -D @testing-library/jest-dom
|
|
||||||
npm install -D msw # For API mocking
|
|
||||||
npm install -D @faker-js/faker # For test data
|
|
||||||
npm install -D fishery # For factories
|
|
||||||
```
|
|
||||||
|
|
||||||
### 1.2 Update package.json Scripts
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"scripts": {
|
|
||||||
// Testing
|
|
||||||
"test": "vitest",
|
|
||||||
"test:ui": "vitest --ui",
|
|
||||||
"test:unit": "vitest run tests/unit",
|
|
||||||
"test:integration": "vitest run tests/integration",
|
|
||||||
"test:e2e": "vitest run tests/e2e",
|
|
||||||
"test:watch": "vitest watch",
|
|
||||||
"test:coverage": "vitest run --coverage",
|
|
||||||
"test:coverage:check": "vitest run --coverage --coverage.thresholdAutoUpdate=false",
|
|
||||||
|
|
||||||
// Benchmarks
|
|
||||||
"bench": "vitest bench",
|
|
||||||
"bench:compare": "vitest bench --compare",
|
|
||||||
|
|
||||||
// CI specific
|
|
||||||
"test:ci": "vitest run --reporter=junit --reporter=default",
|
|
||||||
"test:ci:coverage": "vitest run --coverage --reporter=junit --reporter=default"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 1.3 Migrate Existing Tests
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// Before (Jest)
|
|
||||||
import { describe, test, expect } from '@jest/globals';
|
|
||||||
|
|
||||||
// After (Vitest)
|
|
||||||
import { describe, it, expect, vi } from 'vitest';
|
|
||||||
|
|
||||||
// Update mock syntax
|
|
||||||
// Jest: jest.mock('module')
|
|
||||||
// Vitest: vi.mock('module')
|
|
||||||
|
|
||||||
// Update timer mocks
|
|
||||||
// Jest: jest.useFakeTimers()
|
|
||||||
// Vitest: vi.useFakeTimers()
|
|
||||||
```
|
|
||||||
|
|
||||||
### 1.4 Create Test Database Setup
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/setup/test-database.ts
|
|
||||||
import Database from 'better-sqlite3';
|
|
||||||
import { readFileSync } from 'fs';
|
|
||||||
import { join } from 'path';
|
|
||||||
|
|
||||||
export class TestDatabase {
|
|
||||||
private db: Database.Database;
|
|
||||||
|
|
||||||
constructor() {
|
|
||||||
this.db = new Database(':memory:');
|
|
||||||
this.initialize();
|
|
||||||
}
|
|
||||||
|
|
||||||
private initialize() {
|
|
||||||
const schema = readFileSync(
|
|
||||||
join(__dirname, '../../src/database/schema.sql'),
|
|
||||||
'utf8'
|
|
||||||
);
|
|
||||||
this.db.exec(schema);
|
|
||||||
}
|
|
||||||
|
|
||||||
seedNodes(nodes: any[]) {
|
|
||||||
const stmt = this.db.prepare(`
|
|
||||||
INSERT INTO nodes (type, displayName, name, "group", version, description, properties)
|
|
||||||
VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
||||||
`);
|
|
||||||
|
|
||||||
const insertMany = this.db.transaction((nodes) => {
|
|
||||||
for (const node of nodes) {
|
|
||||||
stmt.run(
|
|
||||||
node.type,
|
|
||||||
node.displayName,
|
|
||||||
node.name,
|
|
||||||
node.group,
|
|
||||||
node.version,
|
|
||||||
node.description,
|
|
||||||
JSON.stringify(node.properties)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
insertMany(nodes);
|
|
||||||
}
|
|
||||||
|
|
||||||
close() {
|
|
||||||
this.db.close();
|
|
||||||
}
|
|
||||||
|
|
||||||
getDb() {
|
|
||||||
return this.db;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Phase 2: Core Unit Tests (Week 3-4)
|
|
||||||
|
|
||||||
### 2.1 Test Organization Template
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/unit/services/[service-name].test.ts
|
|
||||||
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
|
||||||
import { ServiceName } from '@/services/service-name';
|
|
||||||
|
|
||||||
describe('ServiceName', () => {
|
|
||||||
let service: ServiceName;
|
|
||||||
let mockDependency: any;
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
// Setup mocks
|
|
||||||
mockDependency = {
|
|
||||||
method: vi.fn()
|
|
||||||
};
|
|
||||||
|
|
||||||
// Create service instance
|
|
||||||
service = new ServiceName(mockDependency);
|
|
||||||
});
|
|
||||||
|
|
||||||
afterEach(() => {
|
|
||||||
vi.clearAllMocks();
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('methodName', () => {
|
|
||||||
it('should handle happy path', async () => {
|
|
||||||
// Arrange
|
|
||||||
const input = { /* test data */ };
|
|
||||||
mockDependency.method.mockResolvedValue({ /* mock response */ });
|
|
||||||
|
|
||||||
// Act
|
|
||||||
const result = await service.methodName(input);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
expect(result).toEqual(/* expected output */);
|
|
||||||
expect(mockDependency.method).toHaveBeenCalledWith(/* expected args */);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should handle errors gracefully', async () => {
|
|
||||||
// Arrange
|
|
||||||
mockDependency.method.mockRejectedValue(new Error('Test error'));
|
|
||||||
|
|
||||||
// Act & Assert
|
|
||||||
await expect(service.methodName({})).rejects.toThrow('Expected error message');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2.2 Mock Strategies by Layer
|
|
||||||
|
|
||||||
#### Database Layer
|
|
||||||
```typescript
|
|
||||||
// tests/unit/database/node-repository.test.ts
|
|
||||||
import { vi } from 'vitest';
|
|
||||||
|
|
||||||
vi.mock('better-sqlite3', () => ({
|
|
||||||
default: vi.fn(() => ({
|
|
||||||
prepare: vi.fn(() => ({
|
|
||||||
all: vi.fn(() => mockData),
|
|
||||||
get: vi.fn((id) => mockData.find(d => d.id === id)),
|
|
||||||
run: vi.fn(() => ({ changes: 1 }))
|
|
||||||
})),
|
|
||||||
exec: vi.fn(),
|
|
||||||
close: vi.fn()
|
|
||||||
}))
|
|
||||||
}));
|
|
||||||
```
|
|
||||||
|
|
||||||
#### External APIs
|
|
||||||
```typescript
|
|
||||||
// tests/unit/services/__mocks__/axios.ts
|
|
||||||
export default {
|
|
||||||
create: vi.fn(() => ({
|
|
||||||
get: vi.fn(() => Promise.resolve({ data: {} })),
|
|
||||||
post: vi.fn(() => Promise.resolve({ data: { id: '123' } })),
|
|
||||||
put: vi.fn(() => Promise.resolve({ data: {} })),
|
|
||||||
delete: vi.fn(() => Promise.resolve({ data: {} }))
|
|
||||||
}))
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
#### File System
|
|
||||||
```typescript
|
|
||||||
// Use memfs for file system mocking
|
|
||||||
import { fs as memfs, vol } from 'memfs';

vi.mock('fs', () => memfs);
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
vol.reset();
|
|
||||||
vol.fromJSON({
|
|
||||||
'/test/file.json': JSON.stringify({ test: 'data' })
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2.3 Critical Path Tests
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// Priority 1: Node Loading and Parsing
|
|
||||||
// tests/unit/loaders/node-loader.test.ts
|
|
||||||
|
|
||||||
// Priority 2: Configuration Validation
|
|
||||||
// tests/unit/services/config-validator.test.ts
|
|
||||||
|
|
||||||
// Priority 3: MCP Tools
|
|
||||||
// tests/unit/mcp/tools.test.ts
|
|
||||||
|
|
||||||
// Priority 4: Database Operations
|
|
||||||
// tests/unit/database/node-repository.test.ts
|
|
||||||
|
|
||||||
// Priority 5: Workflow Validation
|
|
||||||
// tests/unit/services/workflow-validator.test.ts
|
|
||||||
```
|
|
||||||
|
|
||||||
## Phase 3: Integration Tests (Week 5-6)
|
|
||||||
|
|
||||||
### 3.1 Test Container Setup
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/setup/test-containers.ts
|
|
||||||
import { GenericContainer, StartedTestContainer } from 'testcontainers';
|
|
||||||
|
|
||||||
export class N8nTestContainer {
|
|
||||||
private container: StartedTestContainer;
|
|
||||||
|
|
||||||
async start() {
|
|
||||||
this.container = await new GenericContainer('n8nio/n8n:latest')
|
|
||||||
.withExposedPorts(5678)
|
|
||||||
.withEnv('N8N_BASIC_AUTH_ACTIVE', 'false')
|
|
||||||
.withEnv('N8N_ENCRYPTION_KEY', 'test-key')
|
|
||||||
.start();
|
|
||||||
|
|
||||||
return {
|
|
||||||
url: `http://localhost:${this.container.getMappedPort(5678)}`,
|
|
||||||
stop: () => this.container.stop()
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3.2 Integration Test Pattern
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/integration/n8n-api/workflow-crud.test.ts
|
|
||||||
import { N8nTestContainer } from '@tests/setup/test-containers';
|
|
||||||
import { N8nAPIClient } from '@/services/n8n-api-client';
|
|
||||||
|
|
||||||
describe('n8n API Integration', () => {
|
|
||||||
let container: any;
|
|
||||||
let apiClient: N8nAPIClient;
|
|
||||||
|
|
||||||
beforeAll(async () => {
|
|
||||||
container = await new N8nTestContainer().start();
|
|
||||||
apiClient = new N8nAPIClient(container.url);
|
|
||||||
}, 30000);
|
|
||||||
|
|
||||||
afterAll(async () => {
|
|
||||||
await container.stop();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should create and retrieve workflow', async () => {
|
|
||||||
// Create workflow
|
|
||||||
const workflow = createTestWorkflow();
|
|
||||||
const created = await apiClient.createWorkflow(workflow);
|
|
||||||
|
|
||||||
expect(created.id).toBeDefined();
|
|
||||||
|
|
||||||
// Retrieve workflow
|
|
||||||
const retrieved = await apiClient.getWorkflow(created.id);
|
|
||||||
expect(retrieved.name).toBe(workflow.name);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Phase 4: E2E & Performance (Week 7-8)
|
|
||||||
|
|
||||||
### 4.1 E2E Test Setup
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// tests/e2e/workflows/complete-workflow.test.ts
|
|
||||||
import { MCPClient } from '@tests/utils/mcp-client';
|
|
||||||
import { N8nTestContainer } from '@tests/setup/test-containers';
|
|
||||||
|
|
||||||
describe('Complete Workflow E2E', () => {
|
|
||||||
let mcpServer: any;
|
|
||||||
let n8nContainer: any;
|
|
||||||
let mcpClient: MCPClient;
|
|
||||||
|
|
||||||
beforeAll(async () => {
|
|
||||||
// Start n8n
|
|
||||||
n8nContainer = await new N8nTestContainer().start();
|
|
||||||
|
|
||||||
// Start MCP server
|
|
||||||
mcpServer = await startMCPServer({
|
|
||||||
n8nUrl: n8nContainer.url
|
|
||||||
});
|
|
||||||
|
|
||||||
// Create MCP client
|
|
||||||
mcpClient = new MCPClient(mcpServer.url);
|
|
||||||
}, 60000);
|
|
||||||
|
|
||||||
it('should execute complete workflow creation flow', async () => {
|
|
||||||
// 1. Search for nodes
|
|
||||||
const searchResult = await mcpClient.call('search_nodes', {
|
|
||||||
query: 'webhook http slack'
|
|
||||||
});
|
|
||||||
|
|
||||||
// 2. Get node details
|
|
||||||
const webhookInfo = await mcpClient.call('get_node_info', {
|
|
||||||
nodeType: 'nodes-base.webhook'
|
|
||||||
});
|
|
||||||
|
|
||||||
// 3. Create workflow
|
|
||||||
const workflow = new WorkflowBuilder('E2E Test')
|
|
||||||
.addWebhookNode()
|
|
||||||
.addHttpRequestNode()
|
|
||||||
.addSlackNode()
|
|
||||||
.connectSequentially()
|
|
||||||
.build();
|
|
||||||
|
|
||||||
// 4. Validate workflow
|
|
||||||
const validation = await mcpClient.call('validate_workflow', {
|
|
||||||
workflow
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(validation.isValid).toBe(true);
|
|
||||||
|
|
||||||
// 5. Deploy to n8n
|
|
||||||
const deployed = await mcpClient.call('n8n_create_workflow', {
|
|
||||||
...workflow
|
|
||||||
});
|
|
||||||
|
|
||||||
expect(deployed.id).toBeDefined();
|
|
||||||
expect(deployed.active).toBe(false);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4.2 Performance Benchmarks
|
|
||||||
|
|
||||||
```typescript
|
|
||||||
// vitest.benchmark.config.ts
|
|
||||||
export default {
|
|
||||||
test: {
|
|
||||||
benchmark: {
|
|
||||||
// Output benchmark results
|
|
||||||
outputFile: './benchmark-results.json',
|
|
||||||
|
|
||||||
// Compare with baseline
|
|
||||||
compare: './benchmark-baseline.json',
|
|
||||||
|
|
||||||
// Fail if performance degrades by more than 10%
|
|
||||||
threshold: {
|
|
||||||
p95: 1.1, // 110% of baseline
|
|
||||||
p99: 1.2 // 120% of baseline
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
## Testing Best Practices
|
|
||||||
|
|
||||||
### 1. Test Naming Convention
|
|
||||||
```typescript
|
|
||||||
// Format: should [expected behavior] when [condition]
|
|
||||||
it('should return user data when valid ID is provided')
|
|
||||||
it('should throw ValidationError when email is invalid')
|
|
||||||
it('should retry 3 times when network fails')
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Test Data Builders
|
|
||||||
```typescript
|
|
||||||
// Use builders for complex test data
|
|
||||||
const user = new UserBuilder()
|
|
||||||
.withEmail('test@example.com')
|
|
||||||
.withRole('admin')
|
|
||||||
.build();
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Custom Matchers
|
|
||||||
```typescript
|
|
||||||
// tests/utils/matchers.ts
|
|
||||||
export const toBeValidNode = (received: any) => {
|
|
||||||
const pass =
|
|
||||||
received.type &&
|
|
||||||
received.displayName &&
|
|
||||||
received.properties &&
|
|
||||||
Array.isArray(received.properties);
|
|
||||||
|
|
||||||
return {
|
|
||||||
pass,
|
|
||||||
message: () => `expected ${received} to be a valid node`
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
// Usage
|
|
||||||
expect(node).toBeValidNode();
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Snapshot Testing
|
|
||||||
```typescript
|
|
||||||
// For complex structures
|
|
||||||
it('should generate correct node schema', () => {
|
|
||||||
const schema = generateNodeSchema(node);
|
|
||||||
expect(schema).toMatchSnapshot();
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
### 5. Test Isolation
|
|
||||||
```typescript
|
|
||||||
// Always clean up after tests
|
|
||||||
afterEach(async () => {
|
|
||||||
await cleanup();
|
|
||||||
vi.clearAllMocks();
|
|
||||||
vi.restoreAllMocks();
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Coverage Goals by Module
|
|
||||||
|
|
||||||
| Module | Target | Priority | Notes |
|
|
||||||
|--------|--------|----------|-------|
|
|
||||||
| services/config-validator | 95% | High | Critical for reliability |
|
|
||||||
| services/workflow-validator | 90% | High | Core functionality |
|
|
||||||
| mcp/tools | 90% | High | User-facing API |
|
|
||||||
| database/node-repository | 85% | Medium | Well-tested DB layer |
|
|
||||||
| loaders/node-loader | 85% | Medium | External dependencies |
|
|
||||||
| parsers/* | 90% | High | Data transformation |
|
|
||||||
| utils/* | 80% | Low | Helper functions |
|
|
||||||
| scripts/* | 50% | Low | One-time scripts |
|
|
||||||
|
|
||||||
## Continuous Improvement
|
|
||||||
|
|
||||||
1. **Weekly Reviews**: Review test coverage and identify gaps
|
|
||||||
2. **Performance Baselines**: Update benchmarks monthly
|
|
||||||
3. **Flaky Test Detection**: Monitor and fix within 48 hours
|
|
||||||
4. **Test Documentation**: Keep examples updated
|
|
||||||
5. **Developer Training**: Pair programming on tests
|
|
||||||
|
|
||||||
## Success Metrics
|
|
||||||
|
|
||||||
- [ ] All tests pass in CI (0 failures)
|
|
||||||
- [ ] Coverage > 80% overall
|
|
||||||
- [ ] No flaky tests
|
|
||||||
- [ ] CI runs < 5 minutes
|
|
||||||
- [ ] Performance benchmarks stable
|
|
||||||
- [ ] Zero production bugs from tested code
|
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,66 +0,0 @@
|
|||||||
# Token Efficiency Improvements Summary
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
Made all MCP tool descriptions concise and token-efficient while preserving essential information.
|
|
||||||
|
|
||||||
## Key Improvements
|
|
||||||
|
|
||||||
### Before vs After Examples
|
|
||||||
|
|
||||||
1. **search_nodes**
|
|
||||||
- Before: ~350 chars with verbose explanation
|
|
||||||
- After: 165 chars
|
|
||||||
- `Search nodes by keywords. Modes: OR (any word), AND (all words), FUZZY (typos OK). Primary nodes ranked first. Examples: "webhook"→Webhook, "http call"→HTTP Request.`
|
|
||||||
|
|
||||||
2. **get_node_info**
|
|
||||||
- Before: ~450 chars with warnings about size
|
|
||||||
- After: 174 chars
|
|
||||||
- `Get FULL node schema (100KB+). TIP: Use get_node_essentials first! Returns all properties/operations/credentials. Prefix required: "nodes-base.httpRequest" not "httpRequest".`
|
|
||||||
|
|
||||||
3. **validate_node_minimal**
|
|
||||||
- Before: ~350 chars explaining what it doesn't do
|
|
||||||
- After: 102 chars
|
|
||||||
- `Fast check for missing required fields only. No warnings/suggestions. Returns: list of missing fields.`
|
|
||||||
|
|
||||||
4. **get_property_dependencies**
|
|
||||||
- Before: ~400 chars with full example
|
|
||||||
- After: 131 chars
|
|
||||||
- `Shows property dependencies and visibility rules. Example: sendBody=true reveals body fields. Test visibility with optional config.`
|
|
||||||
|
|
||||||
## Statistics
|
|
||||||
|
|
||||||
### Documentation Tools (22 tools)
|
|
||||||
- Average description length: **129 characters**
|
|
||||||
- Total characters: 2,836
|
|
||||||
- Tools over 200 chars: 1 (list_nodes at 204)
|
|
||||||
|
|
||||||
### Management Tools (17 tools)
|
|
||||||
- Average description length: **93 characters**
|
|
||||||
- Total characters: 1,578
|
|
||||||
- Tools over 200 chars: 1 (n8n_update_partial_workflow at 284)
|
|
||||||
|
|
||||||
## Strategy Used
|
|
||||||
|
|
||||||
1. **Remove redundancy**: Eliminated repeated information available in parameter descriptions
|
|
||||||
2. **Use abbreviations**: "vs" instead of "versus", "&" instead of "and" where appropriate
|
|
||||||
3. **Compact examples**: `"webhook"→Webhook` instead of verbose explanations
|
|
||||||
4. **Direct language**: "Fast check" instead of "Quick validation that only checks"
|
|
||||||
5. **Move details to documentation**: Complex tools reference `tools_documentation()` for full details
|
|
||||||
6. **Essential info only**: Focus on what the tool does, not how it works internally
|
|
||||||
|
|
||||||
## Special Cases
|
|
||||||
|
|
||||||
### n8n_update_partial_workflow
|
|
||||||
This tool's description is necessarily longer (284 chars) because:
|
|
||||||
- Lists all 13 operation types
|
|
||||||
- Critical for users to know available operations
|
|
||||||
- Directs to full documentation for details
|
|
||||||
|
|
||||||
### Complex Documentation Preserved
|
|
||||||
For tools like `n8n_update_partial_workflow`, detailed documentation was moved to `tools-documentation.ts` rather than deleted, ensuring users can still access comprehensive information when needed.
|
|
||||||
|
|
||||||
## Impact
|
|
||||||
- **Token savings**: ~65-70% reduction in description tokens
|
|
||||||
- **Faster AI responses**: Less context used for tool descriptions
|
|
||||||
- **Better UX**: Clearer, more scannable tool list
|
|
||||||
- **Maintained functionality**: All essential information preserved
|
|
||||||
@@ -1,118 +0,0 @@
|
|||||||
# Transactional Updates Example
|
|
||||||
|
|
||||||
This example demonstrates the new transactional update capabilities in v2.7.0.
|
|
||||||
|
|
||||||
## Before (v2.6.x and earlier)
|
|
||||||
|
|
||||||
Previously, you had to carefully order operations to ensure nodes existed before connecting them:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"id": "workflow-123",
|
|
||||||
"operations": [
|
|
||||||
// 1. First add all nodes
|
|
||||||
{ "type": "addNode", "node": { "name": "Process", "type": "n8n-nodes-base.set", ... }},
|
|
||||||
{ "type": "addNode", "node": { "name": "Notify", "type": "n8n-nodes-base.slack", ... }},
|
|
||||||
|
|
||||||
// 2. Then add connections (would fail if done before nodes)
|
|
||||||
{ "type": "addConnection", "source": "Webhook", "target": "Process" },
|
|
||||||
{ "type": "addConnection", "source": "Process", "target": "Notify" }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## After (v2.7.0+)
|
|
||||||
|
|
||||||
Now you can write operations in any order - the engine automatically handles dependencies:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"id": "workflow-123",
|
|
||||||
"operations": [
|
|
||||||
// Connections can come first!
|
|
||||||
{ "type": "addConnection", "source": "Webhook", "target": "Process" },
|
|
||||||
{ "type": "addConnection", "source": "Process", "target": "Notify" },
|
|
||||||
|
|
||||||
// Nodes added later - still works!
|
|
||||||
{ "type": "addNode", "node": { "name": "Process", "type": "n8n-nodes-base.set", "position": [400, 300] }},
|
|
||||||
{ "type": "addNode", "node": { "name": "Notify", "type": "n8n-nodes-base.slack", "position": [600, 300] }}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## How It Works
|
|
||||||
|
|
||||||
1. **Two-Pass Processing**:
|
|
||||||
- Pass 1: All node operations (add, remove, update, move, enable, disable)
|
|
||||||
- Pass 2: All other operations (connections, settings, metadata)
|
|
||||||
|
|
||||||
2. **Operation Limit**: Maximum 5 operations per request keeps complexity manageable
|
|
||||||
|
|
||||||
3. **Atomic Updates**: All operations succeed or all fail - no partial updates
|
|
||||||
|
|
||||||
## Benefits for AI Agents
|
|
||||||
|
|
||||||
- **Intuitive**: Write operations in the order that makes sense logically
|
|
||||||
- **Reliable**: No need to track dependencies manually
|
|
||||||
- **Simple**: Focus on what to change, not how to order changes
|
|
||||||
- **Safe**: Built-in limits prevent overly complex operations
|
|
||||||
|
|
||||||
## Complete Example
|
|
||||||
|
|
||||||
Here's a real-world example of adding error handling to a workflow:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"id": "workflow-123",
|
|
||||||
"operations": [
|
|
||||||
// Define the flow first (makes logical sense)
|
|
||||||
{
|
|
||||||
"type": "removeConnection",
|
|
||||||
"source": "HTTP Request",
|
|
||||||
"target": "Save to DB"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "addConnection",
|
|
||||||
"source": "HTTP Request",
|
|
||||||
"target": "Error Handler"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "addConnection",
|
|
||||||
"source": "Error Handler",
|
|
||||||
"target": "Send Alert"
|
|
||||||
},
|
|
||||||
|
|
||||||
// Then add the nodes
|
|
||||||
{
|
|
||||||
"type": "addNode",
|
|
||||||
"node": {
|
|
||||||
"name": "Error Handler",
|
|
||||||
"type": "n8n-nodes-base.if",
|
|
||||||
"position": [500, 400],
|
|
||||||
"parameters": {
|
|
||||||
"conditions": {
|
|
||||||
"boolean": [{
|
|
||||||
"value1": "={{$json.error}}",
|
|
||||||
"value2": true
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "addNode",
|
|
||||||
"node": {
|
|
||||||
"name": "Send Alert",
|
|
||||||
"type": "n8n-nodes-base.emailSend",
|
|
||||||
"position": [700, 400],
|
|
||||||
"parameters": {
|
|
||||||
"to": "alerts@company.com",
|
|
||||||
"subject": "Workflow Error Alert"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
All operations will be processed correctly, even though connections reference nodes that don't exist yet!
|
|
||||||
@@ -1,92 +0,0 @@
|
|||||||
# Validation Improvements v2.4.2
|
|
||||||
|
|
||||||
Based on AI agent feedback, we've implemented several improvements to the `validate_node_operation` tool:
|
|
||||||
|
|
||||||
## 🎯 Issues Addressed
|
|
||||||
|
|
||||||
### 1. **@version Warnings** ✅ FIXED
|
|
||||||
- **Issue**: Showed confusing warnings about `@version` property not being used
|
|
||||||
- **Fix**: Filter out internal properties starting with `@` or `_`
|
|
||||||
- **Result**: No more false warnings about internal n8n properties
|
|
||||||
|
|
||||||
### 2. **Duplicate Errors** ✅ FIXED
|
|
||||||
- **Issue**: Same error shown multiple times (e.g., missing `ts` field)
|
|
||||||
- **Fix**: Implemented deduplication that keeps the most specific error message
|
|
||||||
- **Result**: Each error shown only once with the best description
|
|
||||||
|
|
||||||
### 3. **Basic Code Validation** ✅ ADDED
|
|
||||||
- **Issue**: No syntax validation for Code node
|
|
||||||
- **Fix**: Added basic syntax checks for JavaScript and Python
|
|
||||||
- **Features**:
|
|
||||||
- Unbalanced braces/parentheses detection
|
|
||||||
- Python indentation consistency check
|
|
||||||
- n8n-specific patterns (return statement, input access)
|
|
||||||
- Security warnings (eval/exec usage)
|
|
||||||
|
|
||||||
## 📊 Before & After
|
|
||||||
|
|
||||||
### Before (v2.4.1):
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"errors": [
|
|
||||||
{ "property": "ts", "message": "Required property 'Message Timestamp' is missing" },
|
|
||||||
{ "property": "ts", "message": "Message timestamp (ts) is required to update a message" }
|
|
||||||
],
|
|
||||||
"warnings": [
|
|
||||||
{ "property": "@version", "message": "Property '@version' is configured but won't be used" }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### After (v2.4.2):
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"errors": [
|
|
||||||
{ "property": "ts", "message": "Message timestamp (ts) is required to update a message",
|
|
||||||
"fix": "Provide the timestamp of the message to update" }
|
|
||||||
],
|
|
||||||
"warnings": [] // No @version warning
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🆕 Code Validation Examples
|
|
||||||
|
|
||||||
### JavaScript Syntax Check:
|
|
||||||
```javascript
|
|
||||||
// Missing closing brace
|
|
||||||
if (true) {
|
|
||||||
return items;
|
|
||||||
// Error: "Unbalanced braces detected"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Python Indentation Check:
|
|
||||||
```python
|
|
||||||
def process():
|
|
||||||
if True: # Tab
|
|
||||||
return items # Spaces
|
|
||||||
# Error: "Mixed tabs and spaces in indentation"
|
|
||||||
```
|
|
||||||
|
|
||||||
### n8n Pattern Check:
|
|
||||||
```javascript
|
|
||||||
const result = items.map(item => item.json);
|
|
||||||
// Warning: "No return statement found"
|
|
||||||
// Suggestion: "Add: return items;"
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🚀 Impact
|
|
||||||
|
|
||||||
- **Cleaner validation results** - No more noise from internal properties
|
|
||||||
- **Clearer error messages** - Each issue reported once with best description
|
|
||||||
- **Better code quality** - Basic syntax validation catches common mistakes
|
|
||||||
- **n8n best practices** - Warns about missing return statements and input handling
|
|
||||||
|
|
||||||
## 📝 Summary
|
|
||||||
|
|
||||||
The `validate_node_operation` tool is now even more helpful for AI agents and developers:
|
|
||||||
- 95% reduction in false positives (operation-aware)
|
|
||||||
- No duplicate or confusing warnings
|
|
||||||
- Basic code validation for common syntax errors
|
|
||||||
- n8n-specific pattern checking
|
|
||||||
|
|
||||||
**Rating improved from 9/10 to 9.5/10!** 🎉
|
|
||||||
@@ -116,17 +116,46 @@ The `n8n_update_partial_workflow` tool allows you to make targeted changes to wo
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Update Connection (Change routing)
|
#### Rewire Connection
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"type": "updateConnection",
|
"type": "rewireConnection",
|
||||||
|
"source": "Webhook",
|
||||||
|
"from": "Old Handler",
|
||||||
|
"to": "New Handler",
|
||||||
|
"description": "Rewire connection to new handler"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Smart Parameters for IF Nodes
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
"source": "IF",
|
"source": "IF",
|
||||||
"target": "Send Email",
|
"target": "Success Handler",
|
||||||
"changes": {
|
"branch": "true", // Semantic parameter instead of sourceIndex
|
||||||
"sourceOutput": "false", // Change from 'true' to 'false' output
|
"description": "Route true branch to success handler"
|
||||||
"targetInput": "main"
|
}
|
||||||
},
|
```
|
||||||
"description": "Route failed conditions to email"
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "IF",
|
||||||
|
"target": "Error Handler",
|
||||||
|
"branch": "false", // Routes to false branch (sourceIndex=1)
|
||||||
|
"description": "Route false branch to error handler"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Smart Parameters for Switch Nodes
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Switch",
|
||||||
|
"target": "Handler A",
|
||||||
|
"case": 0, // First output
|
||||||
|
"description": "Route case 0 to Handler A"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -296,6 +325,193 @@ The `n8n_update_partial_workflow` tool allows you to make targeted changes to wo
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Example 5: Large Batch Workflow Refactoring
|
||||||
|
Demonstrates handling many operations in a single request - no longer limited to 5 operations!
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "workflow-batch",
|
||||||
|
"operations": [
|
||||||
|
// Add 10 processing nodes
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Filter Active Users",
|
||||||
|
"type": "n8n-nodes-base.filter",
|
||||||
|
"position": [400, 200],
|
||||||
|
"parameters": { "conditions": { "boolean": [{ "value1": "={{$json.active}}", "value2": true }] } }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Transform User Data",
|
||||||
|
"type": "n8n-nodes-base.set",
|
||||||
|
"position": [600, 200],
|
||||||
|
"parameters": { "values": { "string": [{ "name": "formatted_name", "value": "={{$json.firstName}} {{$json.lastName}}" }] } }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Validate Email",
|
||||||
|
"type": "n8n-nodes-base.if",
|
||||||
|
"position": [800, 200],
|
||||||
|
"parameters": { "conditions": { "string": [{ "value1": "={{$json.email}}", "operation": "contains", "value2": "@" }] } }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Enrich with API",
|
||||||
|
"type": "n8n-nodes-base.httpRequest",
|
||||||
|
"position": [1000, 150],
|
||||||
|
"parameters": { "url": "https://api.example.com/enrich", "method": "POST" }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Log Invalid Emails",
|
||||||
|
"type": "n8n-nodes-base.code",
|
||||||
|
"position": [1000, 350],
|
||||||
|
"parameters": { "jsCode": "console.log('Invalid email:', $json.email);\nreturn $json;" }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Merge Results",
|
||||||
|
"type": "n8n-nodes-base.merge",
|
||||||
|
"position": [1200, 250]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Deduplicate",
|
||||||
|
"type": "n8n-nodes-base.removeDuplicates",
|
||||||
|
"position": [1400, 250],
|
||||||
|
"parameters": { "propertyName": "id" }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Sort by Date",
|
||||||
|
"type": "n8n-nodes-base.sort",
|
||||||
|
"position": [1600, 250],
|
||||||
|
"parameters": { "sortFieldsUi": { "sortField": [{ "fieldName": "created_at", "order": "descending" }] } }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Batch for DB",
|
||||||
|
"type": "n8n-nodes-base.splitInBatches",
|
||||||
|
"position": [1800, 250],
|
||||||
|
"parameters": { "batchSize": 100 }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addNode",
|
||||||
|
"node": {
|
||||||
|
"name": "Save to Database",
|
||||||
|
"type": "n8n-nodes-base.postgres",
|
||||||
|
"position": [2000, 250],
|
||||||
|
"parameters": { "operation": "insert", "table": "processed_users" }
|
||||||
|
}
|
||||||
|
},
|
||||||
|
// Connect all the nodes
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Get Users",
|
||||||
|
"target": "Filter Active Users"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Filter Active Users",
|
||||||
|
"target": "Transform User Data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Transform User Data",
|
||||||
|
"target": "Validate Email"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Validate Email",
|
||||||
|
"sourceOutput": "true",
|
||||||
|
"target": "Enrich with API"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Validate Email",
|
||||||
|
"sourceOutput": "false",
|
||||||
|
"target": "Log Invalid Emails"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Enrich with API",
|
||||||
|
"target": "Merge Results"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Log Invalid Emails",
|
||||||
|
"target": "Merge Results",
|
||||||
|
"targetInput": "input2"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Merge Results",
|
||||||
|
"target": "Deduplicate"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Deduplicate",
|
||||||
|
"target": "Sort by Date"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Sort by Date",
|
||||||
|
"target": "Batch for DB"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addConnection",
|
||||||
|
"source": "Batch for DB",
|
||||||
|
"target": "Save to Database"
|
||||||
|
},
|
||||||
|
// Update workflow metadata
|
||||||
|
{
|
||||||
|
"type": "updateName",
|
||||||
|
"name": "User Processing Pipeline v2"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "updateSettings",
|
||||||
|
"settings": {
|
||||||
|
"executionOrder": "v1",
|
||||||
|
"timezone": "UTC",
|
||||||
|
"saveDataSuccessExecution": "all"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addTag",
|
||||||
|
"tag": "production"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addTag",
|
||||||
|
"tag": "user-processing"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "addTag",
|
||||||
|
"tag": "v2"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This example shows 26 operations in a single request, creating a complete data processing pipeline with proper error handling, validation, and batch processing.
|
||||||
|
|
||||||
## Best Practices
|
## Best Practices
|
||||||
|
|
||||||
1. **Use Descriptive Names**: Always provide clear node names and descriptions for operations
|
1. **Use Descriptive Names**: Always provide clear node names and descriptions for operations
|
||||||
@@ -390,13 +606,13 @@ The tool validates all operations before applying any changes. Common errors inc
|
|||||||
|
|
||||||
Always check the response for validation errors and adjust your operations accordingly.
|
Always check the response for validation errors and adjust your operations accordingly.
|
||||||
|
|
||||||
## Transactional Updates (v2.7.0+)
|
## Transactional Updates
|
||||||
|
|
||||||
The diff engine now supports transactional updates using a **two-pass processing** approach:
|
The diff engine now supports transactional updates using a **two-pass processing** approach:
|
||||||
|
|
||||||
### How It Works
|
### How It Works
|
||||||
|
|
||||||
1. **Operation Limit**: Maximum 5 operations per request to ensure reliability
|
1. **No Operation Limit**: Process unlimited operations in a single request
|
||||||
2. **Two-Pass Processing**:
|
2. **Two-Pass Processing**:
|
||||||
- **Pass 1**: All node operations (add, remove, update, move, enable, disable)
|
- **Pass 1**: All node operations (add, remove, update, move, enable, disable)
|
||||||
- **Pass 2**: All other operations (connections, settings, metadata)
|
- **Pass 2**: All other operations (connections, settings, metadata)
|
||||||
@@ -446,9 +662,9 @@ This allows you to add nodes and connect them in the same request:
|
|||||||
### Benefits
|
### Benefits
|
||||||
|
|
||||||
- **Order Independence**: You don't need to worry about operation order
|
- **Order Independence**: You don't need to worry about operation order
|
||||||
- **Atomic Updates**: All operations succeed or all fail
|
- **Atomic Updates**: All operations succeed or all fail (unless continueOnError is enabled)
|
||||||
- **Intuitive Usage**: Add complex workflow structures in one call
|
- **Intuitive Usage**: Add complex workflow structures in one call
|
||||||
- **Clear Limits**: 5 operations max keeps things simple and reliable
|
- **No Hard Limits**: Process unlimited operations efficiently
|
||||||
|
|
||||||
### Example: Complete Workflow Addition
|
### Example: Complete Workflow Addition
|
||||||
|
|
||||||
@@ -507,4 +723,4 @@ This allows you to add nodes and connect them in the same request:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
All 5 operations will be processed correctly regardless of order!
|
All operations will be processed correctly regardless of order!
|
||||||
15435
fetch_log.txt
Normal file
15435
fetch_log.txt
Normal file
File diff suppressed because one or more lines are too long
32
monitor_fetch.sh
Normal file
32
monitor_fetch.sh
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
#!/bin/bash
# Monitor the progress of the long-running template fetch job.
# Polls every 5 seconds while a "fetch-templates" process is alive, showing the
# current database size, template row count, and the latest fetch log line.
# Prints final statistics once the fetch process exits.
# Expects to be run from the repo root (reads data/nodes.db and fetch_log.txt).

echo "Monitoring template fetch progress..."
echo "=================================="

while true; do
  # Check if process is still running; exit the loop once it has finished.
  if ! pgrep -f "fetch-templates" > /dev/null; then
    echo "Fetch process completed!"
    break
  fi

  # Get database size (human-readable, e.g. "12M"); empty if the file doesn't exist yet.
  DB_SIZE=$(ls -lh data/nodes.db 2>/dev/null | awk '{print $5}')

  # Get template count; falls back to "0" if the table/db doesn't exist yet.
  TEMPLATE_COUNT=$(sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates" 2>/dev/null || echo "0")

  # Get last log entry.
  # NOTE(review): `tail -n 1 | grep ... | tail -1` filters a single line, so LAST_LOG
  # is empty unless the very last log line matches — possibly intended to tail more
  # lines; confirm against the fetch script's logging.
  LAST_LOG=$(tail -n 1 fetch_log.txt 2>/dev/null | grep "Fetching template details" | tail -1)

  # Display status on a single line (carriage return overwrites the previous status).
  echo -ne "\rDB Size: $DB_SIZE | Templates: $TEMPLATE_COUNT | $LAST_LOG"

  sleep 5
done

echo ""
echo "Final statistics:"
echo "-----------------"
ls -lh data/nodes.db
sqlite3 data/nodes.db "SELECT COUNT(*) as count, printf('%.1f MB', SUM(LENGTH(workflow_json_compressed))/1024.0/1024.0) as compressed_size FROM templates"
|
||||||
0
n8n-nodes.db
Normal file
0
n8n-nodes.db
Normal file
24705
package-lock.json
generated
24705
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
34
package.json
34
package.json
@@ -1,8 +1,16 @@
|
|||||||
{
|
{
|
||||||
"name": "n8n-mcp",
|
"name": "n8n-mcp",
|
||||||
"version": "2.10.4",
|
"version": "2.19.5",
|
||||||
"description": "Integration between n8n workflow automation and Model Context Protocol (MCP)",
|
"description": "Integration between n8n workflow automation and Model Context Protocol (MCP)",
|
||||||
"main": "dist/index.js",
|
"main": "dist/index.js",
|
||||||
|
"types": "dist/index.d.ts",
|
||||||
|
"exports": {
|
||||||
|
".": {
|
||||||
|
"types": "./dist/index.d.ts",
|
||||||
|
"require": "./dist/index.js",
|
||||||
|
"import": "./dist/index.js"
|
||||||
|
}
|
||||||
|
},
|
||||||
"bin": {
|
"bin": {
|
||||||
"n8n-mcp": "./dist/mcp/index.js"
|
"n8n-mcp": "./dist/mcp/index.js"
|
||||||
},
|
},
|
||||||
@@ -31,12 +39,16 @@
|
|||||||
"test:watch": "vitest watch",
|
"test:watch": "vitest watch",
|
||||||
"test:unit": "vitest run tests/unit",
|
"test:unit": "vitest run tests/unit",
|
||||||
"test:integration": "vitest run --config vitest.config.integration.ts",
|
"test:integration": "vitest run --config vitest.config.integration.ts",
|
||||||
|
"test:integration:n8n": "vitest run tests/integration/n8n-api",
|
||||||
|
"test:cleanup:orphans": "tsx tests/integration/n8n-api/scripts/cleanup-orphans.ts",
|
||||||
"test:e2e": "vitest run tests/e2e",
|
"test:e2e": "vitest run tests/e2e",
|
||||||
"lint": "tsc --noEmit",
|
"lint": "tsc --noEmit",
|
||||||
"typecheck": "tsc --noEmit",
|
"typecheck": "tsc --noEmit",
|
||||||
"update:n8n": "node scripts/update-n8n-deps.js",
|
"update:n8n": "node scripts/update-n8n-deps.js",
|
||||||
"update:n8n:check": "node scripts/update-n8n-deps.js --dry-run",
|
"update:n8n:check": "node scripts/update-n8n-deps.js --dry-run",
|
||||||
"fetch:templates": "node dist/scripts/fetch-templates.js",
|
"fetch:templates": "node dist/scripts/fetch-templates.js",
|
||||||
|
"fetch:templates:update": "node dist/scripts/fetch-templates.js --update",
|
||||||
|
"fetch:templates:extract": "node dist/scripts/fetch-templates.js --extract-only",
|
||||||
"fetch:templates:robust": "node dist/scripts/fetch-templates-robust.js",
|
"fetch:templates:robust": "node dist/scripts/fetch-templates-robust.js",
|
||||||
"prebuild:fts5": "npx tsx scripts/prebuild-fts5.ts",
|
"prebuild:fts5": "npx tsx scripts/prebuild-fts5.ts",
|
||||||
"test:templates": "node dist/scripts/test-templates.js",
|
"test:templates": "node dist/scripts/test-templates.js",
|
||||||
@@ -128,16 +140,26 @@
|
|||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@modelcontextprotocol/sdk": "^1.13.2",
|
"@modelcontextprotocol/sdk": "^1.13.2",
|
||||||
"@n8n/n8n-nodes-langchain": "^1.105.3",
|
"@n8n/n8n-nodes-langchain": "^1.113.1",
|
||||||
|
"@supabase/supabase-js": "^2.57.4",
|
||||||
"dotenv": "^16.5.0",
|
"dotenv": "^16.5.0",
|
||||||
"express": "^5.1.0",
|
"express": "^5.1.0",
|
||||||
"n8n": "^1.106.3",
|
"express-rate-limit": "^7.1.5",
|
||||||
"n8n-core": "^1.105.3",
|
"lru-cache": "^11.2.1",
|
||||||
"n8n-workflow": "^1.103.3",
|
"n8n": "^1.114.3",
|
||||||
|
"n8n-core": "^1.113.1",
|
||||||
|
"n8n-workflow": "^1.111.0",
|
||||||
|
"openai": "^4.77.0",
|
||||||
"sql.js": "^1.13.0",
|
"sql.js": "^1.13.0",
|
||||||
"uuid": "^10.0.0"
|
"uuid": "^10.0.0",
|
||||||
|
"zod": "^3.24.1"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
|
"@rollup/rollup-darwin-arm64": "^4.50.0",
|
||||||
|
"@rollup/rollup-linux-x64-gnu": "^4.50.0",
|
||||||
"better-sqlite3": "^11.10.0"
|
"better-sqlite3": "^11.10.0"
|
||||||
|
},
|
||||||
|
"overrides": {
|
||||||
|
"pyodide": "0.26.4"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,12 +1,24 @@
|
|||||||
{
|
{
|
||||||
"name": "n8n-mcp-runtime",
|
"name": "n8n-mcp-runtime",
|
||||||
"version": "2.10.1",
|
"version": "2.19.5",
|
||||||
"description": "n8n MCP Server Runtime Dependencies Only",
|
"description": "n8n MCP Server Runtime Dependencies Only",
|
||||||
"private": true,
|
"private": true,
|
||||||
|
"main": "dist/index.js",
|
||||||
|
"types": "dist/index.d.ts",
|
||||||
|
"exports": {
|
||||||
|
".": {
|
||||||
|
"types": "./dist/index.d.ts",
|
||||||
|
"require": "./dist/index.js",
|
||||||
|
"import": "./dist/index.js"
|
||||||
|
}
|
||||||
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@modelcontextprotocol/sdk": "^1.13.2",
|
"@modelcontextprotocol/sdk": "^1.13.2",
|
||||||
|
"@supabase/supabase-js": "^2.57.4",
|
||||||
"express": "^5.1.0",
|
"express": "^5.1.0",
|
||||||
|
"express-rate-limit": "^7.1.5",
|
||||||
"dotenv": "^16.5.0",
|
"dotenv": "^16.5.0",
|
||||||
|
"lru-cache": "^11.2.1",
|
||||||
"sql.js": "^1.13.0",
|
"sql.js": "^1.13.0",
|
||||||
"uuid": "^10.0.0",
|
"uuid": "^10.0.0",
|
||||||
"axios": "^1.7.7"
|
"axios": "^1.7.7"
|
||||||
|
|||||||
@@ -1,60 +0,0 @@
|
|||||||
# n8n-MCP v2.7.0 Release Notes
|
|
||||||
|
|
||||||
## 🎉 What's New
|
|
||||||
|
|
||||||
### 🔧 File Refactoring & Version Management
|
|
||||||
- **Renamed core MCP files** to remove unnecessary suffixes for cleaner codebase:
|
|
||||||
- `tools-update.ts` → `tools.ts`
|
|
||||||
- `server-update.ts` → `server.ts`
|
|
||||||
- `http-server-fixed.ts` → `http-server.ts`
|
|
||||||
- **Fixed version management** - Now reads from package.json as single source of truth (fixes #5)
|
|
||||||
- **Updated imports** across 21+ files to use the new file names
|
|
||||||
|
|
||||||
### 🔍 New Diagnostic Tool
|
|
||||||
- **Added `n8n_diagnostic` tool** - Helps troubleshoot why n8n management tools might not be appearing
|
|
||||||
- Shows environment variable status, API connectivity, and tool availability
|
|
||||||
- Provides step-by-step troubleshooting guidance
|
|
||||||
- Includes verbose mode for additional debug information
|
|
||||||
|
|
||||||
### 🧹 Code Cleanup
|
|
||||||
- Removed legacy HTTP server implementation with known issues
|
|
||||||
- Removed unused legacy API client
|
|
||||||
- Added version utility for consistent version handling
|
|
||||||
- Added script to sync runtime package version
|
|
||||||
|
|
||||||
## 📦 Installation
|
|
||||||
|
|
||||||
### Docker (Recommended)
|
|
||||||
```bash
|
|
||||||
docker pull ghcr.io/czlonkowski/n8n-mcp:2.7.0
|
|
||||||
```
|
|
||||||
|
|
||||||
### Claude Desktop
|
|
||||||
Update your configuration to use the latest version:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"mcpServers": {
|
|
||||||
"n8n-mcp": {
|
|
||||||
"command": "docker",
|
|
||||||
"args": ["run", "-i", "--rm", "ghcr.io/czlonkowski/n8n-mcp:2.7.0"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🐛 Bug Fixes
|
|
||||||
- Fixed version mismatch where version was hardcoded as 2.4.1 instead of reading from package.json
|
|
||||||
- Improved error messages for better debugging
|
|
||||||
|
|
||||||
## 📚 Documentation Updates
|
|
||||||
- Condensed version history in CLAUDE.md
|
|
||||||
- Updated documentation structure in README.md
|
|
||||||
- Removed outdated documentation files
|
|
||||||
- Added n8n_diagnostic tool to documentation
|
|
||||||
|
|
||||||
## 🙏 Acknowledgments
|
|
||||||
Thanks to all contributors and users who reported issues!
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Full Changelog**: https://github.com/czlonkowski/n8n-mcp/blob/main/CHANGELOG.md
|
|
||||||
78
scripts/audit-schema-coverage.ts
Normal file
78
scripts/audit-schema-coverage.ts
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
/**
|
||||||
|
* Database Schema Coverage Audit Script
|
||||||
|
*
|
||||||
|
* Audits the database to determine how many nodes have complete schema information
|
||||||
|
* for resourceLocator mode validation. This helps assess the coverage of our
|
||||||
|
* schema-driven validation approach.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import Database from 'better-sqlite3';
|
||||||
|
import path from 'path';
|
||||||
|
|
||||||
|
const dbPath = path.join(__dirname, '../data/nodes.db');
|
||||||
|
const db = new Database(dbPath, { readonly: true });
|
||||||
|
|
||||||
|
console.log('=== Schema Coverage Audit ===\n');
|
||||||
|
|
||||||
|
// Query 1: How many nodes have resourceLocator properties?
|
||||||
|
const totalResourceLocator = db.prepare(`
|
||||||
|
SELECT COUNT(*) as count FROM nodes
|
||||||
|
WHERE properties_schema LIKE '%resourceLocator%'
|
||||||
|
`).get() as { count: number };
|
||||||
|
|
||||||
|
console.log(`Nodes with resourceLocator properties: ${totalResourceLocator.count}`);
|
||||||
|
|
||||||
|
// Query 2: Of those, how many have modes defined?
|
||||||
|
const withModes = db.prepare(`
|
||||||
|
SELECT COUNT(*) as count FROM nodes
|
||||||
|
WHERE properties_schema LIKE '%resourceLocator%'
|
||||||
|
AND properties_schema LIKE '%modes%'
|
||||||
|
`).get() as { count: number };
|
||||||
|
|
||||||
|
console.log(`Nodes with modes defined: ${withModes.count}`);
|
||||||
|
|
||||||
|
// Query 3: Which nodes have resourceLocator but NO modes?
|
||||||
|
const withoutModes = db.prepare(`
|
||||||
|
SELECT node_type, display_name
|
||||||
|
FROM nodes
|
||||||
|
WHERE properties_schema LIKE '%resourceLocator%'
|
||||||
|
AND properties_schema NOT LIKE '%modes%'
|
||||||
|
LIMIT 10
|
||||||
|
`).all() as Array<{ node_type: string; display_name: string }>;
|
||||||
|
|
||||||
|
console.log(`\nSample nodes WITHOUT modes (showing 10):`);
|
||||||
|
withoutModes.forEach(node => {
|
||||||
|
console.log(` - ${node.display_name} (${node.node_type})`);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Calculate coverage percentage
|
||||||
|
const coverage = totalResourceLocator.count > 0
|
||||||
|
? (withModes.count / totalResourceLocator.count) * 100
|
||||||
|
: 0;
|
||||||
|
|
||||||
|
console.log(`\nSchema coverage: ${coverage.toFixed(1)}% of resourceLocator nodes have modes defined`);
|
||||||
|
|
||||||
|
// Query 4: Get some examples of nodes WITH modes for verification
|
||||||
|
console.log('\nSample nodes WITH modes (showing 5):');
|
||||||
|
const withModesExamples = db.prepare(`
|
||||||
|
SELECT node_type, display_name
|
||||||
|
FROM nodes
|
||||||
|
WHERE properties_schema LIKE '%resourceLocator%'
|
||||||
|
AND properties_schema LIKE '%modes%'
|
||||||
|
LIMIT 5
|
||||||
|
`).all() as Array<{ node_type: string; display_name: string }>;
|
||||||
|
|
||||||
|
withModesExamples.forEach(node => {
|
||||||
|
console.log(` - ${node.display_name} (${node.node_type})`);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Summary
|
||||||
|
console.log('\n=== Summary ===');
|
||||||
|
console.log(`Total nodes in database: ${db.prepare('SELECT COUNT(*) as count FROM nodes').get() as any as { count: number }.count}`);
|
||||||
|
console.log(`Nodes with resourceLocator: ${totalResourceLocator.count}`);
|
||||||
|
console.log(`Nodes with complete mode schemas: ${withModes.count}`);
|
||||||
|
console.log(`Nodes without mode schemas: ${totalResourceLocator.count - withModes.count}`);
|
||||||
|
console.log(`\nImplication: Schema-driven validation will apply to ${withModes.count} nodes.`);
|
||||||
|
console.log(`For the remaining ${totalResourceLocator.count - withModes.count} nodes, validation will be skipped (graceful degradation).`);
|
||||||
|
|
||||||
|
db.close();
|
||||||
41
scripts/export-webhook-workflows.ts
Normal file
41
scripts/export-webhook-workflows.ts
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
#!/usr/bin/env tsx

/**
 * Export Webhook Workflow JSONs
 *
 * Generates the 4 webhook workflow JSON files needed for integration testing.
 * These workflows must be imported into n8n and activated manually.
 *
 * Output: one JSON file per HTTP method in ./workflows-for-import/.
 */

import { writeFileSync, mkdirSync } from 'fs';
import { join } from 'path';
import { exportAllWebhookWorkflows } from '../tests/integration/n8n-api/utils/webhook-workflows';

const OUTPUT_DIR = join(process.cwd(), 'workflows-for-import');

// Create output directory (no-op if it already exists).
mkdirSync(OUTPUT_DIR, { recursive: true });

// Generate all workflow JSONs, keyed by HTTP method.
const workflows = exportAllWebhookWorkflows();

// Write each workflow to a separate file.
Object.entries(workflows).forEach(([method, workflow]) => {
  const filename = `webhook-${method.toLowerCase()}.json`;
  const filepath = join(OUTPUT_DIR, filename);

  writeFileSync(filepath, JSON.stringify(workflow, null, 2), 'utf-8');

  // Fix: the original logged the literal placeholder "$(unknown)" instead of
  // interpolating the generated file name.
  console.log(`✓ Generated: ${filename}`);
});

console.log(`\n✓ All workflow JSONs written to: ${OUTPUT_DIR}`);
console.log('\nNext steps:');
console.log('1. Import each JSON file into your n8n instance');
console.log('2. Activate each workflow in the n8n UI');
console.log('3. Copy the webhook URLs from each workflow (open workflow → Webhook node → copy URL)');
console.log('4. Add them to your .env file:');
console.log('   N8N_TEST_WEBHOOK_GET_URL=https://your-n8n.com/webhook/mcp-test-get');
console.log('   N8N_TEST_WEBHOOK_POST_URL=https://your-n8n.com/webhook/mcp-test-post');
console.log('   N8N_TEST_WEBHOOK_PUT_URL=https://your-n8n.com/webhook/mcp-test-put');
console.log('   N8N_TEST_WEBHOOK_DELETE_URL=https://your-n8n.com/webhook/mcp-test-delete');
||||||
62
scripts/publish-npm-quick.sh
Executable file
62
scripts/publish-npm-quick.sh
Executable file
@@ -0,0 +1,62 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Quick publish script that skips tests
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Color codes
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
NC='\033[0m'
|
||||||
|
|
||||||
|
echo "🚀 Preparing n8n-mcp for npm publish (quick mode)..."
|
||||||
|
|
||||||
|
# Sync version
|
||||||
|
echo "🔄 Syncing version to package.runtime.json..."
|
||||||
|
npm run sync:runtime-version
|
||||||
|
|
||||||
|
VERSION=$(node -e "console.log(require('./package.json').version)")
|
||||||
|
echo -e "${GREEN}📌 Version: $VERSION${NC}"
|
||||||
|
|
||||||
|
# Prepare publish directory
|
||||||
|
PUBLISH_DIR="npm-publish-temp"
|
||||||
|
rm -rf $PUBLISH_DIR
|
||||||
|
mkdir -p $PUBLISH_DIR
|
||||||
|
|
||||||
|
echo "📦 Copying files..."
|
||||||
|
cp -r dist $PUBLISH_DIR/
|
||||||
|
cp -r data $PUBLISH_DIR/
|
||||||
|
cp README.md LICENSE .env.example $PUBLISH_DIR/
|
||||||
|
cp .npmignore $PUBLISH_DIR/ 2>/dev/null || true
|
||||||
|
cp package.runtime.json $PUBLISH_DIR/package.json
|
||||||
|
|
||||||
|
cd $PUBLISH_DIR
|
||||||
|
|
||||||
|
# Configure package.json
|
||||||
|
node -e "
|
||||||
|
const pkg = require('./package.json');
|
||||||
|
pkg.name = 'n8n-mcp';
|
||||||
|
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
|
||||||
|
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
|
||||||
|
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
|
||||||
|
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
|
||||||
|
pkg.author = 'Romuald Czlonkowski @ www.aiadvisors.pl/en';
|
||||||
|
pkg.license = 'MIT';
|
||||||
|
pkg.bugs = { url: 'https://github.com/czlonkowski/n8n-mcp/issues' };
|
||||||
|
pkg.homepage = 'https://github.com/czlonkowski/n8n-mcp#readme';
|
||||||
|
pkg.files = ['dist/**/*', 'data/nodes.db', '.env.example', 'README.md', 'LICENSE'];
|
||||||
|
delete pkg.private;
|
||||||
|
require('fs').writeFileSync('./package.json', JSON.stringify(pkg, null, 2));
|
||||||
|
"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "📋 Package details:"
|
||||||
|
echo -e "${GREEN}Name:${NC} $(node -e "console.log(require('./package.json').name)")"
|
||||||
|
echo -e "${GREEN}Version:${NC} $(node -e "console.log(require('./package.json').version)")"
|
||||||
|
echo -e "${GREEN}Size:${NC} ~50MB"
|
||||||
|
echo ""
|
||||||
|
echo "✅ Ready to publish!"
|
||||||
|
echo ""
|
||||||
|
echo -e "${YELLOW}⚠️ Note: Tests were skipped in quick mode${NC}"
|
||||||
|
echo ""
|
||||||
|
echo "To publish, run:"
|
||||||
|
echo -e " ${GREEN}cd $PUBLISH_DIR${NC}"
|
||||||
|
echo -e " ${GREEN}npm publish --otp=YOUR_OTP_CODE${NC}"
|
||||||
@@ -11,14 +11,8 @@ NC='\033[0m' # No Color
|
|||||||
|
|
||||||
echo "🚀 Preparing n8n-mcp for npm publish..."
|
echo "🚀 Preparing n8n-mcp for npm publish..."
|
||||||
|
|
||||||
# Run tests first to ensure quality
|
# Skip tests - they already run in CI before merge/publish
|
||||||
echo "🧪 Running tests..."
|
echo "⏭️ Skipping tests (already verified in CI)"
|
||||||
npm test
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo -e "${RED}❌ Tests failed. Aborting publish.${NC}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo -e "${GREEN}✅ All tests passed!${NC}"
|
|
||||||
|
|
||||||
# Sync version to runtime package first
|
# Sync version to runtime package first
|
||||||
echo "🔄 Syncing version to package.runtime.json..."
|
echo "🔄 Syncing version to package.runtime.json..."
|
||||||
@@ -65,6 +59,15 @@ node -e "
|
|||||||
const pkg = require('./package.json');
|
const pkg = require('./package.json');
|
||||||
pkg.name = 'n8n-mcp';
|
pkg.name = 'n8n-mcp';
|
||||||
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
|
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
|
||||||
|
pkg.main = 'dist/index.js';
|
||||||
|
pkg.types = 'dist/index.d.ts';
|
||||||
|
pkg.exports = {
|
||||||
|
'.': {
|
||||||
|
types: './dist/index.d.ts',
|
||||||
|
require: './dist/index.js',
|
||||||
|
import: './dist/index.js'
|
||||||
|
}
|
||||||
|
};
|
||||||
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
|
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
|
||||||
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
|
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
|
||||||
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
|
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
|
||||||
|
|||||||
189
scripts/test-ai-validation-debug.ts
Normal file
189
scripts/test-ai-validation-debug.ts
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
/**
|
||||||
|
* Debug test for AI validation issues
|
||||||
|
* Reproduces the bugs found by n8n-mcp-tester
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { validateAISpecificNodes, buildReverseConnectionMap } from '../src/services/ai-node-validator';
|
||||||
|
import type { WorkflowJson } from '../src/services/ai-tool-validators';
|
||||||
|
import { NodeTypeNormalizer } from '../src/utils/node-type-normalizer';
|
||||||
|
|
||||||
|
console.log('=== AI Validation Debug Tests ===\n');
|
||||||
|
|
||||||
|
// Test 1: AI Agent with NO language model connection
|
||||||
|
console.log('Test 1: Missing Language Model Detection');
|
||||||
|
const workflow1: WorkflowJson = {
|
||||||
|
name: 'Test Missing LM',
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: 'ai-agent-1',
|
||||||
|
name: 'AI Agent',
|
||||||
|
type: '@n8n/n8n-nodes-langchain.agent',
|
||||||
|
position: [500, 300],
|
||||||
|
parameters: {
|
||||||
|
promptType: 'define',
|
||||||
|
text: 'You are a helpful assistant'
|
||||||
|
},
|
||||||
|
typeVersion: 1.7
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
// NO connections - AI Agent is isolated
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('Workflow:', JSON.stringify(workflow1, null, 2));
|
||||||
|
|
||||||
|
const reverseMap1 = buildReverseConnectionMap(workflow1);
|
||||||
|
console.log('\nReverse connection map for AI Agent:');
|
||||||
|
console.log('Entries:', Array.from(reverseMap1.entries()));
|
||||||
|
console.log('AI Agent connections:', reverseMap1.get('AI Agent'));
|
||||||
|
|
||||||
|
// Check node normalization
|
||||||
|
const normalizedType1 = NodeTypeNormalizer.normalizeToFullForm(workflow1.nodes[0].type);
|
||||||
|
console.log(`\nNode type: ${workflow1.nodes[0].type}`);
|
||||||
|
console.log(`Normalized type: ${normalizedType1}`);
|
||||||
|
console.log(`Match check: ${normalizedType1 === '@n8n/n8n-nodes-langchain.agent'}`);
|
||||||
|
|
||||||
|
const issues1 = validateAISpecificNodes(workflow1);
|
||||||
|
console.log('\nValidation issues:');
|
||||||
|
console.log(JSON.stringify(issues1, null, 2));
|
||||||
|
|
||||||
|
const hasMissingLMError = issues1.some(
|
||||||
|
i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
|
||||||
|
);
|
||||||
|
console.log(`\n✓ Has MISSING_LANGUAGE_MODEL error: ${hasMissingLMError}`);
|
||||||
|
console.log(`✗ Expected: true, Got: ${hasMissingLMError}`);
|
||||||
|
|
||||||
|
// Test 2: AI Agent WITH language model connection
|
||||||
|
console.log('\n\n' + '='.repeat(60));
|
||||||
|
console.log('Test 2: AI Agent WITH Language Model (Should be valid)');
|
||||||
|
const workflow2: WorkflowJson = {
|
||||||
|
name: 'Test With LM',
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: 'openai-1',
|
||||||
|
name: 'OpenAI Chat Model',
|
||||||
|
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
|
||||||
|
position: [200, 300],
|
||||||
|
parameters: {
|
||||||
|
modelName: 'gpt-4'
|
||||||
|
},
|
||||||
|
typeVersion: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'ai-agent-1',
|
||||||
|
name: 'AI Agent',
|
||||||
|
type: '@n8n/n8n-nodes-langchain.agent',
|
||||||
|
position: [500, 300],
|
||||||
|
parameters: {
|
||||||
|
promptType: 'define',
|
||||||
|
text: 'You are a helpful assistant'
|
||||||
|
},
|
||||||
|
typeVersion: 1.7
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'OpenAI Chat Model': {
|
||||||
|
ai_languageModel: [
|
||||||
|
[
|
||||||
|
{
|
||||||
|
node: 'AI Agent',
|
||||||
|
type: 'ai_languageModel',
|
||||||
|
index: 0
|
||||||
|
}
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('\nConnections:', JSON.stringify(workflow2.connections, null, 2));
|
||||||
|
|
||||||
|
const reverseMap2 = buildReverseConnectionMap(workflow2);
|
||||||
|
console.log('\nReverse connection map for AI Agent:');
|
||||||
|
console.log('AI Agent connections:', reverseMap2.get('AI Agent'));
|
||||||
|
|
||||||
|
const issues2 = validateAISpecificNodes(workflow2);
|
||||||
|
console.log('\nValidation issues:');
|
||||||
|
console.log(JSON.stringify(issues2, null, 2));
|
||||||
|
|
||||||
|
const hasMissingLMError2 = issues2.some(
|
||||||
|
i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
|
||||||
|
);
|
||||||
|
console.log(`\n✓ Should NOT have MISSING_LANGUAGE_MODEL error: ${!hasMissingLMError2}`);
|
||||||
|
console.log(`Expected: false, Got: ${hasMissingLMError2}`);
|
||||||
|
|
||||||
|
// Test 3: AI Agent with tools but no language model
|
||||||
|
console.log('\n\n' + '='.repeat(60));
|
||||||
|
console.log('Test 3: AI Agent with Tools but NO Language Model');
|
||||||
|
const workflow3: WorkflowJson = {
|
||||||
|
name: 'Test Tools No LM',
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: 'http-tool-1',
|
||||||
|
name: 'HTTP Request Tool',
|
||||||
|
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
|
||||||
|
position: [200, 300],
|
||||||
|
parameters: {
|
||||||
|
toolDescription: 'Calls an API',
|
||||||
|
url: 'https://api.example.com'
|
||||||
|
},
|
||||||
|
typeVersion: 1.1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'ai-agent-1',
|
||||||
|
name: 'AI Agent',
|
||||||
|
type: '@n8n/n8n-nodes-langchain.agent',
|
||||||
|
position: [500, 300],
|
||||||
|
parameters: {
|
||||||
|
promptType: 'define',
|
||||||
|
text: 'You are a helpful assistant'
|
||||||
|
},
|
||||||
|
typeVersion: 1.7
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'HTTP Request Tool': {
|
||||||
|
ai_tool: [
|
||||||
|
[
|
||||||
|
{
|
||||||
|
node: 'AI Agent',
|
||||||
|
type: 'ai_tool',
|
||||||
|
index: 0
|
||||||
|
}
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('\nConnections:', JSON.stringify(workflow3.connections, null, 2));
|
||||||
|
|
||||||
|
const reverseMap3 = buildReverseConnectionMap(workflow3);
|
||||||
|
console.log('\nReverse connection map for AI Agent:');
|
||||||
|
const aiAgentConns = reverseMap3.get('AI Agent');
|
||||||
|
console.log('AI Agent connections:', aiAgentConns);
|
||||||
|
console.log('Connection types:', aiAgentConns?.map(c => c.type));
|
||||||
|
|
||||||
|
const issues3 = validateAISpecificNodes(workflow3);
|
||||||
|
console.log('\nValidation issues:');
|
||||||
|
console.log(JSON.stringify(issues3, null, 2));
|
||||||
|
|
||||||
|
const hasMissingLMError3 = issues3.some(
|
||||||
|
i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
|
||||||
|
);
|
||||||
|
const hasNoToolsInfo3 = issues3.some(
|
||||||
|
i => i.severity === 'info' && i.message.includes('no ai_tool connections')
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(`\n✓ Should have MISSING_LANGUAGE_MODEL error: ${hasMissingLMError3}`);
|
||||||
|
console.log(`Expected: true, Got: ${hasMissingLMError3}`);
|
||||||
|
console.log(`✗ Should NOT have "no tools" info: ${!hasNoToolsInfo3}`);
|
||||||
|
console.log(`Expected: false, Got: ${hasNoToolsInfo3}`);
|
||||||
|
|
||||||
|
console.log('\n' + '='.repeat(60));
|
||||||
|
console.log('Summary:');
|
||||||
|
console.log(`Test 1 (No LM): ${hasMissingLMError ? 'PASS ✓' : 'FAIL ✗'}`);
|
||||||
|
console.log(`Test 2 (With LM): ${!hasMissingLMError2 ? 'PASS ✓' : 'FAIL ✗'}`);
|
||||||
|
console.log(`Test 3 (Tools, No LM): ${hasMissingLMError3 && !hasNoToolsInfo3 ? 'PASS ✓' : 'FAIL ✗'}`);
|
||||||
@@ -10,7 +10,7 @@ import { getToolDocumentation } from '../src/mcp/tools-documentation';
|
|||||||
import { ExampleGenerator } from '../src/services/example-generator';
|
import { ExampleGenerator } from '../src/services/example-generator';
|
||||||
import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator';
|
import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator';
|
||||||
|
|
||||||
const dbPath = process.env.NODE_DB_PATH || './nodes.db';
|
const dbPath = process.env.NODE_DB_PATH || './data/nodes.db';
|
||||||
|
|
||||||
async function main() {
|
async function main() {
|
||||||
console.log('🧪 Testing Code Node Documentation Fixes\n');
|
console.log('🧪 Testing Code Node Documentation Fixes\n');
|
||||||
|
|||||||
163
scripts/test-docker-fingerprint.ts
Normal file
163
scripts/test-docker-fingerprint.ts
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
/**
|
||||||
|
* Test Docker Host Fingerprinting
|
||||||
|
* Verifies that host machine characteristics are stable across container recreations
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { existsSync, readFileSync } from 'fs';
|
||||||
|
import { platform, arch } from 'os';
|
||||||
|
import { createHash } from 'crypto';
|
||||||
|
|
||||||
|
console.log('=== Docker Host Fingerprinting Test ===\n');
|
||||||
|
|
||||||
|
function generateHostFingerprint(): string {
|
||||||
|
try {
|
||||||
|
const signals: string[] = [];
|
||||||
|
|
||||||
|
console.log('Collecting host signals...\n');
|
||||||
|
|
||||||
|
// CPU info (stable across container recreations)
|
||||||
|
if (existsSync('/proc/cpuinfo')) {
|
||||||
|
const cpuinfo = readFileSync('/proc/cpuinfo', 'utf-8');
|
||||||
|
const modelMatch = cpuinfo.match(/model name\s*:\s*(.+)/);
|
||||||
|
const coresMatch = cpuinfo.match(/processor\s*:/g);
|
||||||
|
|
||||||
|
if (modelMatch) {
|
||||||
|
const cpuModel = modelMatch[1].trim();
|
||||||
|
signals.push(cpuModel);
|
||||||
|
console.log('✓ CPU Model:', cpuModel);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (coresMatch) {
|
||||||
|
const cores = `cores:${coresMatch.length}`;
|
||||||
|
signals.push(cores);
|
||||||
|
console.log('✓ CPU Cores:', coresMatch.length);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
console.log('✗ /proc/cpuinfo not available (Windows/Mac Docker)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Memory (stable)
|
||||||
|
if (existsSync('/proc/meminfo')) {
|
||||||
|
const meminfo = readFileSync('/proc/meminfo', 'utf-8');
|
||||||
|
const totalMatch = meminfo.match(/MemTotal:\s+(\d+)/);
|
||||||
|
|
||||||
|
if (totalMatch) {
|
||||||
|
const memory = `mem:${totalMatch[1]}`;
|
||||||
|
signals.push(memory);
|
||||||
|
console.log('✓ Total Memory:', totalMatch[1], 'kB');
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
console.log('✗ /proc/meminfo not available (Windows/Mac Docker)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Docker network subnet
|
||||||
|
const networkInfo = getDockerNetworkInfo();
|
||||||
|
if (networkInfo) {
|
||||||
|
signals.push(networkInfo);
|
||||||
|
console.log('✓ Network Info:', networkInfo);
|
||||||
|
} else {
|
||||||
|
console.log('✗ Network info not available');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Platform basics (stable)
|
||||||
|
signals.push(platform(), arch());
|
||||||
|
console.log('✓ Platform:', platform());
|
||||||
|
console.log('✓ Architecture:', arch());
|
||||||
|
|
||||||
|
// Generate stable ID from all signals
|
||||||
|
console.log('\nCombined signals:', signals.join(' | '));
|
||||||
|
const fingerprint = signals.join('-');
|
||||||
|
const userId = createHash('sha256').update(fingerprint).digest('hex').substring(0, 16);
|
||||||
|
|
||||||
|
return userId;
|
||||||
|
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Error generating fingerprint:', error);
|
||||||
|
// Fallback
|
||||||
|
return createHash('sha256')
|
||||||
|
.update(`${platform()}-${arch()}-docker`)
|
||||||
|
.digest('hex')
|
||||||
|
.substring(0, 16);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function getDockerNetworkInfo(): string | null {
|
||||||
|
try {
|
||||||
|
// Read routing table to get bridge network
|
||||||
|
if (existsSync('/proc/net/route')) {
|
||||||
|
const routes = readFileSync('/proc/net/route', 'utf-8');
|
||||||
|
const lines = routes.split('\n');
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
if (line.includes('eth0')) {
|
||||||
|
const parts = line.split(/\s+/);
|
||||||
|
if (parts[2]) {
|
||||||
|
const gateway = parseInt(parts[2], 16).toString(16);
|
||||||
|
return `net:${gateway}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Ignore errors
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test environment detection
|
||||||
|
console.log('\n=== Environment Detection ===\n');
|
||||||
|
|
||||||
|
const isDocker = process.env.IS_DOCKER === 'true';
|
||||||
|
const isCloudEnvironment = !!(
|
||||||
|
process.env.RAILWAY_ENVIRONMENT ||
|
||||||
|
process.env.RENDER ||
|
||||||
|
process.env.FLY_APP_NAME ||
|
||||||
|
process.env.HEROKU_APP_NAME ||
|
||||||
|
process.env.AWS_EXECUTION_ENV ||
|
||||||
|
process.env.KUBERNETES_SERVICE_HOST
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log('IS_DOCKER env:', process.env.IS_DOCKER);
|
||||||
|
console.log('Docker detected:', isDocker);
|
||||||
|
console.log('Cloud environment:', isCloudEnvironment);
|
||||||
|
|
||||||
|
// Generate fingerprints
|
||||||
|
console.log('\n=== Fingerprint Generation ===\n');
|
||||||
|
|
||||||
|
const fingerprint1 = generateHostFingerprint();
|
||||||
|
const fingerprint2 = generateHostFingerprint();
|
||||||
|
const fingerprint3 = generateHostFingerprint();
|
||||||
|
|
||||||
|
console.log('\nFingerprint 1:', fingerprint1);
|
||||||
|
console.log('Fingerprint 2:', fingerprint2);
|
||||||
|
console.log('Fingerprint 3:', fingerprint3);
|
||||||
|
|
||||||
|
const consistent = fingerprint1 === fingerprint2 && fingerprint2 === fingerprint3;
|
||||||
|
console.log('\nConsistent:', consistent ? '✓ YES' : '✗ NO');
|
||||||
|
|
||||||
|
// Test explicit ID override
|
||||||
|
console.log('\n=== Environment Variable Override Test ===\n');
|
||||||
|
|
||||||
|
if (process.env.N8N_MCP_USER_ID) {
|
||||||
|
console.log('Explicit user ID:', process.env.N8N_MCP_USER_ID);
|
||||||
|
console.log('This would override the fingerprint');
|
||||||
|
} else {
|
||||||
|
console.log('No explicit user ID set');
|
||||||
|
console.log('To test: N8N_MCP_USER_ID=my-custom-id npx tsx ' + process.argv[1]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stability estimate
|
||||||
|
console.log('\n=== Stability Analysis ===\n');
|
||||||
|
|
||||||
|
const hasStableSignals = existsSync('/proc/cpuinfo') || existsSync('/proc/meminfo');
|
||||||
|
if (hasStableSignals) {
|
||||||
|
console.log('✓ Host-based signals available');
|
||||||
|
console.log('✓ Fingerprint should be stable across container recreations');
|
||||||
|
console.log('✓ Different fingerprints on different physical hosts');
|
||||||
|
} else {
|
||||||
|
console.log('⚠️ Limited host signals (Windows/Mac Docker Desktop)');
|
||||||
|
console.log('⚠️ Fingerprint may not be fully stable');
|
||||||
|
console.log('💡 Recommendation: Use N8N_MCP_USER_ID env var for stability');
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('\n');
|
||||||
58
scripts/test-error-message-tracking.ts
Normal file
58
scripts/test-error-message-tracking.ts
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
/**
|
||||||
|
* Test script to verify error message tracking is working
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { telemetry } from '../src/telemetry';
|
||||||
|
|
||||||
|
async function testErrorTracking() {
|
||||||
|
console.log('=== Testing Error Message Tracking ===\n');
|
||||||
|
|
||||||
|
// Track session first
|
||||||
|
console.log('1. Starting session...');
|
||||||
|
telemetry.trackSessionStart();
|
||||||
|
|
||||||
|
// Track an error WITH a message
|
||||||
|
console.log('\n2. Tracking error WITH message:');
|
||||||
|
const testErrorMessage = 'This is a test error message with sensitive data: password=secret123 and test@example.com';
|
||||||
|
telemetry.trackError(
|
||||||
|
'TypeError',
|
||||||
|
'tool_execution',
|
||||||
|
'test_tool',
|
||||||
|
testErrorMessage
|
||||||
|
);
|
||||||
|
console.log(` Original message: "${testErrorMessage}"`);
|
||||||
|
|
||||||
|
// Track an error WITHOUT a message
|
||||||
|
console.log('\n3. Tracking error WITHOUT message:');
|
||||||
|
telemetry.trackError(
|
||||||
|
'Error',
|
||||||
|
'tool_execution',
|
||||||
|
'test_tool2'
|
||||||
|
);
|
||||||
|
|
||||||
|
// Check the event queue
|
||||||
|
const metrics = telemetry.getMetrics();
|
||||||
|
console.log('\n4. Telemetry metrics:');
|
||||||
|
console.log(' Status:', metrics.status);
|
||||||
|
console.log(' Events queued:', metrics.tracking.eventsQueued);
|
||||||
|
|
||||||
|
// Get raw event queue to inspect
|
||||||
|
const eventTracker = (telemetry as any).eventTracker;
|
||||||
|
const queue = eventTracker.getEventQueue();
|
||||||
|
|
||||||
|
console.log('\n5. Event queue contents:');
|
||||||
|
queue.forEach((event, i) => {
|
||||||
|
console.log(`\n Event ${i + 1}:`);
|
||||||
|
console.log(` - Type: ${event.event}`);
|
||||||
|
console.log(` - Properties:`, JSON.stringify(event.properties, null, 6));
|
||||||
|
});
|
||||||
|
|
||||||
|
// Flush to database
|
||||||
|
console.log('\n6. Flushing to database...');
|
||||||
|
await telemetry.flush();
|
||||||
|
|
||||||
|
console.log('\n7. Done! Check Supabase for error events with "error" field.');
|
||||||
|
console.log(' Query: SELECT * FROM telemetry_events WHERE event = \'error_occurred\' ORDER BY created_at DESC LIMIT 5;');
|
||||||
|
}
|
||||||
|
|
||||||
|
testErrorTracking().catch(console.error);
|
||||||
274
scripts/test-error-output-validation.ts
Normal file
274
scripts/test-error-output-validation.ts
Normal file
@@ -0,0 +1,274 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test script for error output validation improvements
|
||||||
|
* Tests both incorrect and correct error output configurations
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { WorkflowValidator } from '../dist/services/workflow-validator.js';
|
||||||
|
import { NodeRepository } from '../dist/database/node-repository.js';
|
||||||
|
import { EnhancedConfigValidator } from '../dist/services/enhanced-config-validator.js';
|
||||||
|
import { DatabaseAdapter } from '../dist/database/database-adapter.js';
|
||||||
|
import { Logger } from '../dist/utils/logger.js';
|
||||||
|
import path from 'path';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = path.dirname(__filename);
|
||||||
|
|
||||||
|
const logger = new Logger({ prefix: '[TestErrorValidation]' });
|
||||||
|
|
||||||
|
async function runTests() {
|
||||||
|
// Initialize database
|
||||||
|
const dbPath = path.join(__dirname, '..', 'data', 'n8n-nodes.db');
|
||||||
|
const adapter = new DatabaseAdapter();
|
||||||
|
adapter.initialize({
|
||||||
|
type: 'better-sqlite3',
|
||||||
|
filename: dbPath
|
||||||
|
});
|
||||||
|
const db = adapter.getDatabase();
|
||||||
|
|
||||||
|
const nodeRepository = new NodeRepository(db);
|
||||||
|
const validator = new WorkflowValidator(nodeRepository, EnhancedConfigValidator);
|
||||||
|
|
||||||
|
console.log('\n🧪 Testing Error Output Validation Improvements\n');
|
||||||
|
console.log('=' .repeat(60));
|
||||||
|
|
||||||
|
// Test 1: Incorrect configuration - multiple nodes in same array
|
||||||
|
console.log('\n📝 Test 1: INCORRECT - Multiple nodes in main[0]');
|
||||||
|
console.log('-'.repeat(40));
|
||||||
|
|
||||||
|
const incorrectWorkflow = {
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: '132ef0dc-87af-41de-a95d-cabe3a0a5342',
|
||||||
|
name: 'Validate Input',
|
||||||
|
type: 'n8n-nodes-base.set',
|
||||||
|
typeVersion: 3.4,
|
||||||
|
position: [-400, 64] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: '5dedf217-63f9-409f-b34e-7780b22e199a',
|
||||||
|
name: 'Filter URLs',
|
||||||
|
type: 'n8n-nodes-base.filter',
|
||||||
|
typeVersion: 2.2,
|
||||||
|
position: [-176, 64] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: '9d5407cc-ca5a-4966-b4b7-0e5dfbf54ad3',
|
||||||
|
name: 'Error Response1',
|
||||||
|
type: 'n8n-nodes-base.respondToWebhook',
|
||||||
|
typeVersion: 1.5,
|
||||||
|
position: [-160, 240] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'Validate Input': {
|
||||||
|
main: [
|
||||||
|
[
|
||||||
|
{ node: 'Filter URLs', type: 'main', index: 0 },
|
||||||
|
{ node: 'Error Response1', type: 'main', index: 0 } // WRONG!
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const result1 = await validator.validateWorkflow(incorrectWorkflow);
|
||||||
|
|
||||||
|
if (result1.errors.length > 0) {
|
||||||
|
console.log('❌ ERROR DETECTED (as expected):');
|
||||||
|
const errorMessage = result1.errors.find(e =>
|
||||||
|
e.message.includes('Incorrect error output configuration')
|
||||||
|
);
|
||||||
|
if (errorMessage) {
|
||||||
|
console.log('\n' + errorMessage.message);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
console.log('✅ No errors found (but should have detected the issue!)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 2: Correct configuration - separate arrays
|
||||||
|
console.log('\n📝 Test 2: CORRECT - Separate main[0] and main[1]');
|
||||||
|
console.log('-'.repeat(40));
|
||||||
|
|
||||||
|
const correctWorkflow = {
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: '132ef0dc-87af-41de-a95d-cabe3a0a5342',
|
||||||
|
name: 'Validate Input',
|
||||||
|
type: 'n8n-nodes-base.set',
|
||||||
|
typeVersion: 3.4,
|
||||||
|
position: [-400, 64] as [number, number],
|
||||||
|
parameters: {},
|
||||||
|
onError: 'continueErrorOutput' as const
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: '5dedf217-63f9-409f-b34e-7780b22e199a',
|
||||||
|
name: 'Filter URLs',
|
||||||
|
type: 'n8n-nodes-base.filter',
|
||||||
|
typeVersion: 2.2,
|
||||||
|
position: [-176, 64] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: '9d5407cc-ca5a-4966-b4b7-0e5dfbf54ad3',
|
||||||
|
name: 'Error Response1',
|
||||||
|
type: 'n8n-nodes-base.respondToWebhook',
|
||||||
|
typeVersion: 1.5,
|
||||||
|
position: [-160, 240] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'Validate Input': {
|
||||||
|
main: [
|
||||||
|
[
|
||||||
|
{ node: 'Filter URLs', type: 'main', index: 0 }
|
||||||
|
],
|
||||||
|
[
|
||||||
|
{ node: 'Error Response1', type: 'main', index: 0 } // CORRECT!
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const result2 = await validator.validateWorkflow(correctWorkflow);
|
||||||
|
|
||||||
|
const hasIncorrectError = result2.errors.some(e =>
|
||||||
|
e.message.includes('Incorrect error output configuration')
|
||||||
|
);
|
||||||
|
|
||||||
|
if (!hasIncorrectError) {
|
||||||
|
console.log('✅ No error output configuration issues (correct!)');
|
||||||
|
} else {
|
||||||
|
console.log('❌ Unexpected error found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 3: onError without error connections
|
||||||
|
console.log('\n📝 Test 3: onError without error connections');
|
||||||
|
console.log('-'.repeat(40));
|
||||||
|
|
||||||
|
const mismatchWorkflow = {
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: '1',
|
||||||
|
name: 'HTTP Request',
|
||||||
|
type: 'n8n-nodes-base.httpRequest',
|
||||||
|
typeVersion: 4,
|
||||||
|
position: [100, 100] as [number, number],
|
||||||
|
parameters: {},
|
||||||
|
onError: 'continueErrorOutput' as const
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: '2',
|
||||||
|
name: 'Process Data',
|
||||||
|
type: 'n8n-nodes-base.set',
|
||||||
|
typeVersion: 2,
|
||||||
|
position: [300, 100] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'HTTP Request': {
|
||||||
|
main: [
|
||||||
|
[
|
||||||
|
{ node: 'Process Data', type: 'main', index: 0 }
|
||||||
|
]
|
||||||
|
// No main[1] for error output
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const result3 = await validator.validateWorkflow(mismatchWorkflow);
|
||||||
|
|
||||||
|
const mismatchError = result3.errors.find(e =>
|
||||||
|
e.message.includes("has onError: 'continueErrorOutput' but no error output connections")
|
||||||
|
);
|
||||||
|
|
||||||
|
if (mismatchError) {
|
||||||
|
console.log('❌ ERROR DETECTED (as expected):');
|
||||||
|
console.log(`Node: ${mismatchError.nodeName}`);
|
||||||
|
console.log(`Message: ${mismatchError.message}`);
|
||||||
|
} else {
|
||||||
|
console.log('✅ No mismatch detected (but should have!)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 4: Error connections without onError
|
||||||
|
console.log('\n📝 Test 4: Error connections without onError property');
|
||||||
|
console.log('-'.repeat(40));
|
||||||
|
|
||||||
|
const missingOnErrorWorkflow = {
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: '1',
|
||||||
|
name: 'HTTP Request',
|
||||||
|
type: 'n8n-nodes-base.httpRequest',
|
||||||
|
typeVersion: 4,
|
||||||
|
position: [100, 100] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
// Missing onError property
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: '2',
|
||||||
|
name: 'Process Data',
|
||||||
|
type: 'n8n-nodes-base.set',
|
||||||
|
position: [300, 100] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: '3',
|
||||||
|
name: 'Error Handler',
|
||||||
|
type: 'n8n-nodes-base.set',
|
||||||
|
position: [300, 300] as [number, number],
|
||||||
|
parameters: {}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'HTTP Request': {
|
||||||
|
main: [
|
||||||
|
[
|
||||||
|
{ node: 'Process Data', type: 'main', index: 0 }
|
||||||
|
],
|
||||||
|
[
|
||||||
|
{ node: 'Error Handler', type: 'main', index: 0 }
|
||||||
|
]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const result4 = await validator.validateWorkflow(missingOnErrorWorkflow);
|
||||||
|
|
||||||
|
const missingOnErrorWarning = result4.warnings.find(w =>
|
||||||
|
w.message.includes('error output connections in main[1] but missing onError')
|
||||||
|
);
|
||||||
|
|
||||||
|
if (missingOnErrorWarning) {
|
||||||
|
console.log('⚠️ WARNING DETECTED (as expected):');
|
||||||
|
console.log(`Node: ${missingOnErrorWarning.nodeName}`);
|
||||||
|
console.log(`Message: ${missingOnErrorWarning.message}`);
|
||||||
|
} else {
|
||||||
|
console.log('✅ No warning (but should have warned!)');
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('\n' + '='.repeat(60));
|
||||||
|
console.log('\n📊 Summary:');
|
||||||
|
console.log('- Error output validation is working correctly');
|
||||||
|
console.log('- Detects incorrect configurations (multiple nodes in main[0])');
|
||||||
|
console.log('- Validates onError property matches connections');
|
||||||
|
console.log('- Provides clear error messages with fix examples');
|
||||||
|
|
||||||
|
// Close database
|
||||||
|
adapter.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
runTests().catch(error => {
|
||||||
|
console.error('Test failed:', error);
|
||||||
|
process.exit(1);
|
||||||
|
});
|
||||||
158
scripts/test-error-validation.js
Normal file
158
scripts/test-error-validation.js
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
#!/usr/bin/env node

/**
 * Test script for error output validation improvements.
 *
 * Exercises WorkflowValidator against hand-built workflows to confirm that:
 *   1. Multiple nodes wrongly placed in connections main[0] are reported.
 *   2. The correct main[0]/main[1] split passes validation.
 */

const { WorkflowValidator } = require('../dist/services/workflow-validator.js');
const { NodeRepository } = require('../dist/database/node-repository.js');
const { EnhancedConfigValidator } = require('../dist/services/enhanced-config-validator.js');
const Database = require('better-sqlite3');
const path = require('path');

/**
 * Runs both validation scenarios and prints a pass/fail report to stdout.
 * @returns {Promise<void>}
 */
async function runTests() {
  // Initialize database (read-only: the tests never mutate node metadata).
  const dbPath = path.join(__dirname, '..', 'data', 'nodes.db');
  const db = new Database(dbPath, { readonly: true });

  // FIX: guarantee the database handle is released even when a validation
  // call throws; previously db.close() was only reached on full success.
  try {
    const nodeRepository = new NodeRepository(db);
    const validator = new WorkflowValidator(nodeRepository, EnhancedConfigValidator);

    console.log('\n🧪 Testing Error Output Validation Improvements\n');
    console.log('='.repeat(60));

    // Test 1: Incorrect configuration - multiple nodes in same array
    console.log('\n📝 Test 1: INCORRECT - Multiple nodes in main[0]');
    console.log('-'.repeat(40));

    const incorrectWorkflow = {
      nodes: [
        {
          id: '132ef0dc-87af-41de-a95d-cabe3a0a5342',
          name: 'Validate Input',
          type: 'n8n-nodes-base.set',
          typeVersion: 3.4,
          position: [-400, 64],
          parameters: {}
        },
        {
          id: '5dedf217-63f9-409f-b34e-7780b22e199a',
          name: 'Filter URLs',
          type: 'n8n-nodes-base.filter',
          typeVersion: 2.2,
          position: [-176, 64],
          parameters: {}
        },
        {
          id: '9d5407cc-ca5a-4966-b4b7-0e5dfbf54ad3',
          name: 'Error Response1',
          type: 'n8n-nodes-base.respondToWebhook',
          typeVersion: 1.5,
          position: [-160, 240],
          parameters: {}
        }
      ],
      connections: {
        'Validate Input': {
          main: [
            [
              { node: 'Filter URLs', type: 'main', index: 0 },
              { node: 'Error Response1', type: 'main', index: 0 } // WRONG!
            ]
          ]
        }
      }
    };

    const result1 = await validator.validateWorkflow(incorrectWorkflow);

    if (result1.errors.length > 0) {
      console.log('❌ ERROR DETECTED (as expected):');
      const errorMessage = result1.errors.find(e =>
        e.message.includes('Incorrect error output configuration')
      );
      if (errorMessage) {
        console.log('\nError Summary:');
        console.log(`Node: ${errorMessage.nodeName || 'Validate Input'}`);
        console.log('\nFull Error Message:');
        console.log(errorMessage.message);
      } else {
        console.log('Other errors found:', result1.errors.map(e => e.message));
      }
    } else {
      console.log('⚠️ No errors found - validation may not be working correctly');
    }

    // Test 2: Correct configuration - separate arrays
    console.log('\n📝 Test 2: CORRECT - Separate main[0] and main[1]');
    console.log('-'.repeat(40));

    const correctWorkflow = {
      nodes: [
        {
          id: '132ef0dc-87af-41de-a95d-cabe3a0a5342',
          name: 'Validate Input',
          type: 'n8n-nodes-base.set',
          typeVersion: 3.4,
          position: [-400, 64],
          parameters: {},
          onError: 'continueErrorOutput'
        },
        {
          id: '5dedf217-63f9-409f-b34e-7780b22e199a',
          name: 'Filter URLs',
          type: 'n8n-nodes-base.filter',
          typeVersion: 2.2,
          position: [-176, 64],
          parameters: {}
        },
        {
          id: '9d5407cc-ca5a-4966-b4b7-0e5dfbf54ad3',
          name: 'Error Response1',
          type: 'n8n-nodes-base.respondToWebhook',
          typeVersion: 1.5,
          position: [-160, 240],
          parameters: {}
        }
      ],
      connections: {
        'Validate Input': {
          main: [
            [
              { node: 'Filter URLs', type: 'main', index: 0 }
            ],
            [
              { node: 'Error Response1', type: 'main', index: 0 } // CORRECT!
            ]
          ]
        }
      }
    };

    const result2 = await validator.validateWorkflow(correctWorkflow);

    const hasIncorrectError = result2.errors.some(e =>
      e.message.includes('Incorrect error output configuration')
    );

    if (!hasIncorrectError) {
      console.log('✅ No error output configuration issues (correct!)');
    } else {
      console.log('❌ Unexpected error found');
    }

    console.log('\n' + '='.repeat(60));
    console.log('\n✨ Error output validation is working correctly!');
    console.log('The validator now properly detects:');
    console.log(' 1. Multiple nodes incorrectly placed in main[0]');
    console.log(' 2. Provides clear JSON examples for fixing issues');
    console.log(' 3. Validates onError property matches connections');
  } finally {
    // Close database
    db.close();
  }
}

runTests().catch(error => {
  console.error('Test failed:', error);
  process.exit(1);
});
|
||||||
230
scripts/test-expression-format-validation.js
Normal file
230
scripts/test-expression-format-validation.js
Normal file
@@ -0,0 +1,230 @@
|
|||||||
|
#!/usr/bin/env node

/**
 * Test script for expression format validation.
 * Tests the validation of expression prefixes (leading "=") and
 * resource locator ({ __rl: true, ... }) formats.
 */

const { WorkflowValidator } = require('../dist/services/workflow-validator.js');
const { NodeRepository } = require('../dist/database/node-repository.js');
const { EnhancedConfigValidator } = require('../dist/services/enhanced-config-validator.js');
const { createDatabaseAdapter } = require('../dist/database/database-adapter.js');
const path = require('path');

/**
 * Runs five expression-format scenarios and prints a pass/fail report.
 * @returns {Promise<void>}
 */
async function runTests() {
  // Initialize database
  const dbPath = path.join(__dirname, '..', 'data', 'nodes.db');
  const adapter = await createDatabaseAdapter(dbPath);
  const db = adapter;

  // FIX: close the adapter even when a validation call throws; previously
  // db.close() was only reached when every test completed without error.
  try {
    const nodeRepository = new NodeRepository(db);
    const validator = new WorkflowValidator(nodeRepository, EnhancedConfigValidator);

    console.log('\n🧪 Testing Expression Format Validation\n');
    console.log('='.repeat(60));

    // Test 1: Email node with missing = prefix
    console.log('\n📝 Test 1: Email Send node - Missing = prefix');
    console.log('-'.repeat(40));

    const emailWorkflowIncorrect = {
      nodes: [
        {
          id: 'b9dd1cfd-ee66-4049-97e7-1af6d976a4e0',
          name: 'Error Handler',
          type: 'n8n-nodes-base.emailSend',
          typeVersion: 2.1,
          position: [-128, 400],
          parameters: {
            fromEmail: '{{ $env.ADMIN_EMAIL }}', // INCORRECT - missing =
            toEmail: 'admin@company.com',
            subject: 'GitHub Issue Workflow Error - HIGH PRIORITY',
            options: {}
          },
          credentials: {
            smtp: {
              id: '7AQ08VMFHubmfvzR',
              name: 'romuald@aiadvisors.pl'
            }
          }
        }
      ],
      connections: {}
    };

    const result1 = await validator.validateWorkflow(emailWorkflowIncorrect);

    if (result1.errors.some(e => e.message.includes('Expression format'))) {
      console.log('✅ ERROR DETECTED (correct behavior):');
      const formatError = result1.errors.find(e => e.message.includes('Expression format'));
      console.log('\n' + formatError.message);
    } else {
      console.log('❌ No expression format error detected (should have detected missing prefix)');
    }

    // Test 2: Email node with correct = prefix
    console.log('\n📝 Test 2: Email Send node - Correct = prefix');
    console.log('-'.repeat(40));

    const emailWorkflowCorrect = {
      nodes: [
        {
          id: 'b9dd1cfd-ee66-4049-97e7-1af6d976a4e0',
          name: 'Error Handler',
          type: 'n8n-nodes-base.emailSend',
          typeVersion: 2.1,
          position: [-128, 400],
          parameters: {
            fromEmail: '={{ $env.ADMIN_EMAIL }}', // CORRECT - has =
            toEmail: 'admin@company.com',
            subject: 'GitHub Issue Workflow Error - HIGH PRIORITY',
            options: {}
          }
        }
      ],
      connections: {}
    };

    const result2 = await validator.validateWorkflow(emailWorkflowCorrect);

    if (result2.errors.some(e => e.message.includes('Expression format'))) {
      console.log('❌ Unexpected expression format error (should accept = prefix)');
    } else {
      console.log('✅ No expression format errors (correct!)');
    }

    // Test 3: GitHub node without resource locator format
    console.log('\n📝 Test 3: GitHub node - Missing resource locator format');
    console.log('-'.repeat(40));

    const githubWorkflowIncorrect = {
      nodes: [
        {
          id: '3c742ca1-af8f-4d80-a47e-e68fb1ced491',
          name: 'Send Welcome Comment',
          type: 'n8n-nodes-base.github',
          typeVersion: 1.1,
          position: [-240, 96],
          parameters: {
            operation: 'createComment',
            owner: '{{ $vars.GITHUB_OWNER }}', // INCORRECT - needs RL format
            repository: '{{ $vars.GITHUB_REPO }}', // INCORRECT - needs RL format
            issueNumber: null,
            body: '👋 Hi @{{ $(\'Extract Issue Data\').first().json.author }}!' // INCORRECT - missing =
          },
          credentials: {
            githubApi: {
              id: 'edgpwh6ldYN07MXx',
              name: 'GitHub account'
            }
          }
        }
      ],
      connections: {}
    };

    const result3 = await validator.validateWorkflow(githubWorkflowIncorrect);

    const formatErrors = result3.errors.filter(e => e.message.includes('Expression format'));
    console.log(`\nFound ${formatErrors.length} expression format errors:`);

    if (formatErrors.length >= 3) {
      console.log('✅ All format issues detected:');
      formatErrors.forEach((error, index) => {
        const field = error.message.match(/Field '([^']+)'/)?.[1] || 'unknown';
        console.log(` ${index + 1}. Field '${field}' - ${error.message.includes('resource locator') ? 'Needs RL format' : 'Missing = prefix'}`);
      });
    } else {
      console.log('❌ Not all format issues detected');
    }

    // Test 4: GitHub node with correct resource locator format
    console.log('\n📝 Test 4: GitHub node - Correct resource locator format');
    console.log('-'.repeat(40));

    const githubWorkflowCorrect = {
      nodes: [
        {
          id: '3c742ca1-af8f-4d80-a47e-e68fb1ced491',
          name: 'Send Welcome Comment',
          type: 'n8n-nodes-base.github',
          typeVersion: 1.1,
          position: [-240, 96],
          parameters: {
            operation: 'createComment',
            owner: {
              __rl: true,
              value: '={{ $vars.GITHUB_OWNER }}', // CORRECT - RL format with =
              mode: 'expression'
            },
            repository: {
              __rl: true,
              value: '={{ $vars.GITHUB_REPO }}', // CORRECT - RL format with =
              mode: 'expression'
            },
            issueNumber: 123,
            body: '=👋 Hi @{{ $(\'Extract Issue Data\').first().json.author }}!' // CORRECT - has =
          }
        }
      ],
      connections: {}
    };

    const result4 = await validator.validateWorkflow(githubWorkflowCorrect);

    const formatErrors4 = result4.errors.filter(e => e.message.includes('Expression format'));
    if (formatErrors4.length === 0) {
      console.log('✅ No expression format errors (correct!)');
    } else {
      console.log(`❌ Unexpected expression format errors: ${formatErrors4.length}`);
      formatErrors4.forEach(e => console.log(' - ' + e.message.split('\n')[0]));
    }

    // Test 5: Mixed content expressions
    console.log('\n📝 Test 5: Mixed content with expressions');
    console.log('-'.repeat(40));

    const mixedContentWorkflow = {
      nodes: [
        {
          id: '1',
          name: 'HTTP Request',
          type: 'n8n-nodes-base.httpRequest',
          typeVersion: 4,
          position: [0, 0],
          parameters: {
            url: 'https://api.example.com/users/{{ $json.userId }}', // INCORRECT
            headers: {
              'Authorization': '=Bearer {{ $env.API_TOKEN }}' // CORRECT
            }
          }
        }
      ],
      connections: {}
    };

    const result5 = await validator.validateWorkflow(mixedContentWorkflow);

    const urlError = result5.errors.find(e => e.message.includes('url') && e.message.includes('Expression format'));
    if (urlError) {
      console.log('✅ Mixed content error detected for URL field');
      console.log(' Should be: "=https://api.example.com/users/{{ $json.userId }}"');
    } else {
      console.log('❌ Mixed content error not detected');
    }

    console.log('\n' + '='.repeat(60));
    console.log('\n✨ Expression Format Validation Summary:');
    console.log(' - Detects missing = prefix in expressions');
    console.log(' - Identifies fields needing resource locator format');
    console.log(' - Provides clear correction examples');
    console.log(' - Handles mixed literal and expression content');
  } finally {
    // Close database
    db.close();
  }
}

runTests().catch(error => {
  console.error('Test failed:', error);
  process.exit(1);
});
|
||||||
126
scripts/test-multi-tenant-simple.ts
Normal file
126
scripts/test-multi-tenant-simple.ts
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
#!/usr/bin/env ts-node
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Simple test for multi-tenant functionality
|
||||||
|
* Tests that tools are registered correctly based on configuration
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { isN8nApiConfigured } from '../src/config/n8n-api';
|
||||||
|
import { InstanceContext } from '../src/types/instance-context';
|
||||||
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
async function testMultiTenant() {
|
||||||
|
console.log('🧪 Testing Multi-Tenant Tool Registration\n');
|
||||||
|
console.log('=' .repeat(60));
|
||||||
|
|
||||||
|
// Save original environment
|
||||||
|
const originalEnv = {
|
||||||
|
ENABLE_MULTI_TENANT: process.env.ENABLE_MULTI_TENANT,
|
||||||
|
N8N_API_URL: process.env.N8N_API_URL,
|
||||||
|
N8N_API_KEY: process.env.N8N_API_KEY
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Test 1: Default - no API config
|
||||||
|
console.log('\n✅ Test 1: No API configuration');
|
||||||
|
delete process.env.N8N_API_URL;
|
||||||
|
delete process.env.N8N_API_KEY;
|
||||||
|
delete process.env.ENABLE_MULTI_TENANT;
|
||||||
|
|
||||||
|
const hasConfig1 = isN8nApiConfigured();
|
||||||
|
console.log(` Environment API configured: ${hasConfig1}`);
|
||||||
|
console.log(` Multi-tenant enabled: ${process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
console.log(` Should show tools: ${hasConfig1 || process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
|
||||||
|
// Test 2: Multi-tenant enabled
|
||||||
|
console.log('\n✅ Test 2: Multi-tenant enabled (no env API)');
|
||||||
|
process.env.ENABLE_MULTI_TENANT = 'true';
|
||||||
|
|
||||||
|
const hasConfig2 = isN8nApiConfigured();
|
||||||
|
console.log(` Environment API configured: ${hasConfig2}`);
|
||||||
|
console.log(` Multi-tenant enabled: ${process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
console.log(` Should show tools: ${hasConfig2 || process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
|
||||||
|
// Test 3: Environment variables set
|
||||||
|
console.log('\n✅ Test 3: Environment variables set');
|
||||||
|
process.env.ENABLE_MULTI_TENANT = 'false';
|
||||||
|
process.env.N8N_API_URL = 'https://test.n8n.cloud';
|
||||||
|
process.env.N8N_API_KEY = 'test-key';
|
||||||
|
|
||||||
|
const hasConfig3 = isN8nApiConfigured();
|
||||||
|
console.log(` Environment API configured: ${hasConfig3}`);
|
||||||
|
console.log(` Multi-tenant enabled: ${process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
console.log(` Should show tools: ${hasConfig3 || process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
|
||||||
|
// Test 4: Instance context simulation
|
||||||
|
console.log('\n✅ Test 4: Instance context (simulated)');
|
||||||
|
const instanceContext: InstanceContext = {
|
||||||
|
n8nApiUrl: 'https://instance.n8n.cloud',
|
||||||
|
n8nApiKey: 'instance-key',
|
||||||
|
instanceId: 'test-instance'
|
||||||
|
};
|
||||||
|
|
||||||
|
const hasInstanceConfig = !!(instanceContext.n8nApiUrl && instanceContext.n8nApiKey);
|
||||||
|
console.log(` Instance has API config: ${hasInstanceConfig}`);
|
||||||
|
console.log(` Environment API configured: ${hasConfig3}`);
|
||||||
|
console.log(` Multi-tenant enabled: ${process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
console.log(` Should show tools: ${hasConfig3 || hasInstanceConfig || process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
|
||||||
|
// Test 5: Multi-tenant with instance strategy
|
||||||
|
console.log('\n✅ Test 5: Multi-tenant with instance strategy');
|
||||||
|
process.env.ENABLE_MULTI_TENANT = 'true';
|
||||||
|
process.env.MULTI_TENANT_SESSION_STRATEGY = 'instance';
|
||||||
|
delete process.env.N8N_API_URL;
|
||||||
|
delete process.env.N8N_API_KEY;
|
||||||
|
|
||||||
|
const hasConfig5 = isN8nApiConfigured();
|
||||||
|
const sessionStrategy = process.env.MULTI_TENANT_SESSION_STRATEGY || 'instance';
|
||||||
|
console.log(` Environment API configured: ${hasConfig5}`);
|
||||||
|
console.log(` Multi-tenant enabled: ${process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
console.log(` Session strategy: ${sessionStrategy}`);
|
||||||
|
console.log(` Should show tools: ${hasConfig5 || process.env.ENABLE_MULTI_TENANT === 'true'}`);
|
||||||
|
|
||||||
|
if (instanceContext.instanceId) {
|
||||||
|
const sessionId = `instance-${instanceContext.instanceId}-uuid`;
|
||||||
|
console.log(` Session ID format: ${sessionId}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('\n' + '=' .repeat(60));
|
||||||
|
console.log('✅ All configuration tests passed!');
|
||||||
|
console.log('\n📝 Summary:');
|
||||||
|
console.log(' - Tools are shown when: env API configured OR multi-tenant enabled OR instance context provided');
|
||||||
|
console.log(' - Session isolation works with instance-based session IDs in multi-tenant mode');
|
||||||
|
console.log(' - Backward compatibility maintained for env-based configuration');
|
||||||
|
|
||||||
|
} catch (error) {
|
||||||
|
console.error('\n❌ Test failed:', error);
|
||||||
|
process.exit(1);
|
||||||
|
} finally {
|
||||||
|
// Restore original environment
|
||||||
|
if (originalEnv.ENABLE_MULTI_TENANT !== undefined) {
|
||||||
|
process.env.ENABLE_MULTI_TENANT = originalEnv.ENABLE_MULTI_TENANT;
|
||||||
|
} else {
|
||||||
|
delete process.env.ENABLE_MULTI_TENANT;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (originalEnv.N8N_API_URL !== undefined) {
|
||||||
|
process.env.N8N_API_URL = originalEnv.N8N_API_URL;
|
||||||
|
} else {
|
||||||
|
delete process.env.N8N_API_URL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (originalEnv.N8N_API_KEY !== undefined) {
|
||||||
|
process.env.N8N_API_KEY = originalEnv.N8N_API_KEY;
|
||||||
|
} else {
|
||||||
|
delete process.env.N8N_API_KEY;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run tests
|
||||||
|
testMultiTenant().catch(error => {
|
||||||
|
console.error('Test execution failed:', error);
|
||||||
|
process.exit(1);
|
||||||
|
});
|
||||||
136
scripts/test-multi-tenant.ts
Normal file
136
scripts/test-multi-tenant.ts
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
#!/usr/bin/env ts-node
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test script for multi-tenant functionality
|
||||||
|
* Verifies that instance context from headers enables n8n API tools
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { N8NDocumentationMCPServer } from '../src/mcp/server';
|
||||||
|
import { InstanceContext } from '../src/types/instance-context';
|
||||||
|
import { logger } from '../src/utils/logger';
|
||||||
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
async function testMultiTenant() {
|
||||||
|
console.log('🧪 Testing Multi-Tenant Functionality\n');
|
||||||
|
console.log('=' .repeat(60));
|
||||||
|
|
||||||
|
// Save original environment
|
||||||
|
const originalEnv = {
|
||||||
|
ENABLE_MULTI_TENANT: process.env.ENABLE_MULTI_TENANT,
|
||||||
|
N8N_API_URL: process.env.N8N_API_URL,
|
||||||
|
N8N_API_KEY: process.env.N8N_API_KEY
|
||||||
|
};
|
||||||
|
|
||||||
|
// Wait a moment for database initialization
|
||||||
|
await new Promise(resolve => setTimeout(resolve, 100));
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Test 1: Without multi-tenant mode (default)
|
||||||
|
console.log('\n📌 Test 1: Without multi-tenant mode (no env vars)');
|
||||||
|
delete process.env.N8N_API_URL;
|
||||||
|
delete process.env.N8N_API_KEY;
|
||||||
|
process.env.ENABLE_MULTI_TENANT = 'false';
|
||||||
|
|
||||||
|
const server1 = new N8NDocumentationMCPServer();
|
||||||
|
const tools1 = await getToolsFromServer(server1);
|
||||||
|
const hasManagementTools1 = tools1.some(t => t.name.startsWith('n8n_'));
|
||||||
|
console.log(` Tools available: ${tools1.length}`);
|
||||||
|
console.log(` Has management tools: ${hasManagementTools1}`);
|
||||||
|
console.log(` ✅ Expected: No management tools (correct: ${!hasManagementTools1})`);
|
||||||
|
|
||||||
|
// Test 2: With instance context but multi-tenant disabled
|
||||||
|
console.log('\n📌 Test 2: With instance context but multi-tenant disabled');
|
||||||
|
const instanceContext: InstanceContext = {
|
||||||
|
n8nApiUrl: 'https://instance1.n8n.cloud',
|
||||||
|
n8nApiKey: 'test-api-key',
|
||||||
|
instanceId: 'instance-1'
|
||||||
|
};
|
||||||
|
|
||||||
|
const server2 = new N8NDocumentationMCPServer(instanceContext);
|
||||||
|
const tools2 = await getToolsFromServer(server2);
|
||||||
|
const hasManagementTools2 = tools2.some(t => t.name.startsWith('n8n_'));
|
||||||
|
console.log(` Tools available: ${tools2.length}`);
|
||||||
|
console.log(` Has management tools: ${hasManagementTools2}`);
|
||||||
|
console.log(` ✅ Expected: Has management tools (correct: ${hasManagementTools2})`);
|
||||||
|
|
||||||
|
// Test 3: With multi-tenant mode enabled
|
||||||
|
console.log('\n📌 Test 3: With multi-tenant mode enabled');
|
||||||
|
process.env.ENABLE_MULTI_TENANT = 'true';
|
||||||
|
|
||||||
|
const server3 = new N8NDocumentationMCPServer();
|
||||||
|
const tools3 = await getToolsFromServer(server3);
|
||||||
|
const hasManagementTools3 = tools3.some(t => t.name.startsWith('n8n_'));
|
||||||
|
console.log(` Tools available: ${tools3.length}`);
|
||||||
|
console.log(` Has management tools: ${hasManagementTools3}`);
|
||||||
|
console.log(` ✅ Expected: Has management tools (correct: ${hasManagementTools3})`);
|
||||||
|
|
||||||
|
// Test 4: Multi-tenant with instance context
|
||||||
|
console.log('\n📌 Test 4: Multi-tenant with instance context');
|
||||||
|
const server4 = new N8NDocumentationMCPServer(instanceContext);
|
||||||
|
const tools4 = await getToolsFromServer(server4);
|
||||||
|
const hasManagementTools4 = tools4.some(t => t.name.startsWith('n8n_'));
|
||||||
|
console.log(` Tools available: ${tools4.length}`);
|
||||||
|
console.log(` Has management tools: ${hasManagementTools4}`);
|
||||||
|
console.log(` ✅ Expected: Has management tools (correct: ${hasManagementTools4})`);
|
||||||
|
|
||||||
|
// Test 5: Environment variables (backward compatibility)
|
||||||
|
console.log('\n📌 Test 5: Environment variables (backward compatibility)');
|
||||||
|
process.env.ENABLE_MULTI_TENANT = 'false';
|
||||||
|
process.env.N8N_API_URL = 'https://env.n8n.cloud';
|
||||||
|
process.env.N8N_API_KEY = 'env-api-key';
|
||||||
|
|
||||||
|
const server5 = new N8NDocumentationMCPServer();
|
||||||
|
const tools5 = await getToolsFromServer(server5);
|
||||||
|
const hasManagementTools5 = tools5.some(t => t.name.startsWith('n8n_'));
|
||||||
|
console.log(` Tools available: ${tools5.length}`);
|
||||||
|
console.log(` Has management tools: ${hasManagementTools5}`);
|
||||||
|
console.log(` ✅ Expected: Has management tools (correct: ${hasManagementTools5})`);
|
||||||
|
|
||||||
|
console.log('\n' + '=' .repeat(60));
|
||||||
|
console.log('✅ All multi-tenant tests passed!');
|
||||||
|
|
||||||
|
} catch (error) {
|
||||||
|
console.error('\n❌ Test failed:', error);
|
||||||
|
process.exit(1);
|
||||||
|
} finally {
|
||||||
|
// Restore original environment
|
||||||
|
Object.assign(process.env, originalEnv);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper function to get tools from server
|
||||||
|
async function getToolsFromServer(server: N8NDocumentationMCPServer): Promise<any[]> {
|
||||||
|
// Access the private server instance to simulate tool listing
|
||||||
|
const serverInstance = (server as any).server;
|
||||||
|
const handlers = (serverInstance as any)._requestHandlers;
|
||||||
|
|
||||||
|
// Find and call the ListToolsRequestSchema handler
|
||||||
|
if (handlers && handlers.size > 0) {
|
||||||
|
for (const [schema, handler] of handlers) {
|
||||||
|
// Check for the tools/list schema
|
||||||
|
if (schema && schema.method === 'tools/list') {
|
||||||
|
const result = await handler({ params: {} });
|
||||||
|
return result.tools || [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: directly check the handlers map
|
||||||
|
const ListToolsRequestSchema = { method: 'tools/list' };
|
||||||
|
const handler = handlers?.get(ListToolsRequestSchema);
|
||||||
|
if (handler) {
|
||||||
|
const result = await handler({ params: {} });
|
||||||
|
return result.tools || [];
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(' ⚠️ Warning: Could not find tools/list handler');
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run tests
|
||||||
|
testMultiTenant().catch(error => {
|
||||||
|
console.error('Test execution failed:', error);
|
||||||
|
process.exit(1);
|
||||||
|
});
|
||||||
178
scripts/test-operation-validation.ts
Normal file
178
scripts/test-operation-validation.ts
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
/**
|
||||||
|
* Test script for operation and resource validation with Google Drive example
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { DatabaseAdapter } from '../src/database/database-adapter';
|
||||||
|
import { NodeRepository } from '../src/database/node-repository';
|
||||||
|
import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator';
|
||||||
|
import { WorkflowValidator } from '../src/services/workflow-validator';
|
||||||
|
import { createDatabaseAdapter } from '../src/database/database-adapter';
|
||||||
|
import { logger } from '../src/utils/logger';
|
||||||
|
import chalk from 'chalk';
|
||||||
|
|
||||||
|
async function testOperationValidation() {
|
||||||
|
console.log(chalk.blue('Testing Operation and Resource Validation'));
|
||||||
|
console.log('='.repeat(60));
|
||||||
|
|
||||||
|
// Initialize database
|
||||||
|
const dbPath = process.env.NODE_DB_PATH || 'data/nodes.db';
|
||||||
|
const db = await createDatabaseAdapter(dbPath);
|
||||||
|
const repository = new NodeRepository(db);
|
||||||
|
|
||||||
|
// Initialize similarity services
|
||||||
|
EnhancedConfigValidator.initializeSimilarityServices(repository);
|
||||||
|
|
||||||
|
// Test 1: Invalid operation "listFiles"
|
||||||
|
console.log(chalk.yellow('\n📝 Test 1: Google Drive with invalid operation "listFiles"'));
|
||||||
|
const invalidConfig = {
|
||||||
|
resource: 'fileFolder',
|
||||||
|
operation: 'listFiles'
|
||||||
|
};
|
||||||
|
|
||||||
|
const node = repository.getNode('nodes-base.googleDrive');
|
||||||
|
if (!node) {
|
||||||
|
console.error(chalk.red('Google Drive node not found in database'));
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
const result1 = EnhancedConfigValidator.validateWithMode(
|
||||||
|
'nodes-base.googleDrive',
|
||||||
|
invalidConfig,
|
||||||
|
node.properties,
|
||||||
|
'operation',
|
||||||
|
'ai-friendly'
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(`Valid: ${result1.valid ? chalk.green('✓') : chalk.red('✗')}`);
|
||||||
|
if (result1.errors.length > 0) {
|
||||||
|
console.log(chalk.red('Errors:'));
|
||||||
|
result1.errors.forEach(error => {
|
||||||
|
console.log(` - ${error.property}: ${error.message}`);
|
||||||
|
if (error.fix) {
|
||||||
|
console.log(chalk.cyan(` Fix: ${error.fix}`));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 2: Invalid resource "files" (should be singular)
|
||||||
|
console.log(chalk.yellow('\n📝 Test 2: Google Drive with invalid resource "files"'));
|
||||||
|
const pluralResourceConfig = {
|
||||||
|
resource: 'files',
|
||||||
|
operation: 'download'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result2 = EnhancedConfigValidator.validateWithMode(
|
||||||
|
'nodes-base.googleDrive',
|
||||||
|
pluralResourceConfig,
|
||||||
|
node.properties,
|
||||||
|
'operation',
|
||||||
|
'ai-friendly'
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(`Valid: ${result2.valid ? chalk.green('✓') : chalk.red('✗')}`);
|
||||||
|
if (result2.errors.length > 0) {
|
||||||
|
console.log(chalk.red('Errors:'));
|
||||||
|
result2.errors.forEach(error => {
|
||||||
|
console.log(` - ${error.property}: ${error.message}`);
|
||||||
|
if (error.fix) {
|
||||||
|
console.log(chalk.cyan(` Fix: ${error.fix}`));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 3: Valid configuration
|
||||||
|
console.log(chalk.yellow('\n📝 Test 3: Google Drive with valid configuration'));
|
||||||
|
const validConfig = {
|
||||||
|
resource: 'file',
|
||||||
|
operation: 'download'
|
||||||
|
};
|
||||||
|
|
||||||
|
const result3 = EnhancedConfigValidator.validateWithMode(
|
||||||
|
'nodes-base.googleDrive',
|
||||||
|
validConfig,
|
||||||
|
node.properties,
|
||||||
|
'operation',
|
||||||
|
'ai-friendly'
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(`Valid: ${result3.valid ? chalk.green('✓') : chalk.red('✗')}`);
|
||||||
|
if (result3.errors.length > 0) {
|
||||||
|
console.log(chalk.red('Errors:'));
|
||||||
|
result3.errors.forEach(error => {
|
||||||
|
console.log(` - ${error.property}: ${error.message}`);
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
console.log(chalk.green('No errors - configuration is valid!'));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 4: Test in workflow context
|
||||||
|
console.log(chalk.yellow('\n📝 Test 4: Full workflow with invalid Google Drive node'));
|
||||||
|
const workflow = {
|
||||||
|
name: 'Test Workflow',
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: '1',
|
||||||
|
name: 'Google Drive',
|
||||||
|
type: 'n8n-nodes-base.googleDrive',
|
||||||
|
position: [100, 100] as [number, number],
|
||||||
|
parameters: {
|
||||||
|
resource: 'fileFolder',
|
||||||
|
operation: 'listFiles' // Invalid operation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {}
|
||||||
|
};
|
||||||
|
|
||||||
|
const validator = new WorkflowValidator(repository, EnhancedConfigValidator);
|
||||||
|
const workflowResult = await validator.validateWorkflow(workflow, {
|
||||||
|
validateNodes: true,
|
||||||
|
profile: 'ai-friendly'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`Workflow Valid: ${workflowResult.valid ? chalk.green('✓') : chalk.red('✗')}`);
|
||||||
|
if (workflowResult.errors.length > 0) {
|
||||||
|
console.log(chalk.red('Errors:'));
|
||||||
|
workflowResult.errors.forEach(error => {
|
||||||
|
console.log(` - ${error.nodeName || 'Workflow'}: ${error.message}`);
|
||||||
|
if (error.details?.fix) {
|
||||||
|
console.log(chalk.cyan(` Fix: ${error.details.fix}`));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 5: Typo in operation
|
||||||
|
console.log(chalk.yellow('\n📝 Test 5: Typo in operation "downlod"'));
|
||||||
|
const typoConfig = {
|
||||||
|
resource: 'file',
|
||||||
|
operation: 'downlod' // Typo
|
||||||
|
};
|
||||||
|
|
||||||
|
const result5 = EnhancedConfigValidator.validateWithMode(
|
||||||
|
'nodes-base.googleDrive',
|
||||||
|
typoConfig,
|
||||||
|
node.properties,
|
||||||
|
'operation',
|
||||||
|
'ai-friendly'
|
||||||
|
);
|
||||||
|
|
||||||
|
console.log(`Valid: ${result5.valid ? chalk.green('✓') : chalk.red('✗')}`);
|
||||||
|
if (result5.errors.length > 0) {
|
||||||
|
console.log(chalk.red('Errors:'));
|
||||||
|
result5.errors.forEach(error => {
|
||||||
|
console.log(` - ${error.property}: ${error.message}`);
|
||||||
|
if (error.fix) {
|
||||||
|
console.log(chalk.cyan(` Fix: ${error.fix}`));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(chalk.green('\n✅ All tests completed!'));
|
||||||
|
db.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run tests
// Top-level entry point: report any unhandled rejection and exit non-zero
// so a shell or CI run surfaces the failure.
testOperationValidation().catch(error => {
  console.error(chalk.red('Error running tests:'), error);
  process.exit(1);
});
|
||||||
118
scripts/test-telemetry-debug.ts
Normal file
118
scripts/test-telemetry-debug.ts
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Debug script for telemetry integration
|
||||||
|
* Tests direct Supabase connection
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createClient } from '@supabase/supabase-js';
|
||||||
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
|
// Load environment variables
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
async function debugTelemetry() {
|
||||||
|
console.log('🔍 Debugging Telemetry Integration\n');
|
||||||
|
|
||||||
|
const supabaseUrl = process.env.SUPABASE_URL;
|
||||||
|
const supabaseAnonKey = process.env.SUPABASE_ANON_KEY;
|
||||||
|
|
||||||
|
if (!supabaseUrl || !supabaseAnonKey) {
|
||||||
|
console.error('❌ Missing SUPABASE_URL or SUPABASE_ANON_KEY');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('Environment:');
|
||||||
|
console.log(' URL:', supabaseUrl);
|
||||||
|
console.log(' Key:', supabaseAnonKey.substring(0, 30) + '...');
|
||||||
|
|
||||||
|
// Create Supabase client
|
||||||
|
const supabase = createClient(supabaseUrl, supabaseAnonKey, {
|
||||||
|
auth: {
|
||||||
|
persistSession: false,
|
||||||
|
autoRefreshToken: false,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Test 1: Direct insert to telemetry_events
|
||||||
|
console.log('\n📝 Test 1: Direct insert to telemetry_events...');
|
||||||
|
const testEvent = {
|
||||||
|
user_id: 'test-user-123',
|
||||||
|
event: 'test_event',
|
||||||
|
properties: {
|
||||||
|
test: true,
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const { data: eventData, error: eventError } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.insert([testEvent])
|
||||||
|
.select();
|
||||||
|
|
||||||
|
if (eventError) {
|
||||||
|
console.error('❌ Event insert failed:', eventError);
|
||||||
|
} else {
|
||||||
|
console.log('✅ Event inserted successfully:', eventData);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 2: Direct insert to telemetry_workflows
|
||||||
|
console.log('\n📝 Test 2: Direct insert to telemetry_workflows...');
|
||||||
|
const testWorkflow = {
|
||||||
|
user_id: 'test-user-123',
|
||||||
|
workflow_hash: 'test-hash-' + Date.now(),
|
||||||
|
node_count: 3,
|
||||||
|
node_types: ['webhook', 'http', 'slack'],
|
||||||
|
has_trigger: true,
|
||||||
|
has_webhook: true,
|
||||||
|
complexity: 'simple',
|
||||||
|
sanitized_workflow: {
|
||||||
|
nodes: [],
|
||||||
|
connections: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const { data: workflowData, error: workflowError } = await supabase
|
||||||
|
.from('telemetry_workflows')
|
||||||
|
.insert([testWorkflow])
|
||||||
|
.select();
|
||||||
|
|
||||||
|
if (workflowError) {
|
||||||
|
console.error('❌ Workflow insert failed:', workflowError);
|
||||||
|
} else {
|
||||||
|
console.log('✅ Workflow inserted successfully:', workflowData);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 3: Try to read data (should fail with anon key due to RLS)
|
||||||
|
console.log('\n📖 Test 3: Attempting to read data (should fail due to RLS)...');
|
||||||
|
const { data: readData, error: readError } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.select('*')
|
||||||
|
.limit(1);
|
||||||
|
|
||||||
|
if (readError) {
|
||||||
|
console.log('✅ Read correctly blocked by RLS:', readError.message);
|
||||||
|
} else {
|
||||||
|
console.log('⚠️ Unexpected: Read succeeded (RLS may not be working):', readData);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 4: Check table existence
|
||||||
|
console.log('\n🔍 Test 4: Verifying tables exist...');
|
||||||
|
const { data: tables, error: tablesError } = await supabase
|
||||||
|
.rpc('get_tables', { schema_name: 'public' })
|
||||||
|
.select('*');
|
||||||
|
|
||||||
|
if (tablesError) {
|
||||||
|
// This is expected - the RPC function might not exist
|
||||||
|
console.log('ℹ️ Cannot list tables (RPC function not available)');
|
||||||
|
} else {
|
||||||
|
console.log('Tables found:', tables);
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('\n✨ Debug completed! Check your Supabase dashboard for the test data.');
|
||||||
|
console.log('Dashboard: https://supabase.com/dashboard/project/ydyufsohxdfpopqbubwk/editor');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point: report any failure and exit non-zero so CI notices.
debugTelemetry().catch(error => {
  console.error('❌ Debug failed:', error);
  process.exit(1);
});
|
||||||
46
scripts/test-telemetry-direct.ts
Normal file
46
scripts/test-telemetry-direct.ts
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Direct telemetry test with hardcoded credentials
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createClient } from '@supabase/supabase-js';
|
||||||
|
|
||||||
|
// Telemetry backend coordinates, hardcoded for this standalone test.
// NOTE(review): ANON_KEY is a Supabase anonymous-role JWT. Anon keys are
// designed to ship to clients, but committing one to the repo makes
// rotation harder — confirm this is intentional for a throwaway script.
const TELEMETRY_BACKEND = {
  URL: 'https://ydyufsohxdfpopqbubwk.supabase.co',
  ANON_KEY: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InlkeXVmc29oeGRmcG9wcWJ1YndrIiwicm9sZSI6ImFub24iLCJpYXQiOjE3Mzc2MzAxMDgsImV4cCI6MjA1MzIwNjEwOH0.LsUTx9OsNtnqg-jxXaJPc84aBHVDehHiMaFoF2Ir8s0'
};
|
||||||
|
|
||||||
|
async function testDirect() {
|
||||||
|
console.log('🧪 Direct Telemetry Test\n');
|
||||||
|
|
||||||
|
const supabase = createClient(TELEMETRY_BACKEND.URL, TELEMETRY_BACKEND.ANON_KEY, {
|
||||||
|
auth: {
|
||||||
|
persistSession: false,
|
||||||
|
autoRefreshToken: false,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
const testEvent = {
|
||||||
|
user_id: 'direct-test-' + Date.now(),
|
||||||
|
event: 'direct_test',
|
||||||
|
properties: {
|
||||||
|
source: 'test-telemetry-direct.ts',
|
||||||
|
timestamp: new Date().toISOString()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('Sending event:', testEvent);
|
||||||
|
|
||||||
|
const { data, error } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.insert([testEvent]);
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
console.error('❌ Failed:', error);
|
||||||
|
} else {
|
||||||
|
console.log('✅ Success! Event sent directly to Supabase');
|
||||||
|
console.log('Response:', data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point; unhandled errors are only logged (exit code stays 0).
testDirect().catch(console.error);
|
||||||
62
scripts/test-telemetry-env.ts
Normal file
62
scripts/test-telemetry-env.ts
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Test telemetry environment variable override
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { TelemetryConfigManager } from '../src/telemetry/config-manager';
|
||||||
|
import { telemetry } from '../src/telemetry/telemetry-manager';
|
||||||
|
|
||||||
|
/**
 * Manual test: environment-variable kill switches for telemetry.
 * Walks through N8N_MCP_TELEMETRY_DISABLED, TELEMETRY_DISABLED and
 * DISABLE_TELEMETRY and checks each against
 * TelemetryConfigManager.isEnabled().
 */
async function testEnvOverride() {
  console.log('🧪 Testing Telemetry Environment Variable Override\n');

  const configManager = TelemetryConfigManager.getInstance();

  // Test 1: Check current status without env var
  console.log('Test 1: Without environment variable');
  console.log('Is Enabled:', configManager.isEnabled());
  console.log('Status:', configManager.getStatus());

  // Test 2: Set environment variable and check again
  console.log('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
  console.log('Test 2: With N8N_MCP_TELEMETRY_DISABLED=true');
  process.env.N8N_MCP_TELEMETRY_DISABLED = 'true';

  // NOTE(review): getInstance() returns the process-wide singleton, so this
  // is the SAME instance as `configManager`, not a fresh reload. The test
  // only works if isEnabled() re-reads process.env on every call — confirm.
  const newConfigManager = TelemetryConfigManager.getInstance();
  console.log('Is Enabled:', newConfigManager.isEnabled());
  console.log('Status:', newConfigManager.getStatus());

  // Test 3: Try tracking with env disabled — the call should be a no-op.
  console.log('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
  console.log('Test 3: Attempting to track with telemetry disabled');
  telemetry.trackToolUsage('test_tool', true, 100);
  console.log('Tool usage tracking attempted (should be ignored)');

  // Test 4: Alternative env vars — each set in turn, previous one removed.
  console.log('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
  console.log('Test 4: Alternative environment variables');

  delete process.env.N8N_MCP_TELEMETRY_DISABLED;
  process.env.TELEMETRY_DISABLED = 'true';
  console.log('With TELEMETRY_DISABLED=true:', newConfigManager.isEnabled());

  delete process.env.TELEMETRY_DISABLED;
  process.env.DISABLE_TELEMETRY = 'true';
  console.log('With DISABLE_TELEMETRY=true:', newConfigManager.isEnabled());

  // Test 5: Env var takes precedence over config
  console.log('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
  console.log('Test 5: Environment variable precedence');

  // Enable via config
  // NOTE(review): enable() presumably persists to the on-disk config; the
  // script never restores the previous value afterwards — verify that is
  // acceptable for a manual test.
  newConfigManager.enable();
  console.log('After enabling via config:', newConfigManager.isEnabled());

  // But env var should still override
  process.env.N8N_MCP_TELEMETRY_DISABLED = 'true';
  console.log('With env var set (should override config):', newConfigManager.isEnabled());

  console.log('\n✅ All tests completed!');
}
|
||||||
|
|
||||||
|
// Entry point; unhandled errors are only logged (exit code stays 0).
testEnvOverride().catch(console.error);
|
||||||
94
scripts/test-telemetry-integration.ts
Normal file
94
scripts/test-telemetry-integration.ts
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Integration test for the telemetry manager
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { telemetry } from '../src/telemetry/telemetry-manager';
|
||||||
|
|
||||||
|
/**
 * Manual integration test for the TelemetryManager singleton: tracks a
 * session start, several tool invocations, an error, and a workflow, then
 * flushes so buffered events reach the backend before the process exits.
 */
async function testIntegration() {
  console.log('🧪 Testing Telemetry Manager Integration\n');

  // Check status
  console.log('Status:', telemetry.getStatus());

  // Track session start
  console.log('\nTracking session start...');
  telemetry.trackSessionStart();

  // Track tool usage — arguments appear to be (toolName, success,
  // durationMs); confirm against telemetry-manager's signature.
  console.log('Tracking tool usage...');
  telemetry.trackToolUsage('search_nodes', true, 150);
  telemetry.trackToolUsage('get_node_info', true, 75);
  telemetry.trackToolUsage('validate_workflow', false, 200);

  // Track errors
  console.log('Tracking errors...');
  telemetry.trackError('ValidationError', 'workflow_validation', 'validate_workflow', 'Required field missing: nodes array is empty');

  // Track a test workflow. The HTTP node deliberately carries a fake bearer
  // token so credential scrubbing can be checked in the stored row —
  // presumably the sanitizer removes it; verify in the dashboard.
  console.log('Tracking workflow creation...');
  const testWorkflow = {
    nodes: [
      {
        id: '1',
        type: 'n8n-nodes-base.webhook',
        name: 'Webhook',
        position: [0, 0],
        parameters: {
          path: '/test-webhook',
          httpMethod: 'POST'
        }
      },
      {
        id: '2',
        type: 'n8n-nodes-base.httpRequest',
        name: 'HTTP Request',
        position: [250, 0],
        parameters: {
          url: 'https://api.example.com/endpoint',
          method: 'POST',
          authentication: 'genericCredentialType',
          genericAuthType: 'httpHeaderAuth',
          sendHeaders: true,
          headerParameters: {
            parameters: [
              {
                name: 'Authorization',
                // Fake secret — test fixture, not a real credential.
                value: 'Bearer sk-1234567890abcdef'
              }
            ]
          }
        }
      },
      {
        id: '3',
        type: 'n8n-nodes-base.slack',
        name: 'Slack',
        position: [500, 0],
        parameters: {
          channel: '#notifications',
          text: 'Workflow completed!'
        }
      }
    ],
    // Linear chain: webhook (1) -> http (2) -> slack (3).
    connections: {
      '1': {
        main: [[{ node: '2', type: 'main', index: 0 }]]
      },
      '2': {
        main: [[{ node: '3', type: 'main', index: 0 }]]
      }
    }
  };

  telemetry.trackWorkflowCreation(testWorkflow, true);

  // Force flush so buffered events are sent before the script exits.
  console.log('\nFlushing telemetry data...');
  await telemetry.flush();

  console.log('\n✅ Telemetry integration test completed!');
  console.log('Check your Supabase dashboard for the telemetry data.');
}
|
||||||
|
|
||||||
|
// Entry point; unhandled errors are only logged (exit code stays 0).
testIntegration().catch(console.error);
|
||||||
68
scripts/test-telemetry-no-select.ts
Normal file
68
scripts/test-telemetry-no-select.ts
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Test telemetry without requesting data back
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createClient } from '@supabase/supabase-js';
|
||||||
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
async function testNoSelect() {
|
||||||
|
const supabaseUrl = process.env.SUPABASE_URL!;
|
||||||
|
const supabaseAnonKey = process.env.SUPABASE_ANON_KEY!;
|
||||||
|
|
||||||
|
console.log('🧪 Telemetry Test (No Select)\n');
|
||||||
|
|
||||||
|
const supabase = createClient(supabaseUrl, supabaseAnonKey, {
|
||||||
|
auth: {
|
||||||
|
persistSession: false,
|
||||||
|
autoRefreshToken: false,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Insert WITHOUT .select() - just fire and forget
|
||||||
|
const testData = {
|
||||||
|
user_id: 'test-' + Date.now(),
|
||||||
|
event: 'test_event',
|
||||||
|
properties: { test: true }
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('Inserting:', testData);
|
||||||
|
|
||||||
|
const { error } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.insert([testData]); // No .select() here!
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
console.error('❌ Failed:', error);
|
||||||
|
} else {
|
||||||
|
console.log('✅ Success! Data inserted (no response data)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test workflow insert too
|
||||||
|
const testWorkflow = {
|
||||||
|
user_id: 'test-' + Date.now(),
|
||||||
|
workflow_hash: 'hash-' + Date.now(),
|
||||||
|
node_count: 3,
|
||||||
|
node_types: ['webhook', 'http', 'slack'],
|
||||||
|
has_trigger: true,
|
||||||
|
has_webhook: true,
|
||||||
|
complexity: 'simple',
|
||||||
|
sanitized_workflow: { nodes: [], connections: {} }
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('\nInserting workflow:', testWorkflow);
|
||||||
|
|
||||||
|
const { error: workflowError } = await supabase
|
||||||
|
.from('telemetry_workflows')
|
||||||
|
.insert([testWorkflow]); // No .select() here!
|
||||||
|
|
||||||
|
if (workflowError) {
|
||||||
|
console.error('❌ Workflow failed:', workflowError);
|
||||||
|
} else {
|
||||||
|
console.log('✅ Workflow inserted successfully!');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point; unhandled errors are only logged (exit code stays 0).
testNoSelect().catch(console.error);
|
||||||
87
scripts/test-telemetry-security.ts
Normal file
87
scripts/test-telemetry-security.ts
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Test that RLS properly protects data
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createClient } from '@supabase/supabase-js';
|
||||||
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
async function testSecurity() {
|
||||||
|
const supabaseUrl = process.env.SUPABASE_URL!;
|
||||||
|
const supabaseAnonKey = process.env.SUPABASE_ANON_KEY!;
|
||||||
|
|
||||||
|
console.log('🔒 Testing Telemetry Security (RLS)\n');
|
||||||
|
|
||||||
|
const supabase = createClient(supabaseUrl, supabaseAnonKey, {
|
||||||
|
auth: {
|
||||||
|
persistSession: false,
|
||||||
|
autoRefreshToken: false,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Test 1: Verify anon can INSERT
|
||||||
|
console.log('Test 1: Anonymous INSERT (should succeed)...');
|
||||||
|
const testData = {
|
||||||
|
user_id: 'security-test-' + Date.now(),
|
||||||
|
event: 'security_test',
|
||||||
|
properties: { test: true }
|
||||||
|
};
|
||||||
|
|
||||||
|
const { error: insertError } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.insert([testData]);
|
||||||
|
|
||||||
|
if (insertError) {
|
||||||
|
console.error('❌ Insert failed:', insertError.message);
|
||||||
|
} else {
|
||||||
|
console.log('✅ Insert succeeded (as expected)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 2: Verify anon CANNOT SELECT
|
||||||
|
console.log('\nTest 2: Anonymous SELECT (should fail)...');
|
||||||
|
const { data, error: selectError } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.select('*')
|
||||||
|
.limit(1);
|
||||||
|
|
||||||
|
if (selectError) {
|
||||||
|
console.log('✅ Select blocked by RLS (as expected):', selectError.message);
|
||||||
|
} else if (data && data.length > 0) {
|
||||||
|
console.error('❌ SECURITY ISSUE: Anon can read data!', data);
|
||||||
|
} else if (data && data.length === 0) {
|
||||||
|
console.log('⚠️ Select returned empty array (might be RLS working)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 3: Verify anon CANNOT UPDATE
|
||||||
|
console.log('\nTest 3: Anonymous UPDATE (should fail)...');
|
||||||
|
const { error: updateError } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.update({ event: 'hacked' })
|
||||||
|
.eq('user_id', 'test');
|
||||||
|
|
||||||
|
if (updateError) {
|
||||||
|
console.log('✅ Update blocked (as expected):', updateError.message);
|
||||||
|
} else {
|
||||||
|
console.error('❌ SECURITY ISSUE: Anon can update data!');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 4: Verify anon CANNOT DELETE
|
||||||
|
console.log('\nTest 4: Anonymous DELETE (should fail)...');
|
||||||
|
const { error: deleteError } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.delete()
|
||||||
|
.eq('user_id', 'test');
|
||||||
|
|
||||||
|
if (deleteError) {
|
||||||
|
console.log('✅ Delete blocked (as expected):', deleteError.message);
|
||||||
|
} else {
|
||||||
|
console.error('❌ SECURITY ISSUE: Anon can delete data!');
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('\n✨ Security test completed!');
|
||||||
|
console.log('Summary: Anonymous users can INSERT (for telemetry) but cannot READ/UPDATE/DELETE');
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point; unhandled errors are only logged (exit code stays 0).
testSecurity().catch(console.error);
|
||||||
45
scripts/test-telemetry-simple.ts
Normal file
45
scripts/test-telemetry-simple.ts
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Simple test to verify telemetry works
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createClient } from '@supabase/supabase-js';
|
||||||
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
async function testSimple() {
|
||||||
|
const supabaseUrl = process.env.SUPABASE_URL!;
|
||||||
|
const supabaseAnonKey = process.env.SUPABASE_ANON_KEY!;
|
||||||
|
|
||||||
|
console.log('🧪 Simple Telemetry Test\n');
|
||||||
|
|
||||||
|
const supabase = createClient(supabaseUrl, supabaseAnonKey, {
|
||||||
|
auth: {
|
||||||
|
persistSession: false,
|
||||||
|
autoRefreshToken: false,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Simple insert
|
||||||
|
const testData = {
|
||||||
|
user_id: 'simple-test-' + Date.now(),
|
||||||
|
event: 'test_event',
|
||||||
|
properties: { test: true }
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('Inserting:', testData);
|
||||||
|
|
||||||
|
const { data, error } = await supabase
|
||||||
|
.from('telemetry_events')
|
||||||
|
.insert([testData])
|
||||||
|
.select();
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
console.error('❌ Failed:', error);
|
||||||
|
} else {
|
||||||
|
console.log('✅ Success! Inserted:', data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point; unhandled errors are only logged (exit code stays 0).
testSimple().catch(console.error);
|
||||||
119
scripts/test-user-id-persistence.ts
Normal file
119
scripts/test-user-id-persistence.ts
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
/**
|
||||||
|
* Test User ID Persistence
|
||||||
|
* Verifies that user IDs are consistent across sessions and modes
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { TelemetryConfigManager } from '../src/telemetry/config-manager';
|
||||||
|
import { hostname, platform, arch, homedir } from 'os';
|
||||||
|
import { createHash } from 'crypto';
|
||||||
|
|
||||||
|
// Asserts that the anonymous telemetry user ID is deterministic (derived
// from stable machine characteristics) and stable across getInstance()
// calls, and warns about Docker environments where it is not persistent.
console.log('=== User ID Persistence Test ===\n');

// Test 1: Verify deterministic ID generation
// Re-derives the expected ID: sha256 over "hostname-platform-arch-homedir",
// first 16 hex chars. NOTE(review): this mirrors TelemetryConfigManager's
// internal scheme — if that scheme ever changes, Test 3 flags the mismatch.
console.log('Test 1: Deterministic ID Generation');
console.log('-----------------------------------');

const machineId = `${hostname()}-${platform()}-${arch()}-${homedir()}`;
const expectedUserId = createHash('sha256')
  .update(machineId)
  .digest('hex')
  .substring(0, 16);

console.log('Machine characteristics:');
console.log('  hostname:', hostname());
console.log('  platform:', platform());
console.log('  arch:', arch());
console.log('  homedir:', homedir());
console.log('\nGenerated machine ID:', machineId);
console.log('Expected user ID:', expectedUserId);

// Test 2: Load actual config
console.log('\n\nTest 2: Actual Config Manager');
console.log('-----------------------------------');

const configManager = TelemetryConfigManager.getInstance();
const actualUserId = configManager.getUserId();
const config = configManager.loadConfig();

console.log('Actual user ID:', actualUserId);
// '||' fallback prints 'Unknown' for ANY falsy value, not just undefined.
console.log('Config first run:', config.firstRun || 'Unknown');
console.log('Config version:', config.version || 'Unknown');
console.log('Telemetry enabled:', config.enabled);

// Test 3: Verify consistency
// The locally derived ID must equal the one the config manager reports.
console.log('\n\nTest 3: Consistency Check');
console.log('-----------------------------------');

const match = actualUserId === expectedUserId;
console.log('User IDs match:', match ? '✓ YES' : '✗ NO');

if (!match) {
  console.log('WARNING: User ID mismatch detected!');
  console.log('This could indicate an implementation issue.');
}

// Test 4: Multiple loads (simulate multiple sessions)
// getInstance() should hand back the same singleton, so all three reads
// must agree.
console.log('\n\nTest 4: Multiple Session Simulation');
console.log('-----------------------------------');

const userId1 = configManager.getUserId();
const userId2 = TelemetryConfigManager.getInstance().getUserId();
const userId3 = configManager.getUserId();

console.log('Session 1 user ID:', userId1);
console.log('Session 2 user ID:', userId2);
console.log('Session 3 user ID:', userId3);

const consistent = userId1 === userId2 && userId2 === userId3;
console.log('All sessions consistent:', consistent ? '✓ YES' : '✗ NO');

// Test 5: Docker environment simulation
console.log('\n\nTest 5: Docker Environment Check');
console.log('-----------------------------------');

const isDocker = process.env.IS_DOCKER === 'true';
console.log('Running in Docker:', isDocker);

if (isDocker) {
  console.log('\n⚠️ DOCKER MODE DETECTED');
  console.log('In Docker, user IDs may change across container recreations because:');
  console.log('  1. Container hostname changes each time');
  console.log('  2. Config file is not persisted (no volume mount)');
  console.log('  3. Each container gets a new ephemeral filesystem');
  console.log('\nRecommendation: Mount ~/.n8n-mcp as a volume for persistent user IDs');
}

// Test 6: Environment variable override check
// Reports which of the known kill-switch variables are set in this shell.
console.log('\n\nTest 6: Environment Variable Override');
console.log('-----------------------------------');

const telemetryDisabledVars = [
  'N8N_MCP_TELEMETRY_DISABLED',
  'TELEMETRY_DISABLED',
  'DISABLE_TELEMETRY'
];

telemetryDisabledVars.forEach(varName => {
  const value = process.env[varName];
  if (value !== undefined) {
    console.log(`${varName}:`, value);
  }
});

console.log('\nTelemetry status:', configManager.isEnabled() ? 'ENABLED' : 'DISABLED');

// Summary
console.log('\n\n=== SUMMARY ===');
console.log('User ID:', actualUserId);
console.log('Deterministic:', match ? 'YES ✓' : 'NO ✗');
console.log('Persistent across sessions:', consistent ? 'YES ✓' : 'NO ✗');
console.log('Telemetry enabled:', config.enabled ? 'YES' : 'NO');
console.log('Docker mode:', isDocker ? 'YES' : 'NO');

if (isDocker && !process.env.N8N_MCP_CONFIG_VOLUME) {
  console.log('\n⚠️ WARNING: Running in Docker without persistent volume!');
  console.log('User IDs will change on container recreation.');
  console.log('Mount /home/nodejs/.n8n-mcp to persist telemetry config.');
}

console.log('\n');
|
||||||
55
scripts/test-workflow-insert.ts
Normal file
55
scripts/test-workflow-insert.ts
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Test direct workflow insert to Supabase
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { createClient } from '@supabase/supabase-js';
|
||||||
|
|
||||||
|
// Telemetry backend coordinates, hardcoded for this standalone test.
// NOTE(review): ANON_KEY is a Supabase anonymous-role JWT (public by
// design), but committing it makes rotation harder — confirm intentional.
const TELEMETRY_BACKEND = {
  URL: 'https://ydyufsohxdfpopqbubwk.supabase.co',
  ANON_KEY: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InlkeXVmc29oeGRmcG9wcWJ1YndrIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTg3OTYyMDAsImV4cCI6MjA3NDM3MjIwMH0.xESphg6h5ozaDsm4Vla3QnDJGc6Nc_cpfoqTHRynkCk'
};
|
||||||
|
|
||||||
|
async function testWorkflowInsert() {
|
||||||
|
const supabase = createClient(TELEMETRY_BACKEND.URL, TELEMETRY_BACKEND.ANON_KEY, {
|
||||||
|
auth: {
|
||||||
|
persistSession: false,
|
||||||
|
autoRefreshToken: false,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
const testWorkflow = {
|
||||||
|
user_id: 'direct-test-' + Date.now(),
|
||||||
|
workflow_hash: 'hash-direct-' + Date.now(),
|
||||||
|
node_count: 2,
|
||||||
|
node_types: ['webhook', 'http'],
|
||||||
|
has_trigger: true,
|
||||||
|
has_webhook: true,
|
||||||
|
complexity: 'simple' as const,
|
||||||
|
sanitized_workflow: {
|
||||||
|
nodes: [
|
||||||
|
{ id: '1', type: 'webhook', parameters: {} },
|
||||||
|
{ id: '2', type: 'http', parameters: {} }
|
||||||
|
],
|
||||||
|
connections: {}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('Attempting direct insert to telemetry_workflows...');
|
||||||
|
console.log('Data:', JSON.stringify(testWorkflow, null, 2));
|
||||||
|
|
||||||
|
const { data, error } = await supabase
|
||||||
|
.from('telemetry_workflows')
|
||||||
|
.insert([testWorkflow]);
|
||||||
|
|
||||||
|
if (error) {
|
||||||
|
console.error('\n❌ Error:', error);
|
||||||
|
} else {
|
||||||
|
console.log('\n✅ Success! Workflow inserted');
|
||||||
|
if (data) {
|
||||||
|
console.log('Response:', data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry point; unhandled errors are only logged (exit code stays 0).
testWorkflowInsert().catch(console.error);
|
||||||
67
scripts/test-workflow-sanitizer.ts
Normal file
67
scripts/test-workflow-sanitizer.ts
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Test workflow sanitizer
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { WorkflowSanitizer } from '../src/telemetry/workflow-sanitizer';
|
||||||
|
|
||||||
|
const testWorkflow = {
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: 'webhook1',
|
||||||
|
type: 'n8n-nodes-base.webhook',
|
||||||
|
name: 'Webhook',
|
||||||
|
position: [0, 0],
|
||||||
|
parameters: {
|
||||||
|
path: '/test-webhook',
|
||||||
|
httpMethod: 'POST'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'http1',
|
||||||
|
type: 'n8n-nodes-base.httpRequest',
|
||||||
|
name: 'HTTP Request',
|
||||||
|
position: [250, 0],
|
||||||
|
parameters: {
|
||||||
|
url: 'https://api.example.com/endpoint',
|
||||||
|
method: 'GET',
|
||||||
|
authentication: 'genericCredentialType',
|
||||||
|
sendHeaders: true,
|
||||||
|
headerParameters: {
|
||||||
|
parameters: [
|
||||||
|
{
|
||||||
|
name: 'Authorization',
|
||||||
|
value: 'Bearer sk-1234567890abcdef'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'webhook1': {
|
||||||
|
main: [[{ node: 'http1', type: 'main', index: 0 }]]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('🧪 Testing Workflow Sanitizer\n');
|
||||||
|
console.log('Original workflow has', testWorkflow.nodes.length, 'nodes');
|
||||||
|
|
||||||
|
try {
|
||||||
|
const sanitized = WorkflowSanitizer.sanitizeWorkflow(testWorkflow);
|
||||||
|
|
||||||
|
console.log('\n✅ Sanitization successful!');
|
||||||
|
console.log('\nSanitized output:');
|
||||||
|
console.log(JSON.stringify(sanitized, null, 2));
|
||||||
|
|
||||||
|
console.log('\n📊 Metrics:');
|
||||||
|
console.log('- Workflow Hash:', sanitized.workflowHash);
|
||||||
|
console.log('- Node Count:', sanitized.nodeCount);
|
||||||
|
console.log('- Node Types:', sanitized.nodeTypes);
|
||||||
|
console.log('- Has Trigger:', sanitized.hasTrigger);
|
||||||
|
console.log('- Has Webhook:', sanitized.hasWebhook);
|
||||||
|
console.log('- Complexity:', sanitized.complexity);
|
||||||
|
} catch (error) {
|
||||||
|
console.error('❌ Sanitization failed:', error);
|
||||||
|
}
|
||||||
71
scripts/test-workflow-tracking-debug.ts
Normal file
71
scripts/test-workflow-tracking-debug.ts
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
#!/usr/bin/env npx tsx
|
||||||
|
/**
|
||||||
|
* Debug workflow tracking in telemetry manager
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { TelemetryManager } from '../src/telemetry/telemetry-manager';
|
||||||
|
|
||||||
|
// Get the singleton instance
|
||||||
|
const telemetry = TelemetryManager.getInstance();
|
||||||
|
|
||||||
|
const testWorkflow = {
|
||||||
|
nodes: [
|
||||||
|
{
|
||||||
|
id: 'webhook1',
|
||||||
|
type: 'n8n-nodes-base.webhook',
|
||||||
|
name: 'Webhook',
|
||||||
|
position: [0, 0],
|
||||||
|
parameters: {
|
||||||
|
path: '/test-' + Date.now(),
|
||||||
|
httpMethod: 'POST'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'http1',
|
||||||
|
type: 'n8n-nodes-base.httpRequest',
|
||||||
|
name: 'HTTP Request',
|
||||||
|
position: [250, 0],
|
||||||
|
parameters: {
|
||||||
|
url: 'https://api.example.com/data',
|
||||||
|
method: 'GET'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'slack1',
|
||||||
|
type: 'n8n-nodes-base.slack',
|
||||||
|
name: 'Slack',
|
||||||
|
position: [500, 0],
|
||||||
|
parameters: {
|
||||||
|
channel: '#general',
|
||||||
|
text: 'Workflow complete!'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
connections: {
|
||||||
|
'webhook1': {
|
||||||
|
main: [[{ node: 'http1', type: 'main', index: 0 }]]
|
||||||
|
},
|
||||||
|
'http1': {
|
||||||
|
main: [[{ node: 'slack1', type: 'main', index: 0 }]]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log('🧪 Testing Workflow Tracking\n');
|
||||||
|
console.log('Workflow has', testWorkflow.nodes.length, 'nodes');
|
||||||
|
|
||||||
|
// Track the workflow
|
||||||
|
console.log('Calling trackWorkflowCreation...');
|
||||||
|
telemetry.trackWorkflowCreation(testWorkflow, true);
|
||||||
|
|
||||||
|
console.log('Waiting for async processing...');
|
||||||
|
|
||||||
|
// Wait for setImmediate to process
|
||||||
|
setTimeout(async () => {
|
||||||
|
console.log('\nForcing flush...');
|
||||||
|
await telemetry.flush();
|
||||||
|
console.log('✅ Flush complete!');
|
||||||
|
|
||||||
|
console.log('\nWorkflow should now be in the telemetry_workflows table.');
|
||||||
|
console.log('Check with: SELECT * FROM telemetry_workflows ORDER BY created_at DESC LIMIT 1;');
|
||||||
|
}, 2000);
|
||||||
@@ -48,5 +48,27 @@ export function isN8nApiConfigured(): boolean {
|
|||||||
return config !== null;
|
return config !== null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create n8n API configuration from instance context
|
||||||
|
* Used for flexible instance configuration support
|
||||||
|
*/
|
||||||
|
export function getN8nApiConfigFromContext(context: {
|
||||||
|
n8nApiUrl?: string;
|
||||||
|
n8nApiKey?: string;
|
||||||
|
n8nApiTimeout?: number;
|
||||||
|
n8nApiMaxRetries?: number;
|
||||||
|
}): N8nApiConfig | null {
|
||||||
|
if (!context.n8nApiUrl || !context.n8nApiKey) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
baseUrl: context.n8nApiUrl,
|
||||||
|
apiKey: context.n8nApiKey,
|
||||||
|
timeout: context.n8nApiTimeout ?? 30000,
|
||||||
|
maxRetries: context.n8nApiMaxRetries ?? 3,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
// Type export
|
// Type export
|
||||||
export type N8nApiConfig = NonNullable<ReturnType<typeof getN8nApiConfig>>;
|
export type N8nApiConfig = NonNullable<ReturnType<typeof getN8nApiConfig>>;
|
||||||
310
src/data/canonical-ai-tool-examples.json
Normal file
310
src/data/canonical-ai-tool-examples.json
Normal file
@@ -0,0 +1,310 @@
|
|||||||
|
{
|
||||||
|
"description": "Canonical configuration examples for critical AI tools based on FINAL_AI_VALIDATION_SPEC.md",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"examples": [
|
||||||
|
{
|
||||||
|
"node_type": "@n8n/n8n-nodes-langchain.toolHttpRequest",
|
||||||
|
"display_name": "HTTP Request Tool",
|
||||||
|
"examples": [
|
||||||
|
{
|
||||||
|
"name": "Weather API Tool",
|
||||||
|
"use_case": "Fetch current weather data for AI Agent",
|
||||||
|
"complexity": "simple",
|
||||||
|
"parameters": {
|
||||||
|
"method": "GET",
|
||||||
|
"url": "https://api.weatherapi.com/v1/current.json?key={{$credentials.weatherApiKey}}&q={city}",
|
||||||
|
"toolDescription": "Get current weather conditions for a city. Provide the city name (e.g., 'London', 'New York') and receive temperature, humidity, wind speed, and conditions.",
|
||||||
|
"placeholderDefinitions": {
|
||||||
|
"values": [
|
||||||
|
{
|
||||||
|
"name": "city",
|
||||||
|
"description": "Name of the city to get weather for",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"authentication": "predefinedCredentialType",
|
||||||
|
"nodeCredentialType": "weatherApiApi"
|
||||||
|
},
|
||||||
|
"credentials": {
|
||||||
|
"weatherApiApi": {
|
||||||
|
"id": "1",
|
||||||
|
"name": "Weather API account"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"notes": "Example shows proper toolDescription, URL with placeholder, and credential configuration"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "GitHub Issues Tool",
|
||||||
|
"use_case": "Create GitHub issues from AI Agent conversations",
|
||||||
|
"complexity": "medium",
|
||||||
|
"parameters": {
|
||||||
|
"method": "POST",
|
||||||
|
"url": "https://api.github.com/repos/{owner}/{repo}/issues",
|
||||||
|
"toolDescription": "Create a new GitHub issue. Requires owner (repo owner username), repo (repository name), title, and body. Returns the created issue URL and number.",
|
||||||
|
"placeholderDefinitions": {
|
||||||
|
"values": [
|
||||||
|
{
|
||||||
|
"name": "owner",
|
||||||
|
"description": "GitHub repository owner username",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "repo",
|
||||||
|
"description": "Repository name",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "title",
|
||||||
|
"description": "Issue title",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "body",
|
||||||
|
"description": "Issue description and details",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"sendBody": true,
|
||||||
|
"specifyBody": "json",
|
||||||
|
"jsonBody": "={{ { \"title\": $json.title, \"body\": $json.body } }}",
|
||||||
|
"authentication": "predefinedCredentialType",
|
||||||
|
"nodeCredentialType": "githubApi"
|
||||||
|
},
|
||||||
|
"credentials": {
|
||||||
|
"githubApi": {
|
||||||
|
"id": "2",
|
||||||
|
"name": "GitHub credentials"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"notes": "Example shows POST request with JSON body, multiple placeholders, and expressions"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Slack Message Tool",
|
||||||
|
"use_case": "Send Slack messages from AI Agent",
|
||||||
|
"complexity": "simple",
|
||||||
|
"parameters": {
|
||||||
|
"method": "POST",
|
||||||
|
"url": "https://slack.com/api/chat.postMessage",
|
||||||
|
"toolDescription": "Send a message to a Slack channel. Provide channel ID or name (e.g., '#general', 'C1234567890') and message text.",
|
||||||
|
"placeholderDefinitions": {
|
||||||
|
"values": [
|
||||||
|
{
|
||||||
|
"name": "channel",
|
||||||
|
"description": "Channel ID or name (e.g., #general)",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "text",
|
||||||
|
"description": "Message text to send",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"sendHeaders": true,
|
||||||
|
"headerParameters": {
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"name": "Content-Type",
|
||||||
|
"value": "application/json; charset=utf-8"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Authorization",
|
||||||
|
"value": "=Bearer {{$credentials.slackApi.accessToken}}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"sendBody": true,
|
||||||
|
"specifyBody": "json",
|
||||||
|
"jsonBody": "={{ { \"channel\": $json.channel, \"text\": $json.text } }}",
|
||||||
|
"authentication": "predefinedCredentialType",
|
||||||
|
"nodeCredentialType": "slackApi"
|
||||||
|
},
|
||||||
|
"credentials": {
|
||||||
|
"slackApi": {
|
||||||
|
"id": "3",
|
||||||
|
"name": "Slack account"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"notes": "Example shows headers with credential expressions and JSON body construction"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"node_type": "@n8n/n8n-nodes-langchain.toolCode",
|
||||||
|
"display_name": "Code Tool",
|
||||||
|
"examples": [
|
||||||
|
{
|
||||||
|
"name": "Calculate Shipping Cost",
|
||||||
|
"use_case": "Calculate shipping costs based on weight and distance",
|
||||||
|
"complexity": "simple",
|
||||||
|
"parameters": {
|
||||||
|
"name": "calculate_shipping_cost",
|
||||||
|
"description": "Calculate shipping cost based on package weight (in kg) and distance (in km). Returns the cost in USD.",
|
||||||
|
"language": "javaScript",
|
||||||
|
"code": "const baseRate = 5;\nconst perKgRate = 2;\nconst perKmRate = 0.1;\n\nconst weight = $input.weight || 0;\nconst distance = $input.distance || 0;\n\nconst cost = baseRate + (weight * perKgRate) + (distance * perKmRate);\n\nreturn { cost: parseFloat(cost.toFixed(2)), currency: 'USD' };",
|
||||||
|
"specifyInputSchema": true,
|
||||||
|
"schemaType": "manual",
|
||||||
|
"inputSchema": "{\n \"type\": \"object\",\n \"properties\": {\n \"weight\": {\n \"type\": \"number\",\n \"description\": \"Package weight in kilograms\"\n },\n \"distance\": {\n \"type\": \"number\",\n \"description\": \"Shipping distance in kilometers\"\n }\n },\n \"required\": [\"weight\", \"distance\"]\n}"
|
||||||
|
},
|
||||||
|
"notes": "Example shows proper function naming, detailed description, input schema, and return value"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Format Customer Data",
|
||||||
|
"use_case": "Transform and validate customer information",
|
||||||
|
"complexity": "medium",
|
||||||
|
"parameters": {
|
||||||
|
"name": "format_customer_data",
|
||||||
|
"description": "Format and validate customer data. Takes raw customer info (name, email, phone) and returns formatted object with validation status.",
|
||||||
|
"language": "javaScript",
|
||||||
|
"code": "const { name, email, phone } = $input;\n\n// Validation\nconst emailRegex = /^[^\\s@]+@[^\\s@]+\\.[^\\s@]+$/;\nconst phoneRegex = /^\\+?[1-9]\\d{1,14}$/;\n\nconst errors = [];\nif (!emailRegex.test(email)) errors.push('Invalid email format');\nif (!phoneRegex.test(phone)) errors.push('Invalid phone format');\n\n// Formatting\nconst formatted = {\n name: name.trim(),\n email: email.toLowerCase().trim(),\n phone: phone.replace(/\\s/g, ''),\n valid: errors.length === 0,\n errors: errors\n};\n\nreturn formatted;",
|
||||||
|
"specifyInputSchema": true,
|
||||||
|
"schemaType": "manual",
|
||||||
|
"inputSchema": "{\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\",\n \"description\": \"Customer full name\"\n },\n \"email\": {\n \"type\": \"string\",\n \"description\": \"Customer email address\"\n },\n \"phone\": {\n \"type\": \"string\",\n \"description\": \"Customer phone number\"\n }\n },\n \"required\": [\"name\", \"email\", \"phone\"]\n}"
|
||||||
|
},
|
||||||
|
"notes": "Example shows data validation, formatting, and structured error handling"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Parse Date Range",
|
||||||
|
"use_case": "Convert natural language date ranges to ISO format",
|
||||||
|
"complexity": "medium",
|
||||||
|
"parameters": {
|
||||||
|
"name": "parse_date_range",
|
||||||
|
"description": "Parse natural language date ranges (e.g., 'last 7 days', 'this month', 'Q1 2024') into start and end dates in ISO format.",
|
||||||
|
"language": "javaScript",
|
||||||
|
"code": "const input = $input.dateRange || '';\nconst now = new Date();\nlet start, end;\n\nif (input.includes('last') && input.includes('days')) {\n const days = parseInt(input.match(/\\d+/)[0]);\n start = new Date(now.getTime() - (days * 24 * 60 * 60 * 1000));\n end = now;\n} else if (input === 'this month') {\n start = new Date(now.getFullYear(), now.getMonth(), 1);\n end = new Date(now.getFullYear(), now.getMonth() + 1, 0);\n} else if (input === 'this year') {\n start = new Date(now.getFullYear(), 0, 1);\n end = new Date(now.getFullYear(), 11, 31);\n} else {\n throw new Error('Unsupported date range format');\n}\n\nreturn {\n startDate: start.toISOString().split('T')[0],\n endDate: end.toISOString().split('T')[0],\n daysCount: Math.ceil((end - start) / (24 * 60 * 60 * 1000))\n};",
|
||||||
|
"specifyInputSchema": true,
|
||||||
|
"schemaType": "manual",
|
||||||
|
"inputSchema": "{\n \"type\": \"object\",\n \"properties\": {\n \"dateRange\": {\n \"type\": \"string\",\n \"description\": \"Natural language date range (e.g., 'last 7 days', 'this month')\"\n }\n },\n \"required\": [\"dateRange\"]\n}"
|
||||||
|
},
|
||||||
|
"notes": "Example shows complex logic, error handling, and date manipulation"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"node_type": "@n8n/n8n-nodes-langchain.agentTool",
|
||||||
|
"display_name": "AI Agent Tool",
|
||||||
|
"examples": [
|
||||||
|
{
|
||||||
|
"name": "Research Specialist Agent",
|
||||||
|
"use_case": "Specialized sub-agent for in-depth research tasks",
|
||||||
|
"complexity": "medium",
|
||||||
|
"parameters": {
|
||||||
|
"name": "research_specialist",
|
||||||
|
"description": "Expert research agent that can search multiple sources, synthesize information, and provide comprehensive analysis on any topic. Use this when you need detailed, well-researched information.",
|
||||||
|
"promptType": "define",
|
||||||
|
"text": "You are a research specialist. Your role is to:\n1. Search for relevant information from multiple sources\n2. Synthesize findings into a coherent analysis\n3. Cite your sources\n4. Highlight key insights and patterns\n\nProvide thorough, well-structured research that answers the user's question comprehensively.",
|
||||||
|
"systemMessage": "You are a meticulous researcher focused on accuracy and completeness. Always cite sources and acknowledge limitations in available information."
|
||||||
|
},
|
||||||
|
"connections": {
|
||||||
|
"ai_languageModel": [
|
||||||
|
{
|
||||||
|
"node": "OpenAI GPT-4",
|
||||||
|
"type": "ai_languageModel",
|
||||||
|
"index": 0
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"ai_tool": [
|
||||||
|
{
|
||||||
|
"node": "SerpApi Tool",
|
||||||
|
"type": "ai_tool",
|
||||||
|
"index": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"node": "Wikipedia Tool",
|
||||||
|
"type": "ai_tool",
|
||||||
|
"index": 0
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"notes": "Example shows specialized sub-agent with custom prompt, specific system message, and multiple search tools"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Data Analysis Agent",
|
||||||
|
"use_case": "Sub-agent for analyzing and visualizing data",
|
||||||
|
"complexity": "complex",
|
||||||
|
"parameters": {
|
||||||
|
"name": "data_analyst",
|
||||||
|
"description": "Data analysis specialist that can process datasets, calculate statistics, identify trends, and generate insights. Use for any data analysis or statistical questions.",
|
||||||
|
"promptType": "auto",
|
||||||
|
"systemMessage": "You are a data analyst with expertise in statistics and data interpretation. Break down complex datasets into understandable insights. Use the Code Tool to perform calculations when needed.",
|
||||||
|
"maxIterations": 10
|
||||||
|
},
|
||||||
|
"connections": {
|
||||||
|
"ai_languageModel": [
|
||||||
|
{
|
||||||
|
"node": "Anthropic Claude",
|
||||||
|
"type": "ai_languageModel",
|
||||||
|
"index": 0
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"ai_tool": [
|
||||||
|
{
|
||||||
|
"node": "Code Tool - Stats",
|
||||||
|
"type": "ai_tool",
|
||||||
|
"index": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"node": "HTTP Request Tool - Data API",
|
||||||
|
"type": "ai_tool",
|
||||||
|
"index": 0
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"notes": "Example shows auto prompt type with specialized system message and analytical tools"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"node_type": "@n8n/n8n-nodes-langchain.mcpClientTool",
|
||||||
|
"display_name": "MCP Client Tool",
|
||||||
|
"examples": [
|
||||||
|
{
|
||||||
|
"name": "Filesystem MCP Tool",
|
||||||
|
"use_case": "Access filesystem operations via MCP protocol",
|
||||||
|
"complexity": "medium",
|
||||||
|
"parameters": {
|
||||||
|
"description": "Access file system operations through MCP. Can read files, list directories, create files, and search for content.",
|
||||||
|
"mcpServer": {
|
||||||
|
"transport": "stdio",
|
||||||
|
"command": "npx",
|
||||||
|
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/allowed/directory"]
|
||||||
|
},
|
||||||
|
"tool": "read_file"
|
||||||
|
},
|
||||||
|
"notes": "Example shows stdio transport MCP server with filesystem access tool"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Puppeteer MCP Tool",
|
||||||
|
"use_case": "Browser automation via MCP for AI Agents",
|
||||||
|
"complexity": "complex",
|
||||||
|
"parameters": {
|
||||||
|
"description": "Control a web browser to navigate pages, take screenshots, and extract content. Useful for web scraping and automated testing.",
|
||||||
|
"mcpServer": {
|
||||||
|
"transport": "stdio",
|
||||||
|
"command": "npx",
|
||||||
|
"args": ["-y", "@modelcontextprotocol/server-puppeteer"]
|
||||||
|
},
|
||||||
|
"tool": "puppeteer_navigate"
|
||||||
|
},
|
||||||
|
"notes": "Example shows Puppeteer MCP server for browser automation"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Database MCP Tool",
|
||||||
|
"use_case": "Query databases via MCP protocol",
|
||||||
|
"complexity": "complex",
|
||||||
|
"parameters": {
|
||||||
|
"description": "Execute SQL queries and retrieve data from PostgreSQL databases. Supports SELECT, INSERT, UPDATE operations with proper escaping.",
|
||||||
|
"mcpServer": {
|
||||||
|
"transport": "sse",
|
||||||
|
"url": "https://mcp-server.example.com/database"
|
||||||
|
},
|
||||||
|
"tool": "execute_query"
|
||||||
|
},
|
||||||
|
"notes": "Example shows SSE transport MCP server for remote database access"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -376,52 +376,71 @@ class SQLJSStatement implements PreparedStatement {
|
|||||||
constructor(private stmt: any, private onModify: () => void) {}
|
constructor(private stmt: any, private onModify: () => void) {}
|
||||||
|
|
||||||
run(...params: any[]): RunResult {
|
run(...params: any[]): RunResult {
|
||||||
if (params.length > 0) {
|
try {
|
||||||
this.bindParams(params);
|
if (params.length > 0) {
|
||||||
this.stmt.bind(this.boundParams);
|
this.bindParams(params);
|
||||||
|
if (this.boundParams) {
|
||||||
|
this.stmt.bind(this.boundParams);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
this.stmt.run();
|
||||||
|
this.onModify();
|
||||||
|
|
||||||
|
// sql.js doesn't provide changes/lastInsertRowid easily
|
||||||
|
return {
|
||||||
|
changes: 1, // Assume success means 1 change
|
||||||
|
lastInsertRowid: 0
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
this.stmt.reset();
|
||||||
|
throw error;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.stmt.run();
|
|
||||||
this.onModify();
|
|
||||||
|
|
||||||
// sql.js doesn't provide changes/lastInsertRowid easily
|
|
||||||
return {
|
|
||||||
changes: 0,
|
|
||||||
lastInsertRowid: 0
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
get(...params: any[]): any {
|
get(...params: any[]): any {
|
||||||
if (params.length > 0) {
|
try {
|
||||||
this.bindParams(params);
|
if (params.length > 0) {
|
||||||
}
|
this.bindParams(params);
|
||||||
|
if (this.boundParams) {
|
||||||
this.stmt.bind(this.boundParams);
|
this.stmt.bind(this.boundParams);
|
||||||
|
}
|
||||||
if (this.stmt.step()) {
|
}
|
||||||
const result = this.stmt.getAsObject();
|
|
||||||
|
if (this.stmt.step()) {
|
||||||
|
const result = this.stmt.getAsObject();
|
||||||
|
this.stmt.reset();
|
||||||
|
return this.convertIntegerColumns(result);
|
||||||
|
}
|
||||||
|
|
||||||
this.stmt.reset();
|
this.stmt.reset();
|
||||||
return this.convertIntegerColumns(result);
|
return undefined;
|
||||||
|
} catch (error) {
|
||||||
|
this.stmt.reset();
|
||||||
|
throw error;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.stmt.reset();
|
|
||||||
return undefined;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
all(...params: any[]): any[] {
|
all(...params: any[]): any[] {
|
||||||
if (params.length > 0) {
|
try {
|
||||||
this.bindParams(params);
|
if (params.length > 0) {
|
||||||
|
this.bindParams(params);
|
||||||
|
if (this.boundParams) {
|
||||||
|
this.stmt.bind(this.boundParams);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const results: any[] = [];
|
||||||
|
while (this.stmt.step()) {
|
||||||
|
results.push(this.convertIntegerColumns(this.stmt.getAsObject()));
|
||||||
|
}
|
||||||
|
|
||||||
|
this.stmt.reset();
|
||||||
|
return results;
|
||||||
|
} catch (error) {
|
||||||
|
this.stmt.reset();
|
||||||
|
throw error;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.stmt.bind(this.boundParams);
|
|
||||||
|
|
||||||
const results: any[] = [];
|
|
||||||
while (this.stmt.step()) {
|
|
||||||
results.push(this.convertIntegerColumns(this.stmt.getAsObject()));
|
|
||||||
}
|
|
||||||
|
|
||||||
this.stmt.reset();
|
|
||||||
return results;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
iterate(...params: any[]): IterableIterator<any> {
|
iterate(...params: any[]): IterableIterator<any> {
|
||||||
@@ -455,12 +474,18 @@ class SQLJSStatement implements PreparedStatement {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private bindParams(params: any[]): void {
|
private bindParams(params: any[]): void {
|
||||||
if (params.length === 1 && typeof params[0] === 'object' && !Array.isArray(params[0])) {
|
if (params.length === 0) {
|
||||||
|
this.boundParams = null;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (params.length === 1 && typeof params[0] === 'object' && !Array.isArray(params[0]) && params[0] !== null) {
|
||||||
// Named parameters passed as object
|
// Named parameters passed as object
|
||||||
this.boundParams = params[0];
|
this.boundParams = params[0];
|
||||||
} else {
|
} else {
|
||||||
// Positional parameters - sql.js uses array for positional
|
// Positional parameters - sql.js uses array for positional
|
||||||
this.boundParams = params;
|
// Filter out undefined values that might cause issues
|
||||||
|
this.boundParams = params.map(p => p === undefined ? null : p);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
59
src/database/migrations/add-template-node-configs.sql
Normal file
59
src/database/migrations/add-template-node-configs.sql
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
-- Migration: Add template_node_configs table
|
||||||
|
-- Run during `npm run rebuild` or `npm run fetch:templates`
|
||||||
|
-- This migration is idempotent - safe to run multiple times
|
||||||
|
|
||||||
|
-- Create table if it doesn't exist
|
||||||
|
CREATE TABLE IF NOT EXISTS template_node_configs (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
node_type TEXT NOT NULL,
|
||||||
|
template_id INTEGER NOT NULL,
|
||||||
|
template_name TEXT NOT NULL,
|
||||||
|
template_views INTEGER DEFAULT 0,
|
||||||
|
|
||||||
|
-- Node configuration (extracted from workflow)
|
||||||
|
node_name TEXT, -- Node name in workflow (e.g., "HTTP Request")
|
||||||
|
parameters_json TEXT NOT NULL, -- JSON: node.parameters
|
||||||
|
credentials_json TEXT, -- JSON: node.credentials (if present)
|
||||||
|
|
||||||
|
-- Pre-calculated metadata for filtering
|
||||||
|
has_credentials INTEGER DEFAULT 0,
|
||||||
|
has_expressions INTEGER DEFAULT 0, -- Contains {{...}} or $json/$node
|
||||||
|
complexity TEXT CHECK(complexity IN ('simple', 'medium', 'complex')),
|
||||||
|
use_cases TEXT, -- JSON array from template.metadata.use_cases
|
||||||
|
|
||||||
|
-- Pre-calculated ranking (1 = best, 2 = second best, etc.)
|
||||||
|
rank INTEGER DEFAULT 0,
|
||||||
|
|
||||||
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (template_id) REFERENCES templates(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Create indexes if they don't exist
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_config_node_type_rank
|
||||||
|
ON template_node_configs(node_type, rank);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_config_complexity
|
||||||
|
ON template_node_configs(node_type, complexity, rank);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_config_auth
|
||||||
|
ON template_node_configs(node_type, has_credentials, rank);
|
||||||
|
|
||||||
|
-- Create view if it doesn't exist
|
||||||
|
CREATE VIEW IF NOT EXISTS ranked_node_configs AS
|
||||||
|
SELECT
|
||||||
|
node_type,
|
||||||
|
template_name,
|
||||||
|
template_views,
|
||||||
|
parameters_json,
|
||||||
|
credentials_json,
|
||||||
|
has_credentials,
|
||||||
|
has_expressions,
|
||||||
|
complexity,
|
||||||
|
use_cases,
|
||||||
|
rank
|
||||||
|
FROM template_node_configs
|
||||||
|
WHERE rank <= 5 -- Top 5 per node type
|
||||||
|
ORDER BY node_type, rank;
|
||||||
|
|
||||||
|
-- Note: Actual data population is handled by the fetch-templates script
|
||||||
|
-- This migration only creates the schema
|
||||||
@@ -1,16 +1,18 @@
|
|||||||
import { DatabaseAdapter } from './database-adapter';
|
import { DatabaseAdapter } from './database-adapter';
|
||||||
import { ParsedNode } from '../parsers/node-parser';
|
import { ParsedNode } from '../parsers/node-parser';
|
||||||
import { SQLiteStorageService } from '../services/sqlite-storage-service';
|
import { SQLiteStorageService } from '../services/sqlite-storage-service';
|
||||||
|
import { NodeTypeNormalizer } from '../utils/node-type-normalizer';
|
||||||
|
|
||||||
export class NodeRepository {
|
export class NodeRepository {
|
||||||
private db: DatabaseAdapter;
|
private db: DatabaseAdapter;
|
||||||
|
|
||||||
constructor(dbOrService: DatabaseAdapter | SQLiteStorageService) {
|
constructor(dbOrService: DatabaseAdapter | SQLiteStorageService) {
|
||||||
if ('db' in dbOrService) {
|
if (dbOrService instanceof SQLiteStorageService) {
|
||||||
this.db = dbOrService.db;
|
this.db = dbOrService.db;
|
||||||
} else {
|
return;
|
||||||
this.db = dbOrService;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.db = dbOrService;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -50,33 +52,30 @@ export class NodeRepository {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Get node with proper JSON deserialization
|
* Get node with proper JSON deserialization
|
||||||
|
* Automatically normalizes node type to full form for consistent lookups
|
||||||
*/
|
*/
|
||||||
getNode(nodeType: string): any {
|
getNode(nodeType: string): any {
|
||||||
|
// Normalize to full form first for consistent lookups
|
||||||
|
const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||||
|
|
||||||
const row = this.db.prepare(`
|
const row = this.db.prepare(`
|
||||||
SELECT * FROM nodes WHERE node_type = ?
|
SELECT * FROM nodes WHERE node_type = ?
|
||||||
`).get(nodeType) as any;
|
`).get(normalizedType) as any;
|
||||||
|
|
||||||
|
// Fallback: try original type if normalization didn't help (e.g., community nodes)
|
||||||
|
if (!row && normalizedType !== nodeType) {
|
||||||
|
const originalRow = this.db.prepare(`
|
||||||
|
SELECT * FROM nodes WHERE node_type = ?
|
||||||
|
`).get(nodeType) as any;
|
||||||
|
|
||||||
|
if (originalRow) {
|
||||||
|
return this.parseNodeRow(originalRow);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (!row) return null;
|
if (!row) return null;
|
||||||
|
|
||||||
return {
|
return this.parseNodeRow(row);
|
||||||
nodeType: row.node_type,
|
|
||||||
displayName: row.display_name,
|
|
||||||
description: row.description,
|
|
||||||
category: row.category,
|
|
||||||
developmentStyle: row.development_style,
|
|
||||||
package: row.package_name,
|
|
||||||
isAITool: Number(row.is_ai_tool) === 1,
|
|
||||||
isTrigger: Number(row.is_trigger) === 1,
|
|
||||||
isWebhook: Number(row.is_webhook) === 1,
|
|
||||||
isVersioned: Number(row.is_versioned) === 1,
|
|
||||||
version: row.version,
|
|
||||||
properties: this.safeJsonParse(row.properties_schema, []),
|
|
||||||
operations: this.safeJsonParse(row.operations, []),
|
|
||||||
credentials: this.safeJsonParse(row.credentials_required, []),
|
|
||||||
hasDocumentation: !!row.documentation,
|
|
||||||
outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
|
|
||||||
outputNames: row.output_names ? this.safeJsonParse(row.output_names, null) : null
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -124,10 +123,22 @@ export class NodeRepository {
|
|||||||
return rows.map(row => this.parseNodeRow(row));
|
return rows.map(row => this.parseNodeRow(row));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Legacy LIKE-based search method for direct repository usage.
|
||||||
|
*
|
||||||
|
* NOTE: MCP tools do NOT use this method. They use MCPServer.searchNodes()
|
||||||
|
* which automatically detects and uses FTS5 full-text search when available.
|
||||||
|
* See src/mcp/server.ts:1135-1148 for FTS5 implementation.
|
||||||
|
*
|
||||||
|
* This method remains for:
|
||||||
|
* - Direct repository access in scripts/benchmarks
|
||||||
|
* - Fallback when FTS5 table doesn't exist
|
||||||
|
* - Legacy compatibility
|
||||||
|
*/
|
||||||
searchNodes(query: string, mode: 'OR' | 'AND' | 'FUZZY' = 'OR', limit: number = 20): any[] {
|
searchNodes(query: string, mode: 'OR' | 'AND' | 'FUZZY' = 'OR', limit: number = 20): any[] {
|
||||||
let sql = '';
|
let sql = '';
|
||||||
const params: any[] = [];
|
const params: any[] = [];
|
||||||
|
|
||||||
if (mode === 'FUZZY') {
|
if (mode === 'FUZZY') {
|
||||||
// Simple fuzzy search
|
// Simple fuzzy search
|
||||||
sql = `
|
sql = `
|
||||||
@@ -248,4 +259,207 @@ export class NodeRepository {
|
|||||||
outputNames: row.output_names ? this.safeJsonParse(row.output_names, null) : null
|
outputNames: row.output_names ? this.safeJsonParse(row.output_names, null) : null
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get operations for a specific node, optionally filtered by resource
|
||||||
|
*/
|
||||||
|
getNodeOperations(nodeType: string, resource?: string): any[] {
|
||||||
|
const node = this.getNode(nodeType);
|
||||||
|
if (!node) return [];
|
||||||
|
|
||||||
|
const operations: any[] = [];
|
||||||
|
|
||||||
|
// Parse operations field
|
||||||
|
if (node.operations) {
|
||||||
|
if (Array.isArray(node.operations)) {
|
||||||
|
operations.push(...node.operations);
|
||||||
|
} else if (typeof node.operations === 'object') {
|
||||||
|
// Operations might be grouped by resource
|
||||||
|
if (resource && node.operations[resource]) {
|
||||||
|
return node.operations[resource];
|
||||||
|
} else {
|
||||||
|
// Return all operations
|
||||||
|
Object.values(node.operations).forEach(ops => {
|
||||||
|
if (Array.isArray(ops)) {
|
||||||
|
operations.push(...ops);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also check properties for operation fields
|
||||||
|
if (node.properties && Array.isArray(node.properties)) {
|
||||||
|
for (const prop of node.properties) {
|
||||||
|
if (prop.name === 'operation' && prop.options) {
|
||||||
|
// If resource is specified, filter by displayOptions
|
||||||
|
if (resource && prop.displayOptions?.show?.resource) {
|
||||||
|
const allowedResources = Array.isArray(prop.displayOptions.show.resource)
|
||||||
|
? prop.displayOptions.show.resource
|
||||||
|
: [prop.displayOptions.show.resource];
|
||||||
|
if (!allowedResources.includes(resource)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add operations from this property
|
||||||
|
operations.push(...prop.options);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return operations;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all resources defined for a node
|
||||||
|
*/
|
||||||
|
getNodeResources(nodeType: string): any[] {
|
||||||
|
const node = this.getNode(nodeType);
|
||||||
|
if (!node || !node.properties) return [];
|
||||||
|
|
||||||
|
const resources: any[] = [];
|
||||||
|
|
||||||
|
// Look for resource property
|
||||||
|
for (const prop of node.properties) {
|
||||||
|
if (prop.name === 'resource' && prop.options) {
|
||||||
|
resources.push(...prop.options);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resources;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get operations that are valid for a specific resource
|
||||||
|
*/
|
||||||
|
getOperationsForResource(nodeType: string, resource: string): any[] {
|
||||||
|
const node = this.getNode(nodeType);
|
||||||
|
if (!node || !node.properties) return [];
|
||||||
|
|
||||||
|
const operations: any[] = [];
|
||||||
|
|
||||||
|
// Find operation properties that are visible for this resource
|
||||||
|
for (const prop of node.properties) {
|
||||||
|
if (prop.name === 'operation' && prop.displayOptions?.show?.resource) {
|
||||||
|
const allowedResources = Array.isArray(prop.displayOptions.show.resource)
|
||||||
|
? prop.displayOptions.show.resource
|
||||||
|
: [prop.displayOptions.show.resource];
|
||||||
|
|
||||||
|
if (allowedResources.includes(resource) && prop.options) {
|
||||||
|
operations.push(...prop.options);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return operations;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all operations across all nodes (for analysis)
|
||||||
|
*/
|
||||||
|
getAllOperations(): Map<string, any[]> {
|
||||||
|
const allOperations = new Map<string, any[]>();
|
||||||
|
const nodes = this.getAllNodes();
|
||||||
|
|
||||||
|
for (const node of nodes) {
|
||||||
|
const operations = this.getNodeOperations(node.nodeType);
|
||||||
|
if (operations.length > 0) {
|
||||||
|
allOperations.set(node.nodeType, operations);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return allOperations;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all resources across all nodes (for analysis)
|
||||||
|
*/
|
||||||
|
getAllResources(): Map<string, any[]> {
|
||||||
|
const allResources = new Map<string, any[]>();
|
||||||
|
const nodes = this.getAllNodes();
|
||||||
|
|
||||||
|
for (const node of nodes) {
|
||||||
|
const resources = this.getNodeResources(node.nodeType);
|
||||||
|
if (resources.length > 0) {
|
||||||
|
allResources.set(node.nodeType, resources);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return allResources;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get default values for node properties
|
||||||
|
*/
|
||||||
|
getNodePropertyDefaults(nodeType: string): Record<string, any> {
|
||||||
|
try {
|
||||||
|
const node = this.getNode(nodeType);
|
||||||
|
if (!node || !node.properties) return {};
|
||||||
|
|
||||||
|
const defaults: Record<string, any> = {};
|
||||||
|
|
||||||
|
for (const prop of node.properties) {
|
||||||
|
if (prop.name && prop.default !== undefined) {
|
||||||
|
defaults[prop.name] = prop.default;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return defaults;
|
||||||
|
} catch (error) {
|
||||||
|
// Log error and return empty defaults rather than throwing
|
||||||
|
console.error(`Error getting property defaults for ${nodeType}:`, error);
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the default operation for a specific resource
|
||||||
|
*/
|
||||||
|
getDefaultOperationForResource(nodeType: string, resource?: string): string | undefined {
|
||||||
|
try {
|
||||||
|
const node = this.getNode(nodeType);
|
||||||
|
if (!node || !node.properties) return undefined;
|
||||||
|
|
||||||
|
// Find operation property that's visible for this resource
|
||||||
|
for (const prop of node.properties) {
|
||||||
|
if (prop.name === 'operation') {
|
||||||
|
// If there's a resource dependency, check if it matches
|
||||||
|
if (resource && prop.displayOptions?.show?.resource) {
|
||||||
|
// Validate displayOptions structure
|
||||||
|
const resourceDep = prop.displayOptions.show.resource;
|
||||||
|
if (!Array.isArray(resourceDep) && typeof resourceDep !== 'string') {
|
||||||
|
continue; // Skip malformed displayOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
const allowedResources = Array.isArray(resourceDep)
|
||||||
|
? resourceDep
|
||||||
|
: [resourceDep];
|
||||||
|
|
||||||
|
if (!allowedResources.includes(resource)) {
|
||||||
|
continue; // This operation property doesn't apply to our resource
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the default value if it exists
|
||||||
|
if (prop.default !== undefined) {
|
||||||
|
return prop.default;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no default but has options, return the first option's value
|
||||||
|
if (prop.options && Array.isArray(prop.options) && prop.options.length > 0) {
|
||||||
|
const firstOption = prop.options[0];
|
||||||
|
return typeof firstOption === 'string' ? firstOption : firstOption.value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// Log error and return undefined rather than throwing
|
||||||
|
// This ensures validation continues even with malformed node data
|
||||||
|
console.error(`Error getting default operation for ${nodeType}:`, error);
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
0
src/database/nodes.db
Normal file
0
src/database/nodes.db
Normal file
@@ -25,6 +25,40 @@ CREATE INDEX IF NOT EXISTS idx_package ON nodes(package_name);
|
|||||||
CREATE INDEX IF NOT EXISTS idx_ai_tool ON nodes(is_ai_tool);
|
CREATE INDEX IF NOT EXISTS idx_ai_tool ON nodes(is_ai_tool);
|
||||||
CREATE INDEX IF NOT EXISTS idx_category ON nodes(category);
|
CREATE INDEX IF NOT EXISTS idx_category ON nodes(category);
|
||||||
|
|
||||||
|
-- FTS5 full-text search index for nodes
|
||||||
|
CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
|
||||||
|
node_type,
|
||||||
|
display_name,
|
||||||
|
description,
|
||||||
|
documentation,
|
||||||
|
operations,
|
||||||
|
content=nodes,
|
||||||
|
content_rowid=rowid
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Triggers to keep FTS5 in sync with nodes table
|
||||||
|
CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes
|
||||||
|
BEGIN
|
||||||
|
INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
|
||||||
|
VALUES (new.rowid, new.node_type, new.display_name, new.description, new.documentation, new.operations);
|
||||||
|
END;
|
||||||
|
|
||||||
|
CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes
|
||||||
|
BEGIN
|
||||||
|
UPDATE nodes_fts
|
||||||
|
SET node_type = new.node_type,
|
||||||
|
display_name = new.display_name,
|
||||||
|
description = new.description,
|
||||||
|
documentation = new.documentation,
|
||||||
|
operations = new.operations
|
||||||
|
WHERE rowid = new.rowid;
|
||||||
|
END;
|
||||||
|
|
||||||
|
CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes
|
||||||
|
BEGIN
|
||||||
|
DELETE FROM nodes_fts WHERE rowid = old.rowid;
|
||||||
|
END;
|
||||||
|
|
||||||
-- Templates table for n8n workflow templates
|
-- Templates table for n8n workflow templates
|
||||||
CREATE TABLE IF NOT EXISTS templates (
|
CREATE TABLE IF NOT EXISTS templates (
|
||||||
id INTEGER PRIMARY KEY,
|
id INTEGER PRIMARY KEY,
|
||||||
@@ -35,19 +69,79 @@ CREATE TABLE IF NOT EXISTS templates (
|
|||||||
author_username TEXT,
|
author_username TEXT,
|
||||||
author_verified INTEGER DEFAULT 0,
|
author_verified INTEGER DEFAULT 0,
|
||||||
nodes_used TEXT, -- JSON array of node types
|
nodes_used TEXT, -- JSON array of node types
|
||||||
workflow_json TEXT NOT NULL, -- Complete workflow JSON
|
workflow_json TEXT, -- Complete workflow JSON (deprecated, use workflow_json_compressed)
|
||||||
|
workflow_json_compressed TEXT, -- Compressed workflow JSON (base64 encoded gzip)
|
||||||
categories TEXT, -- JSON array of categories
|
categories TEXT, -- JSON array of categories
|
||||||
views INTEGER DEFAULT 0,
|
views INTEGER DEFAULT 0,
|
||||||
created_at DATETIME,
|
created_at DATETIME,
|
||||||
updated_at DATETIME,
|
updated_at DATETIME,
|
||||||
url TEXT,
|
url TEXT,
|
||||||
scraped_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
scraped_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
metadata_json TEXT, -- Structured metadata from OpenAI (JSON)
|
||||||
|
metadata_generated_at DATETIME -- When metadata was generated
|
||||||
);
|
);
|
||||||
|
|
||||||
-- Templates indexes
|
-- Templates indexes
|
||||||
CREATE INDEX IF NOT EXISTS idx_template_nodes ON templates(nodes_used);
|
CREATE INDEX IF NOT EXISTS idx_template_nodes ON templates(nodes_used);
|
||||||
CREATE INDEX IF NOT EXISTS idx_template_updated ON templates(updated_at);
|
CREATE INDEX IF NOT EXISTS idx_template_updated ON templates(updated_at);
|
||||||
CREATE INDEX IF NOT EXISTS idx_template_name ON templates(name);
|
CREATE INDEX IF NOT EXISTS idx_template_name ON templates(name);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_template_metadata ON templates(metadata_generated_at);
|
||||||
|
|
||||||
-- Note: FTS5 tables are created conditionally at runtime if FTS5 is supported
|
-- Pre-extracted node configurations from templates
|
||||||
-- See template-repository.ts initializeFTS5() method
|
-- This table stores the top node configurations from popular templates
|
||||||
|
-- Provides fast access to real-world configuration examples
|
||||||
|
CREATE TABLE IF NOT EXISTS template_node_configs (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
node_type TEXT NOT NULL,
|
||||||
|
template_id INTEGER NOT NULL,
|
||||||
|
template_name TEXT NOT NULL,
|
||||||
|
template_views INTEGER DEFAULT 0,
|
||||||
|
|
||||||
|
-- Node configuration (extracted from workflow)
|
||||||
|
node_name TEXT, -- Node name in workflow (e.g., "HTTP Request")
|
||||||
|
parameters_json TEXT NOT NULL, -- JSON: node.parameters
|
||||||
|
credentials_json TEXT, -- JSON: node.credentials (if present)
|
||||||
|
|
||||||
|
-- Pre-calculated metadata for filtering
|
||||||
|
has_credentials INTEGER DEFAULT 0,
|
||||||
|
has_expressions INTEGER DEFAULT 0, -- Contains {{...}} or $json/$node
|
||||||
|
complexity TEXT CHECK(complexity IN ('simple', 'medium', 'complex')),
|
||||||
|
use_cases TEXT, -- JSON array from template.metadata.use_cases
|
||||||
|
|
||||||
|
-- Pre-calculated ranking (1 = best, 2 = second best, etc.)
|
||||||
|
rank INTEGER DEFAULT 0,
|
||||||
|
|
||||||
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (template_id) REFERENCES templates(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes for fast queries
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_config_node_type_rank
|
||||||
|
ON template_node_configs(node_type, rank);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_config_complexity
|
||||||
|
ON template_node_configs(node_type, complexity, rank);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_config_auth
|
||||||
|
ON template_node_configs(node_type, has_credentials, rank);
|
||||||
|
|
||||||
|
-- View for easy querying of top configs
|
||||||
|
CREATE VIEW IF NOT EXISTS ranked_node_configs AS
|
||||||
|
SELECT
|
||||||
|
node_type,
|
||||||
|
template_name,
|
||||||
|
template_views,
|
||||||
|
parameters_json,
|
||||||
|
credentials_json,
|
||||||
|
has_credentials,
|
||||||
|
has_expressions,
|
||||||
|
complexity,
|
||||||
|
use_cases,
|
||||||
|
rank
|
||||||
|
FROM template_node_configs
|
||||||
|
WHERE rank <= 5 -- Top 5 per node type
|
||||||
|
ORDER BY node_type, rank;
|
||||||
|
|
||||||
|
-- Note: Template FTS5 tables are created conditionally at runtime if FTS5 is supported
|
||||||
|
-- See template-repository.ts initializeFTS5() method
|
||||||
|
-- Node FTS5 table (nodes_fts) is created above during schema initialization
|
||||||
53
src/errors/validation-service-error.ts
Normal file
53
src/errors/validation-service-error.ts
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
/**
|
||||||
|
* Custom error class for validation service failures
|
||||||
|
*/
|
||||||
|
export class ValidationServiceError extends Error {
|
||||||
|
constructor(
|
||||||
|
message: string,
|
||||||
|
public readonly nodeType?: string,
|
||||||
|
public readonly property?: string,
|
||||||
|
public readonly cause?: Error
|
||||||
|
) {
|
||||||
|
super(message);
|
||||||
|
this.name = 'ValidationServiceError';
|
||||||
|
|
||||||
|
// Maintains proper stack trace for where our error was thrown (only available on V8)
|
||||||
|
if (Error.captureStackTrace) {
|
||||||
|
Error.captureStackTrace(this, ValidationServiceError);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create error for JSON parsing failure
|
||||||
|
*/
|
||||||
|
static jsonParseError(nodeType: string, cause: Error): ValidationServiceError {
|
||||||
|
return new ValidationServiceError(
|
||||||
|
`Failed to parse JSON data for node ${nodeType}`,
|
||||||
|
nodeType,
|
||||||
|
undefined,
|
||||||
|
cause
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create error for node not found
|
||||||
|
*/
|
||||||
|
static nodeNotFound(nodeType: string): ValidationServiceError {
|
||||||
|
return new ValidationServiceError(
|
||||||
|
`Node type ${nodeType} not found in repository`,
|
||||||
|
nodeType
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create error for critical data extraction failure
|
||||||
|
*/
|
||||||
|
static dataExtractionError(nodeType: string, dataType: string, cause?: Error): ValidationServiceError {
|
||||||
|
return new ValidationServiceError(
|
||||||
|
`Failed to extract ${dataType} for node ${nodeType}`,
|
||||||
|
nodeType,
|
||||||
|
dataType,
|
||||||
|
cause
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -9,6 +9,7 @@ import { n8nDocumentationToolsFinal } from './mcp/tools';
|
|||||||
import { n8nManagementTools } from './mcp/tools-n8n-manager';
|
import { n8nManagementTools } from './mcp/tools-n8n-manager';
|
||||||
import { N8NDocumentationMCPServer } from './mcp/server';
|
import { N8NDocumentationMCPServer } from './mcp/server';
|
||||||
import { logger } from './utils/logger';
|
import { logger } from './utils/logger';
|
||||||
|
import { AuthManager } from './utils/auth';
|
||||||
import { PROJECT_VERSION } from './utils/version';
|
import { PROJECT_VERSION } from './utils/version';
|
||||||
import { isN8nApiConfigured } from './config/n8n-api';
|
import { isN8nApiConfigured } from './config/n8n-api';
|
||||||
import dotenv from 'dotenv';
|
import dotenv from 'dotenv';
|
||||||
@@ -308,15 +309,19 @@ export async function startFixedHTTPServer() {
|
|||||||
|
|
||||||
// Extract token and trim whitespace
|
// Extract token and trim whitespace
|
||||||
const token = authHeader.slice(7).trim();
|
const token = authHeader.slice(7).trim();
|
||||||
|
|
||||||
// Check if token matches
|
// SECURITY: Use timing-safe comparison to prevent timing attacks
|
||||||
if (token !== authToken) {
|
// See: https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
|
||||||
logger.warn('Authentication failed: Invalid token', {
|
const isValidToken = authToken &&
|
||||||
|
AuthManager.timingSafeCompare(token, authToken);
|
||||||
|
|
||||||
|
if (!isValidToken) {
|
||||||
|
logger.warn('Authentication failed: Invalid token', {
|
||||||
ip: req.ip,
|
ip: req.ip,
|
||||||
userAgent: req.get('user-agent'),
|
userAgent: req.get('user-agent'),
|
||||||
reason: 'invalid_token'
|
reason: 'invalid_token'
|
||||||
});
|
});
|
||||||
res.status(401).json({
|
res.status(401).json({
|
||||||
jsonrpc: '2.0',
|
jsonrpc: '2.0',
|
||||||
error: {
|
error: {
|
||||||
code: -32001,
|
code: -32001,
|
||||||
|
|||||||
23
src/index.ts
23
src/index.ts
@@ -10,6 +10,29 @@ export { SingleSessionHTTPServer } from './http-server-single-session';
|
|||||||
export { ConsoleManager } from './utils/console-manager';
|
export { ConsoleManager } from './utils/console-manager';
|
||||||
export { N8NDocumentationMCPServer } from './mcp/server';
|
export { N8NDocumentationMCPServer } from './mcp/server';
|
||||||
|
|
||||||
|
// Type exports for multi-tenant and library usage
|
||||||
|
export type {
|
||||||
|
InstanceContext
|
||||||
|
} from './types/instance-context';
|
||||||
|
export {
|
||||||
|
validateInstanceContext,
|
||||||
|
isInstanceContext
|
||||||
|
} from './types/instance-context';
|
||||||
|
|
||||||
|
// Session restoration types (v2.19.0)
|
||||||
|
export type {
|
||||||
|
SessionRestoreHook,
|
||||||
|
SessionRestorationOptions,
|
||||||
|
SessionState
|
||||||
|
} from './types/session-restoration';
|
||||||
|
|
||||||
|
// Re-export MCP SDK types for convenience
|
||||||
|
export type {
|
||||||
|
Tool,
|
||||||
|
CallToolResult,
|
||||||
|
ListToolsResult
|
||||||
|
} from '@modelcontextprotocol/sdk/types.js';
|
||||||
|
|
||||||
// Default export for convenience
|
// Default export for convenience
|
||||||
import N8NMCPEngine from './mcp-engine';
|
import N8NMCPEngine from './mcp-engine';
|
||||||
export default N8NMCPEngine;
|
export default N8NMCPEngine;
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
/**
|
/**
|
||||||
* N8N MCP Engine - Clean interface for service integration
|
* N8N MCP Engine - Clean interface for service integration
|
||||||
*
|
*
|
||||||
* This class provides a simple API for integrating the n8n-MCP server
|
* This class provides a simple API for integrating the n8n-MCP server
|
||||||
* into larger services. The wrapping service handles authentication,
|
* into larger services. The wrapping service handles authentication,
|
||||||
* multi-tenancy, rate limiting, etc.
|
* multi-tenancy, rate limiting, etc.
|
||||||
@@ -8,6 +8,8 @@
|
|||||||
import { Request, Response } from 'express';
|
import { Request, Response } from 'express';
|
||||||
import { SingleSessionHTTPServer } from './http-server-single-session';
|
import { SingleSessionHTTPServer } from './http-server-single-session';
|
||||||
import { logger } from './utils/logger';
|
import { logger } from './utils/logger';
|
||||||
|
import { InstanceContext } from './types/instance-context';
|
||||||
|
import { SessionRestoreHook, SessionState } from './types/session-restoration';
|
||||||
|
|
||||||
export interface EngineHealth {
|
export interface EngineHealth {
|
||||||
status: 'healthy' | 'unhealthy';
|
status: 'healthy' | 'unhealthy';
|
||||||
@@ -24,6 +26,71 @@ export interface EngineHealth {
|
|||||||
export interface EngineOptions {
|
export interface EngineOptions {
|
||||||
sessionTimeout?: number;
|
sessionTimeout?: number;
|
||||||
logLevel?: 'error' | 'warn' | 'info' | 'debug';
|
logLevel?: 'error' | 'warn' | 'info' | 'debug';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Session restoration hook for multi-tenant persistence
|
||||||
|
* Called when a client tries to use an unknown session ID
|
||||||
|
* Return instance context to restore the session, or null to reject
|
||||||
|
*
|
||||||
|
* @security IMPORTANT: Implement rate limiting in this hook to prevent abuse.
|
||||||
|
* Malicious clients could trigger excessive database lookups by sending random
|
||||||
|
* session IDs. Consider using express-rate-limit or similar middleware.
|
||||||
|
*
|
||||||
|
* @since 2.19.0
|
||||||
|
*/
|
||||||
|
onSessionNotFound?: SessionRestoreHook;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maximum time to wait for session restoration (milliseconds)
|
||||||
|
* @default 5000 (5 seconds)
|
||||||
|
* @since 2.19.0
|
||||||
|
*/
|
||||||
|
sessionRestorationTimeout?: number;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Session lifecycle event handlers (Phase 3 - REQ-4)
|
||||||
|
*
|
||||||
|
* Optional callbacks for session lifecycle events:
|
||||||
|
* - onSessionCreated: Called when a new session is created
|
||||||
|
* - onSessionRestored: Called when a session is restored from storage
|
||||||
|
* - onSessionAccessed: Called on EVERY request (consider throttling!)
|
||||||
|
* - onSessionExpired: Called when a session expires
|
||||||
|
* - onSessionDeleted: Called when a session is manually deleted
|
||||||
|
*
|
||||||
|
* All handlers are fire-and-forget (non-blocking).
|
||||||
|
* Errors are logged but don't affect session operations.
|
||||||
|
*
|
||||||
|
* @since 2.19.0
|
||||||
|
*/
|
||||||
|
sessionEvents?: {
|
||||||
|
onSessionCreated?: (sessionId: string, instanceContext: InstanceContext) => void | Promise<void>;
|
||||||
|
onSessionRestored?: (sessionId: string, instanceContext: InstanceContext) => void | Promise<void>;
|
||||||
|
onSessionAccessed?: (sessionId: string) => void | Promise<void>;
|
||||||
|
onSessionExpired?: (sessionId: string) => void | Promise<void>;
|
||||||
|
onSessionDeleted?: (sessionId: string) => void | Promise<void>;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Number of retry attempts for failed session restoration (Phase 4 - REQ-7)
|
||||||
|
*
|
||||||
|
* When the restoration hook throws an error, the system will retry
|
||||||
|
* up to this many times with a delay between attempts.
|
||||||
|
*
|
||||||
|
* Timeout errors are NOT retried (already took too long).
|
||||||
|
* The overall timeout applies to ALL retry attempts combined.
|
||||||
|
*
|
||||||
|
* @default 0 (no retries, opt-in)
|
||||||
|
* @since 2.19.0
|
||||||
|
*/
|
||||||
|
sessionRestorationRetries?: number;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delay between retry attempts in milliseconds (Phase 4 - REQ-7)
|
||||||
|
*
|
||||||
|
* @default 100 (100 milliseconds)
|
||||||
|
* @since 2.19.0
|
||||||
|
*/
|
||||||
|
sessionRestorationRetryDelay?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
export class N8NMCPEngine {
|
export class N8NMCPEngine {
|
||||||
@@ -31,30 +98,42 @@ export class N8NMCPEngine {
|
|||||||
private startTime: Date;
|
private startTime: Date;
|
||||||
|
|
||||||
constructor(options: EngineOptions = {}) {
|
constructor(options: EngineOptions = {}) {
|
||||||
this.server = new SingleSessionHTTPServer();
|
this.server = new SingleSessionHTTPServer(options);
|
||||||
this.startTime = new Date();
|
this.startTime = new Date();
|
||||||
|
|
||||||
if (options.logLevel) {
|
if (options.logLevel) {
|
||||||
process.env.LOG_LEVEL = options.logLevel;
|
process.env.LOG_LEVEL = options.logLevel;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Process a single MCP request
|
* Process a single MCP request with optional instance context
|
||||||
* The wrapping service handles authentication, multi-tenancy, etc.
|
* The wrapping service handles authentication, multi-tenancy, etc.
|
||||||
*
|
*
|
||||||
|
* @param req - Express request object
|
||||||
|
* @param res - Express response object
|
||||||
|
* @param instanceContext - Optional instance-specific configuration
|
||||||
|
*
|
||||||
* @example
|
* @example
|
||||||
* // In your service
|
* // Basic usage (backward compatible)
|
||||||
* const engine = new N8NMCPEngine();
|
* await engine.processRequest(req, res);
|
||||||
*
|
*
|
||||||
* app.post('/api/users/:userId/mcp', authenticate, async (req, res) => {
|
* @example
|
||||||
* // Your service handles auth, rate limiting, user context
|
* // With instance context
|
||||||
* await engine.processRequest(req, res);
|
* const context: InstanceContext = {
|
||||||
* });
|
* n8nApiUrl: 'https://instance1.n8n.cloud',
|
||||||
|
* n8nApiKey: 'instance1-key',
|
||||||
|
* instanceId: 'tenant-123'
|
||||||
|
* };
|
||||||
|
* await engine.processRequest(req, res, context);
|
||||||
*/
|
*/
|
||||||
async processRequest(req: Request, res: Response): Promise<void> {
|
async processRequest(
|
||||||
|
req: Request,
|
||||||
|
res: Response,
|
||||||
|
instanceContext?: InstanceContext
|
||||||
|
): Promise<void> {
|
||||||
try {
|
try {
|
||||||
await this.server.handleRequest(req, res);
|
await this.server.handleRequest(req, res, instanceContext);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('Engine processRequest error:', error);
|
logger.error('Engine processRequest error:', error);
|
||||||
throw error;
|
throw error;
|
||||||
@@ -84,7 +163,7 @@ export class N8NMCPEngine {
|
|||||||
total: Math.round(memoryUsage.heapTotal / 1024 / 1024),
|
total: Math.round(memoryUsage.heapTotal / 1024 / 1024),
|
||||||
unit: 'MB'
|
unit: 'MB'
|
||||||
},
|
},
|
||||||
version: '2.3.2'
|
version: '2.19.4'
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('Health check failed:', error);
|
logger.error('Health check failed:', error);
|
||||||
@@ -93,7 +172,7 @@ export class N8NMCPEngine {
|
|||||||
uptime: 0,
|
uptime: 0,
|
||||||
sessionActive: false,
|
sessionActive: false,
|
||||||
memoryUsage: { used: 0, total: 0, unit: 'MB' },
|
memoryUsage: { used: 0, total: 0, unit: 'MB' },
|
||||||
version: '2.3.2'
|
version: '2.19.4'
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -105,10 +184,118 @@ export class N8NMCPEngine {
|
|||||||
getSessionInfo(): { active: boolean; sessionId?: string; age?: number } {
|
getSessionInfo(): { active: boolean; sessionId?: string; age?: number } {
|
||||||
return this.server.getSessionInfo();
|
return this.server.getSessionInfo();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all active session IDs (Phase 2 - REQ-5)
|
||||||
|
* Returns array of currently active session IDs
|
||||||
|
*
|
||||||
|
* @returns Array of session IDs
|
||||||
|
* @since 2.19.0
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* const engine = new N8NMCPEngine();
|
||||||
|
* const sessionIds = engine.getActiveSessions();
|
||||||
|
* console.log(`Active sessions: ${sessionIds.length}`);
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
getActiveSessions(): string[] {
|
||||||
|
return this.server.getActiveSessions();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get session state for a specific session (Phase 2 - REQ-5)
|
||||||
|
* Returns session state or null if session doesn't exist
|
||||||
|
*
|
||||||
|
* @param sessionId - The session ID to get state for
|
||||||
|
* @returns SessionState object or null
|
||||||
|
* @since 2.19.0
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* const state = engine.getSessionState('session-123');
|
||||||
|
* if (state) {
|
||||||
|
* // Save to database
|
||||||
|
* await db.saveSession(state);
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
getSessionState(sessionId: string): SessionState | null {
|
||||||
|
return this.server.getSessionState(sessionId);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all session states (Phase 2 - REQ-5)
|
||||||
|
* Returns array of all active session states for bulk backup
|
||||||
|
*
|
||||||
|
* @returns Array of SessionState objects
|
||||||
|
* @since 2.19.0
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* // Periodic backup every 5 minutes
|
||||||
|
* setInterval(async () => {
|
||||||
|
* const states = engine.getAllSessionStates();
|
||||||
|
* for (const state of states) {
|
||||||
|
* await database.upsertSession(state);
|
||||||
|
* }
|
||||||
|
* }, 300000);
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
getAllSessionStates(): SessionState[] {
|
||||||
|
return this.server.getAllSessionStates();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manually restore a session (Phase 2 - REQ-5)
|
||||||
|
* Creates a session with the given ID and instance context
|
||||||
|
*
|
||||||
|
* @param sessionId - The session ID to restore
|
||||||
|
* @param instanceContext - Instance configuration
|
||||||
|
* @returns true if session was restored successfully, false otherwise
|
||||||
|
* @since 2.19.0
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* // Restore session from database
|
||||||
|
* const session = await db.loadSession('session-123');
|
||||||
|
* if (session) {
|
||||||
|
* const restored = engine.restoreSession(
|
||||||
|
* session.sessionId,
|
||||||
|
* session.instanceContext
|
||||||
|
* );
|
||||||
|
* console.log(`Restored: ${restored}`);
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
restoreSession(sessionId: string, instanceContext: InstanceContext): boolean {
|
||||||
|
return this.server.manuallyRestoreSession(sessionId, instanceContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Manually delete a session (Phase 2 - REQ-5)
|
||||||
|
* Removes the session and cleans up resources
|
||||||
|
*
|
||||||
|
* @param sessionId - The session ID to delete
|
||||||
|
* @returns true if session was deleted, false if not found
|
||||||
|
* @since 2.19.0
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* ```typescript
|
||||||
|
* // Delete expired session
|
||||||
|
* const deleted = engine.deleteSession('session-123');
|
||||||
|
* if (deleted) {
|
||||||
|
* await db.deleteSession('session-123');
|
||||||
|
* }
|
||||||
|
* ```
|
||||||
|
*/
|
||||||
|
deleteSession(sessionId: string): boolean {
|
||||||
|
return this.server.manuallyDeleteSession(sessionId);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Graceful shutdown for service lifecycle
|
* Graceful shutdown for service lifecycle
|
||||||
*
|
*
|
||||||
* @example
|
* @example
|
||||||
* process.on('SIGTERM', async () => {
|
* process.on('SIGTERM', async () => {
|
||||||
* await engine.shutdown();
|
* await engine.shutdown();
|
||||||
@@ -130,36 +317,39 @@ export class N8NMCPEngine {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Example usage in a multi-tenant service:
|
* Example usage with flexible instance configuration:
|
||||||
*
|
*
|
||||||
* ```typescript
|
* ```typescript
|
||||||
* import { N8NMCPEngine } from 'n8n-mcp/engine';
|
* import { N8NMCPEngine, InstanceContext } from 'n8n-mcp';
|
||||||
* import express from 'express';
|
* import express from 'express';
|
||||||
*
|
*
|
||||||
* const app = express();
|
* const app = express();
|
||||||
* const engine = new N8NMCPEngine();
|
* const engine = new N8NMCPEngine();
|
||||||
*
|
*
|
||||||
* // Middleware for authentication
|
* // Middleware for authentication
|
||||||
* const authenticate = (req, res, next) => {
|
* const authenticate = (req, res, next) => {
|
||||||
* // Your auth logic
|
* // Your auth logic
|
||||||
* req.userId = 'user123';
|
* req.userId = 'user123';
|
||||||
* next();
|
* next();
|
||||||
* };
|
* };
|
||||||
*
|
*
|
||||||
* // MCP endpoint with multi-tenant support
|
* // MCP endpoint with flexible instance support
|
||||||
* app.post('/api/mcp/:userId', authenticate, async (req, res) => {
|
* app.post('/api/instances/:instanceId/mcp', authenticate, async (req, res) => {
|
||||||
* // Log usage for billing
|
* // Get instance configuration from your database
|
||||||
* await logUsage(req.userId, 'mcp-request');
|
* const instance = await getInstanceConfig(req.params.instanceId);
|
||||||
*
|
*
|
||||||
* // Rate limiting
|
* // Create instance context
|
||||||
* if (await isRateLimited(req.userId)) {
|
* const context: InstanceContext = {
|
||||||
* return res.status(429).json({ error: 'Rate limited' });
|
* n8nApiUrl: instance.n8nUrl,
|
||||||
* }
|
* n8nApiKey: instance.apiKey,
|
||||||
*
|
* instanceId: instance.id,
|
||||||
* // Process request
|
* metadata: { userId: req.userId }
|
||||||
* await engine.processRequest(req, res);
|
* };
|
||||||
|
*
|
||||||
|
* // Process request with instance context
|
||||||
|
* await engine.processRequest(req, res, context);
|
||||||
* });
|
* });
|
||||||
*
|
*
|
||||||
* // Health endpoint
|
* // Health endpoint
|
||||||
* app.get('/health', async (req, res) => {
|
* app.get('/health', async (req, res) => {
|
||||||
* const health = await engine.healthCheck();
|
* const health = await engine.healthCheck();
|
||||||
|
|||||||
@@ -62,8 +62,12 @@ export class MCPEngine {
|
|||||||
hiddenProperties: []
|
hiddenProperties: []
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
return ConfigValidator.validate(args.nodeType, args.config, node.properties || []);
|
// CRITICAL FIX: Extract user-provided keys before validation
|
||||||
|
// This prevents false warnings about default values
|
||||||
|
const userProvidedKeys = new Set(Object.keys(args.config || {}));
|
||||||
|
|
||||||
|
return ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys);
|
||||||
}
|
}
|
||||||
|
|
||||||
async validateNodeMinimal(args: any) {
|
async validateNodeMinimal(args: any) {
|
||||||
@@ -89,10 +93,6 @@ export class MCPEngine {
|
|||||||
return this.repository.searchNodeProperties(args.nodeType, args.query, args.maxResults || 20);
|
return this.repository.searchNodeProperties(args.nodeType, args.query, args.maxResults || 20);
|
||||||
}
|
}
|
||||||
|
|
||||||
async getNodeForTask(args: any) {
|
|
||||||
return TaskTemplates.getTaskTemplate(args.task);
|
|
||||||
}
|
|
||||||
|
|
||||||
async listAITools(args: any) {
|
async listAITools(args: any) {
|
||||||
return this.repository.getAIToolNodes();
|
return this.repository.getAIToolNodes();
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -10,6 +10,7 @@ import { WorkflowDiffEngine } from '../services/workflow-diff-engine';
|
|||||||
import { getN8nApiClient } from './handlers-n8n-manager';
|
import { getN8nApiClient } from './handlers-n8n-manager';
|
||||||
import { N8nApiError, getUserFriendlyErrorMessage } from '../utils/n8n-errors';
|
import { N8nApiError, getUserFriendlyErrorMessage } from '../utils/n8n-errors';
|
||||||
import { logger } from '../utils/logger';
|
import { logger } from '../utils/logger';
|
||||||
|
import { InstanceContext } from '../types/instance-context';
|
||||||
|
|
||||||
// Zod schema for the diff request
|
// Zod schema for the diff request
|
||||||
const workflowDiffSchema = z.object({
|
const workflowDiffSchema = z.object({
|
||||||
@@ -21,24 +22,34 @@ const workflowDiffSchema = z.object({
|
|||||||
node: z.any().optional(),
|
node: z.any().optional(),
|
||||||
nodeId: z.string().optional(),
|
nodeId: z.string().optional(),
|
||||||
nodeName: z.string().optional(),
|
nodeName: z.string().optional(),
|
||||||
changes: z.any().optional(),
|
updates: z.any().optional(),
|
||||||
position: z.tuple([z.number(), z.number()]).optional(),
|
position: z.tuple([z.number(), z.number()]).optional(),
|
||||||
// Connection operations
|
// Connection operations
|
||||||
source: z.string().optional(),
|
source: z.string().optional(),
|
||||||
target: z.string().optional(),
|
target: z.string().optional(),
|
||||||
|
from: z.string().optional(), // For rewireConnection
|
||||||
|
to: z.string().optional(), // For rewireConnection
|
||||||
sourceOutput: z.string().optional(),
|
sourceOutput: z.string().optional(),
|
||||||
targetInput: z.string().optional(),
|
targetInput: z.string().optional(),
|
||||||
sourceIndex: z.number().optional(),
|
sourceIndex: z.number().optional(),
|
||||||
targetIndex: z.number().optional(),
|
targetIndex: z.number().optional(),
|
||||||
|
// Smart parameters (Phase 1 UX improvement)
|
||||||
|
branch: z.enum(['true', 'false']).optional(),
|
||||||
|
case: z.number().optional(),
|
||||||
|
ignoreErrors: z.boolean().optional(),
|
||||||
|
// Connection cleanup operations
|
||||||
|
dryRun: z.boolean().optional(),
|
||||||
|
connections: z.any().optional(),
|
||||||
// Metadata operations
|
// Metadata operations
|
||||||
settings: z.any().optional(),
|
settings: z.any().optional(),
|
||||||
name: z.string().optional(),
|
name: z.string().optional(),
|
||||||
tag: z.string().optional(),
|
tag: z.string().optional(),
|
||||||
})),
|
})),
|
||||||
validateOnly: z.boolean().optional(),
|
validateOnly: z.boolean().optional(),
|
||||||
|
continueOnError: z.boolean().optional(),
|
||||||
});
|
});
|
||||||
|
|
||||||
export async function handleUpdatePartialWorkflow(args: unknown): Promise<McpToolResponse> {
|
export async function handleUpdatePartialWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse> {
|
||||||
try {
|
try {
|
||||||
// Debug logging (only in debug mode)
|
// Debug logging (only in debug mode)
|
||||||
if (process.env.DEBUG_MCP === 'true') {
|
if (process.env.DEBUG_MCP === 'true') {
|
||||||
@@ -54,7 +65,7 @@ export async function handleUpdatePartialWorkflow(args: unknown): Promise<McpToo
|
|||||||
const input = workflowDiffSchema.parse(args);
|
const input = workflowDiffSchema.parse(args);
|
||||||
|
|
||||||
// Get API client
|
// Get API client
|
||||||
const client = getN8nApiClient();
|
const client = getN8nApiClient(context);
|
||||||
if (!client) {
|
if (!client) {
|
||||||
return {
|
return {
|
||||||
success: false,
|
success: false,
|
||||||
@@ -79,17 +90,28 @@ export async function handleUpdatePartialWorkflow(args: unknown): Promise<McpToo
|
|||||||
|
|
||||||
// Apply diff operations
|
// Apply diff operations
|
||||||
const diffEngine = new WorkflowDiffEngine();
|
const diffEngine = new WorkflowDiffEngine();
|
||||||
const diffResult = await diffEngine.applyDiff(workflow, input as WorkflowDiffRequest);
|
const diffRequest = input as WorkflowDiffRequest;
|
||||||
|
const diffResult = await diffEngine.applyDiff(workflow, diffRequest);
|
||||||
|
|
||||||
|
// Check if this is a complete failure or partial success in continueOnError mode
|
||||||
if (!diffResult.success) {
|
if (!diffResult.success) {
|
||||||
return {
|
// In continueOnError mode, partial success is still valuable
|
||||||
success: false,
|
if (diffRequest.continueOnError && diffResult.workflow && diffResult.operationsApplied && diffResult.operationsApplied > 0) {
|
||||||
error: 'Failed to apply diff operations',
|
logger.info(`continueOnError mode: Applying ${diffResult.operationsApplied} successful operations despite ${diffResult.failed?.length || 0} failures`);
|
||||||
details: {
|
// Continue to update workflow with partial changes
|
||||||
errors: diffResult.errors,
|
} else {
|
||||||
operationsApplied: diffResult.operationsApplied
|
// Complete failure - return error
|
||||||
}
|
return {
|
||||||
};
|
success: false,
|
||||||
|
error: 'Failed to apply diff operations',
|
||||||
|
details: {
|
||||||
|
errors: diffResult.errors,
|
||||||
|
operationsApplied: diffResult.operationsApplied,
|
||||||
|
applied: diffResult.applied,
|
||||||
|
failed: diffResult.failed
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If validateOnly, return validation result
|
// If validateOnly, return validation result
|
||||||
@@ -115,7 +137,10 @@ export async function handleUpdatePartialWorkflow(args: unknown): Promise<McpToo
|
|||||||
details: {
|
details: {
|
||||||
operationsApplied: diffResult.operationsApplied,
|
operationsApplied: diffResult.operationsApplied,
|
||||||
workflowId: updatedWorkflow.id,
|
workflowId: updatedWorkflow.id,
|
||||||
workflowName: updatedWorkflow.name
|
workflowName: updatedWorkflow.name,
|
||||||
|
applied: diffResult.applied,
|
||||||
|
failed: diffResult.failed,
|
||||||
|
errors: diffResult.errors
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
185
src/mcp/index.ts
185
src/mcp/index.ts
@@ -2,6 +2,10 @@
|
|||||||
|
|
||||||
import { N8NDocumentationMCPServer } from './server';
|
import { N8NDocumentationMCPServer } from './server';
|
||||||
import { logger } from '../utils/logger';
|
import { logger } from '../utils/logger';
|
||||||
|
import { TelemetryConfigManager } from '../telemetry/config-manager';
|
||||||
|
import { EarlyErrorLogger } from '../telemetry/early-error-logger';
|
||||||
|
import { STARTUP_CHECKPOINTS, findFailedCheckpoint, StartupCheckpoint } from '../telemetry/startup-checkpoints';
|
||||||
|
import { existsSync } from 'fs';
|
||||||
|
|
||||||
// Add error details to stderr for Claude Desktop debugging
|
// Add error details to stderr for Claude Desktop debugging
|
||||||
process.on('uncaughtException', (error) => {
|
process.on('uncaughtException', (error) => {
|
||||||
@@ -20,9 +24,93 @@ process.on('unhandledRejection', (reason, promise) => {
|
|||||||
process.exit(1);
|
process.exit(1);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Detects if running in a container environment (Docker, Podman, Kubernetes, etc.)
|
||||||
|
* Uses multiple detection methods for robustness:
|
||||||
|
* 1. Environment variables (IS_DOCKER, IS_CONTAINER with multiple formats)
|
||||||
|
* 2. Filesystem markers (/.dockerenv, /run/.containerenv)
|
||||||
|
*/
|
||||||
|
function isContainerEnvironment(): boolean {
|
||||||
|
// Check environment variables with multiple truthy formats
|
||||||
|
const dockerEnv = (process.env.IS_DOCKER || '').toLowerCase();
|
||||||
|
const containerEnv = (process.env.IS_CONTAINER || '').toLowerCase();
|
||||||
|
|
||||||
|
if (['true', '1', 'yes'].includes(dockerEnv)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (['true', '1', 'yes'].includes(containerEnv)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: Check filesystem markers
|
||||||
|
// /.dockerenv exists in Docker containers
|
||||||
|
// /run/.containerenv exists in Podman containers
|
||||||
|
try {
|
||||||
|
return existsSync('/.dockerenv') || existsSync('/run/.containerenv');
|
||||||
|
} catch (error) {
|
||||||
|
// If filesystem check fails, assume not in container
|
||||||
|
logger.debug('Container detection filesystem check failed:', error);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
async function main() {
|
async function main() {
|
||||||
|
// Initialize early error logger for pre-handshake error capture (v2.18.3)
|
||||||
|
// Now using singleton pattern with defensive initialization
|
||||||
|
const startTime = Date.now();
|
||||||
|
const earlyLogger = EarlyErrorLogger.getInstance();
|
||||||
|
const checkpoints: StartupCheckpoint[] = [];
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Checkpoint: Process started (fire-and-forget, no await)
|
||||||
|
earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.PROCESS_STARTED);
|
||||||
|
checkpoints.push(STARTUP_CHECKPOINTS.PROCESS_STARTED);
|
||||||
|
|
||||||
|
// Handle telemetry CLI commands
|
||||||
|
const args = process.argv.slice(2);
|
||||||
|
if (args.length > 0 && args[0] === 'telemetry') {
|
||||||
|
const telemetryConfig = TelemetryConfigManager.getInstance();
|
||||||
|
const action = args[1];
|
||||||
|
|
||||||
|
switch (action) {
|
||||||
|
case 'enable':
|
||||||
|
telemetryConfig.enable();
|
||||||
|
process.exit(0);
|
||||||
|
break;
|
||||||
|
case 'disable':
|
||||||
|
telemetryConfig.disable();
|
||||||
|
process.exit(0);
|
||||||
|
break;
|
||||||
|
case 'status':
|
||||||
|
console.log(telemetryConfig.getStatus());
|
||||||
|
process.exit(0);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
console.log(`
|
||||||
|
Usage: n8n-mcp telemetry [command]
|
||||||
|
|
||||||
|
Commands:
|
||||||
|
enable Enable anonymous telemetry
|
||||||
|
disable Disable anonymous telemetry
|
||||||
|
status Show current telemetry status
|
||||||
|
|
||||||
|
Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
|
||||||
|
`);
|
||||||
|
process.exit(args[1] ? 1 : 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const mode = process.env.MCP_MODE || 'stdio';
|
const mode = process.env.MCP_MODE || 'stdio';
|
||||||
|
|
||||||
|
// Checkpoint: Telemetry initializing (fire-and-forget, no await)
|
||||||
|
earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);
|
||||||
|
checkpoints.push(STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);
|
||||||
|
|
||||||
|
// Telemetry is already initialized by TelemetryConfigManager in imports
|
||||||
|
// Mark as ready (fire-and-forget, no await)
|
||||||
|
earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.TELEMETRY_READY);
|
||||||
|
checkpoints.push(STARTUP_CHECKPOINTS.TELEMETRY_READY);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Only show debug messages in HTTP mode to avoid corrupting stdio communication
|
// Only show debug messages in HTTP mode to avoid corrupting stdio communication
|
||||||
if (mode === 'http') {
|
if (mode === 'http') {
|
||||||
@@ -30,6 +118,10 @@ async function main() {
|
|||||||
console.error('Current directory:', process.cwd());
|
console.error('Current directory:', process.cwd());
|
||||||
console.error('Node version:', process.version);
|
console.error('Node version:', process.version);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Checkpoint: MCP handshake starting (fire-and-forget, no await)
|
||||||
|
earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);
|
||||||
|
checkpoints.push(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);
|
||||||
|
|
||||||
if (mode === 'http') {
|
if (mode === 'http') {
|
||||||
// Check if we should use the fixed implementation
|
// Check if we should use the fixed implementation
|
||||||
@@ -55,15 +147,95 @@ async function main() {
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Stdio mode - for local Claude Desktop
|
// Stdio mode - for local Claude Desktop
|
||||||
const server = new N8NDocumentationMCPServer();
|
const server = new N8NDocumentationMCPServer(undefined, earlyLogger);
|
||||||
|
|
||||||
|
// Graceful shutdown handler (fixes Issue #277)
|
||||||
|
let isShuttingDown = false;
|
||||||
|
const shutdown = async (signal: string = 'UNKNOWN') => {
|
||||||
|
if (isShuttingDown) return; // Prevent multiple shutdown calls
|
||||||
|
isShuttingDown = true;
|
||||||
|
|
||||||
|
try {
|
||||||
|
logger.info(`Shutdown initiated by: ${signal}`);
|
||||||
|
|
||||||
|
await server.shutdown();
|
||||||
|
|
||||||
|
// Close stdin to signal we're done reading
|
||||||
|
if (process.stdin && !process.stdin.destroyed) {
|
||||||
|
process.stdin.pause();
|
||||||
|
process.stdin.destroy();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exit with timeout to ensure we don't hang
|
||||||
|
// Increased to 1000ms for slower systems
|
||||||
|
setTimeout(() => {
|
||||||
|
logger.warn('Shutdown timeout exceeded, forcing exit');
|
||||||
|
process.exit(0);
|
||||||
|
}, 1000).unref();
|
||||||
|
|
||||||
|
// Let the timeout handle the exit for graceful shutdown
|
||||||
|
// (removed immediate exit to allow cleanup to complete)
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error during shutdown:', error);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Handle termination signals (fixes Issue #277)
|
||||||
|
// Signal handling strategy:
|
||||||
|
// - Claude Desktop (Windows/macOS/Linux): stdin handlers + signal handlers
|
||||||
|
// Primary: stdin close when Claude quits | Fallback: SIGTERM/SIGINT/SIGHUP
|
||||||
|
// - Container environments: signal handlers ONLY
|
||||||
|
// stdin closed in detached mode would trigger immediate shutdown
|
||||||
|
// Container detection via IS_DOCKER/IS_CONTAINER env vars + filesystem markers
|
||||||
|
// - Manual execution: Both stdin and signal handlers work
|
||||||
|
process.on('SIGTERM', () => shutdown('SIGTERM'));
|
||||||
|
process.on('SIGINT', () => shutdown('SIGINT'));
|
||||||
|
process.on('SIGHUP', () => shutdown('SIGHUP'));
|
||||||
|
|
||||||
|
// Handle stdio disconnect - PRIMARY shutdown mechanism for Claude Desktop
|
||||||
|
// Skip in container environments (Docker, Kubernetes, Podman) to prevent
|
||||||
|
// premature shutdown when stdin is closed in detached mode.
|
||||||
|
// Containers rely on signal handlers (SIGTERM/SIGINT/SIGHUP) for proper shutdown.
|
||||||
|
const isContainer = isContainerEnvironment();
|
||||||
|
|
||||||
|
if (!isContainer && process.stdin.readable && !process.stdin.destroyed) {
|
||||||
|
try {
|
||||||
|
process.stdin.on('end', () => shutdown('STDIN_END'));
|
||||||
|
process.stdin.on('close', () => shutdown('STDIN_CLOSE'));
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Failed to register stdin handlers, using signal handlers only:', error);
|
||||||
|
// Continue - signal handlers will still work
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
await server.run();
|
await server.run();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Checkpoint: MCP handshake complete (fire-and-forget, no await)
|
||||||
|
earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);
|
||||||
|
checkpoints.push(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);
|
||||||
|
|
||||||
|
// Checkpoint: Server ready (fire-and-forget, no await)
|
||||||
|
earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.SERVER_READY);
|
||||||
|
checkpoints.push(STARTUP_CHECKPOINTS.SERVER_READY);
|
||||||
|
|
||||||
|
// Log successful startup (fire-and-forget, no await)
|
||||||
|
const startupDuration = Date.now() - startTime;
|
||||||
|
earlyLogger.logStartupSuccess(checkpoints, startupDuration);
|
||||||
|
|
||||||
|
logger.info(`Server startup completed in ${startupDuration}ms (${checkpoints.length} checkpoints passed)`);
|
||||||
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
// Log startup error with checkpoint context (fire-and-forget, no await)
|
||||||
|
const failedCheckpoint = findFailedCheckpoint(checkpoints);
|
||||||
|
earlyLogger.logStartupError(failedCheckpoint, error);
|
||||||
|
|
||||||
// In stdio mode, we cannot output to console at all
|
// In stdio mode, we cannot output to console at all
|
||||||
if (mode !== 'stdio') {
|
if (mode !== 'stdio') {
|
||||||
console.error('Failed to start MCP server:', error);
|
console.error('Failed to start MCP server:', error);
|
||||||
logger.error('Failed to start MCP server', error);
|
logger.error('Failed to start MCP server', error);
|
||||||
|
|
||||||
// Provide helpful error messages
|
// Provide helpful error messages
|
||||||
if (error instanceof Error && error.message.includes('nodes.db not found')) {
|
if (error instanceof Error && error.message.includes('nodes.db not found')) {
|
||||||
console.error('\nTo fix this issue:');
|
console.error('\nTo fix this issue:');
|
||||||
@@ -77,7 +249,12 @@ async function main() {
|
|||||||
console.error('3. If that doesn\'t work, try: rm -rf node_modules && npm install');
|
console.error('3. If that doesn\'t work, try: rm -rf node_modules && npm install');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
} catch (outerError) {
|
||||||
|
// Outer error catch for early initialization failures
|
||||||
|
logger.error('Critical startup error:', outerError);
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user