Mirror of https://github.com/czlonkowski/n8n-mcp.git (synced 2026-01-30 14:32:04 +00:00)

Compare commits: 322 commits
Commit SHAs, in page order (the author, date, and message columns of the commit table were not preserved):

c7e7bda505 bac4936c6d 25784142fe f770043d3d 1be06c217f c974947c84 ff69e4ccca 9ee4b9492f
4df9558b3e 05424f66af 5d2c5df53e f5cf1e2934 9050967cd6 717d6f927f fc37907348 47d9f55dc5
5575630711 1bbfaabbc2 597bd290b6 99c5907b71 77151e013e 14f3b9c12a eb362febd6 821ace310e
53252adc68 2010d77ed8 caf9383ba1 8728a808ac 60ab66d64d eee52a7f53 a66cb18cce 0e0f0998af
08a4be8370 3578f2cc31 4d3b8fbc91 5688384113 346fa3c8d2 3d5ceae43f 1834d474a5 a4ef1efaf8
65f51ad8b5 af6efe9e88 3f427f9528 18b8747005 749f1c53eb 892c4ed70a 590dc087ac ee7229b4db
b6683b8381 b2300429fd b87f638e52 1f94427d54 2eb459c80c 79ef853e8c 2682be33b8 9f291154f2
bfff497020 e522aec08c 817bf7d211 9a3520adb7 ced7fafcbf ad4b521402 b18f6ec7a4 95ea6ca0bb
a4c7e097e8 0778c55d85 913ff31164 952a97ef73 56114f041b c52a3dd253 bc156fce2a aaa6be6d74
3806efdbd8 0e26ea6a68 1bfbf05561 f23e09934d 5ea00e12a2 04e7c53b59 c7f8614de1 5702a64a01
551fea841b eac4e67101 c76ffd9fb1 7300957d13 32a25e2706 ab6b554692 32264da107 ef1cf747a3
dbdc88d629 538618b1bc 41830c88fe 0d2d9bdd52 05f68b8ea1 5881304ed8 0f5b0d9463 4399899255
8d20c64f5c fe1309151a dd62040155 112b40119c 318986f546 aa8a6a7069 e11a885b0d ee99cb7ba1
66cb66b31b b67d6ba353 3ba5584df9 be0211d826 0d71a16f83 085f6db7a2 b6bc3b732e c16c9a2398
1d34ad81d5 4566253bdc 54c598717c 8b5b01de98 275e573d8d 6256105053 1f43784315 80e3391773
c580a3dde4 fc8fb66900 4625ebf64d 43dea68f0b dc62fd66cb a94ff0586c 29b2b1d4c1 fa6ff89516
34811eaf69 52c9902efd fba8b2a490 275e4f8cef 4016ac42ef b8227ff775 f61fd9b429 4b36ed6a95
f072b2e003 cfd2325ca4 978347e8d0 1b7dd3b517 c52bbcbb83 5fb63cd725 36eb8e3864 51278f52e9
6479ac2bf5 08d43bd7fb 914805f5ea 08a1d42f09 ae11738ac7 6e365714e2 a2cc37bdf7 cf3c66c0ea
f33b626179 2113714ec2 49757e3c22 dd521d0d87 331883f944 f3164e202f 8e2e1dce62 b986beef2c
943f5862a3 2c536a25fd e95ac7c335 e2c8fd0125 3332eb09fc bd03412fc8 73fa494735 67d8f5d4d4
d2a250e23d 710f054b93 fd65727632 5d9936a909 de95fb21ba 2bcd7c757b 50439e2aa1 96cb9eca0f
36dc8b489c cffd5e8b2e 1ad2c6f6d2 28cff8c77b 0818b4d56c 5e2a6bdb9c ec9d8fdb7e ddc4de8c3e
c67659a7c3 4cf8bb5c98 53b5dc312d 1eedb43e9f 81dfbbbd77 3ba3f101b3 92eb4ef34f ccbe04f007
91ad08493c 7bb021163f 59ae78f03a cb224de01f fd9ea985f2 225bb06cd5 2627028be3 cc9fe69449
0144484f96 2b7bc48699 0ec02fa0da d207cc3723 eeb4b6ac3e 06cbb40213 9a00a99011 36aedd5050
59f49c47ab b106550520 e1be4473a3 b12a927a10 08abdb7937 95bb002577 36e02c68d3 3078273d93
aeb74102e5 af949b09a5 44568a6edd 59e4cb85ac f78f53e731 c6e0e528d1 34bafe240d f139d38c81
aeaba3b9ca a7bfa73479 ee125c52f8 f9194ee74c 2a85000411 653f395666 cfe3c5e584 67c3c9c9c8
6d50cf93f0 de9f222cfe da593400d2 126d09c66b 4f81962953 9e7a0e0487 a7dc07abab 1c56eb0daa
fcf778c79d c519cd5060 69f3a31d41 bd8a7f68ac abc6a31302 57459c27e3 9380602439 a696af8cfa
b467bec93e 6e042467b2 287b9aa819 3331b72df4 c0d7145a5a 08e906739f ae329c3bb6 1cfbdc3bdf
b3d42b3390 4feb905bd0 ad1f611d2a 02574e5555 b27d245dab ecf0d50a63 1db9ecf33f fc973d83db
2e19eaa309 73db3dfdfe 7fcfa8f696 c8cdd3c0b5 62d01ab237 00289e90d7 5c01624c3a dad3a442d9
7a402bc7ad 88e288f8f6 12a7f1e8bf 2f18a2bb9a 9b94e3be9c 9e1a4129c0 4b764c6110 c3b691cedf
4bf8f7006d 2a9a3b9410 cd27d78bfd 8d1ae278ee a84dbd6a15 1728495146 2305aaab9e f74427bdb5
fe59688e03 675989971c d875ac1e0c 5bf1bc46e9 3bab53a3be 8ffda534be 0bf0e1cd74 9fb847a16f
bf999232a3 59e476fdf0 711cecb90d 582c9aac53 997cc93a0a 2f234780dd 99518f71cf fe1e3640af
aef9d983e2 e252a36e3f 39e13c451f a8e0b1ed34 ed7de10fd2 b7fa12667b 4854a50854 cb5691f17d
6d45ff8bcb 64b9cf47a7 f4dff6b8e1 ec0d2e8a6e a1db133a50 d8bab6e667 3728a9cc67 47e6a7846c
cabda2a0f8 34cb8f8c44
@@ -1,6 +1,7 @@
---
name: code-reviewer
description: Use this agent when you need to review recently written or modified code for quality, security, and maintainability. This agent should be invoked immediately after code changes are made to ensure high standards are maintained. Examples:\n\n<example>\nContext: The user has created a code-review agent that should be called after writing code.\nuser: "Please write a function that validates email addresses"\nassistant: "I'll create an email validation function for you:"\n<function implementation>\nassistant: "Now let me use the code-reviewer agent to review this code for quality and security"\n<commentary>\nSince new code was just written, the code-reviewer agent should be invoked to ensure it meets quality standards.\n</commentary>\n</example>\n\n<example>\nContext: The user has modified existing code and wants it reviewed.\nuser: "I've updated the authentication logic, can you check if it's secure?"\nassistant: "I'll use the code-reviewer agent to analyze your authentication changes for security and best practices"\n<commentary>\nThe user has made changes to security-critical code, so the code-reviewer agent is the appropriate tool to ensure the modifications are secure and well-implemented.\n</commentary>\n</example>
+model: inherit
---

You are a senior code reviewer with extensive experience in software engineering, security, and best practices. Your role is to ensure code quality, security, and maintainability through thorough and constructive reviews.
@@ -26,4 +26,8 @@ USE_NGINX=false
# N8N_API_URL=https://your-n8n-instance.com
# N8N_API_KEY=your-api-key-here
# N8N_API_TIMEOUT=30000
# N8N_API_MAX_RETRIES=3
+
+# Optional: Disable specific tools (comma-separated list)
+# Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check
+# DISABLED_TOOLS=
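
A minimal sketch of how registration-time filtering driven by the `DISABLED_TOOLS` variable above could look. The variable name and comma-separated format come from the config; `ToolDefinition` and the surrounding registration step are illustrative placeholders, not the project's actual code:

```typescript
// Hypothetical startup filter driven by DISABLED_TOOLS.
// `ToolDefinition` stands in for whatever tool shape the server registers.
interface ToolDefinition {
  name: string;
  description: string;
}

function filterDisabledTools(allTools: ToolDefinition[]): ToolDefinition[] {
  // Comma-separated list, e.g. DISABLED_TOOLS=n8n_diagnostic,n8n_health_check
  const disabled = new Set(
    (process.env.DISABLED_TOOLS ?? '')
      .split(',')
      .map((name) => name.trim())
      .filter(Boolean)
  );
  // An empty default means every tool stays registered.
  return allTools.filter((tool) => !disabled.has(tool.name));
}
```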
.env.example (85 changed lines)
@@ -69,6 +69,57 @@ AUTH_TOKEN=your-secure-token-here
# Default: 0 (disabled)
# TRUST_PROXY=0

+# =========================
+# SECURITY CONFIGURATION
+# =========================
+
+# Rate Limiting Configuration
+# Protects authentication endpoint from brute force attacks
+# Window: Time period in milliseconds (default: 900000 = 15 minutes)
+# Max: Maximum authentication attempts per IP within window (default: 20)
+# AUTH_RATE_LIMIT_WINDOW=900000
+# AUTH_RATE_LIMIT_MAX=20
+
+# SSRF Protection Mode
+# Prevents webhooks from accessing internal networks and cloud metadata
+#
+# Modes:
+# - strict (default): Block localhost + private IPs + cloud metadata
+#   Use for: Production deployments, cloud environments
+#   Security: Maximum
+#
+# - moderate: Allow localhost, block private IPs + cloud metadata
+#   Use for: Local development with local n8n instance
+#   Security: Good balance
+#   Example: n8n running on http://localhost:5678 or http://host.docker.internal:5678
+#
+# - permissive: Allow localhost + private IPs, block cloud metadata
+#   Use for: Internal network testing, private cloud (NOT for production)
+#   Security: Minimal - use with caution
+#
+# Default: strict
+# WEBHOOK_SECURITY_MODE=strict
+#
+# For local development with local n8n:
+# WEBHOOK_SECURITY_MODE=moderate
+
+# Disabled Tools Configuration
+# Filter specific tools from registration at startup
+# Useful for multi-tenant deployments, security hardening, or feature flags
+#
+# Format: Comma-separated list of tool names
+# Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check,custom_tool
+#
+# Common use cases:
+# - Multi-tenant: Hide tools that check env vars instead of instance context
+#   Example: DISABLED_TOOLS=n8n_diagnostic,n8n_health_check
+# - Security: Disable management tools in production for certain users
+# - Feature flags: Gradually roll out new tools
+# - Deployment-specific: Different tool sets for cloud vs self-hosted
+#
+# Default: (empty - all tools enabled)
+# DISABLED_TOOLS=
+
# =========================
# MULTI-TENANT CONFIGURATION
# =========================
@@ -132,4 +183,36 @@ ENABLE_MULTI_TENANT=false

# Enable metadata generation during template fetch (default: false)
# Set to true to automatically generate metadata when running fetch:templates
# METADATA_GENERATION_ENABLED=false
+
+# ========================================
+# INTEGRATION TESTING CONFIGURATION
+# ========================================
+# Configuration for integration tests that call real n8n instance API
+
+# n8n API Configuration for Integration Tests
+# For local development: Use your local n8n instance
+# For CI: These will be provided by GitHub secrets
+# N8N_API_URL=http://localhost:5678
+# N8N_API_KEY=
+
+# Pre-activated Webhook Workflows for Testing
+# These workflows must be created manually in n8n and activated
+# because n8n API doesn't support workflow activation.
+#
+# Setup Instructions:
+# 1. Create 4 workflows in n8n UI (one for each HTTP method)
+# 2. Each workflow should have a single Webhook node
+# 3. Configure webhook paths: mcp-test-get, mcp-test-post, mcp-test-put, mcp-test-delete
+# 4. ACTIVATE each workflow in n8n UI
+# 5. Copy the workflow IDs here
+#
+# N8N_TEST_WEBHOOK_GET_ID=     # Workflow ID for GET method webhook
+# N8N_TEST_WEBHOOK_POST_ID=    # Workflow ID for POST method webhook
+# N8N_TEST_WEBHOOK_PUT_ID=     # Workflow ID for PUT method webhook
+# N8N_TEST_WEBHOOK_DELETE_ID=  # Workflow ID for DELETE method webhook
+
+# Test Configuration
+N8N_TEST_CLEANUP_ENABLED=true       # Enable automatic cleanup of test workflows
+N8N_TEST_TAG=mcp-integration-test   # Tag applied to all test workflows
+N8N_TEST_NAME_PREFIX=[MCP-TEST]     # Name prefix for test workflows
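
To make the three `WEBHOOK_SECURITY_MODE` levels described in the .env.example diff above concrete, here is a hedged sketch of how a webhook URL guard could classify targets per mode. The helper names and the exact string checks are illustrative assumptions, not the project's actual implementation:

```typescript
// Illustrative SSRF guard for the strict/moderate/permissive modes above.
// Helper names and simplified IP checks are assumptions, not real code.
type SecurityMode = 'strict' | 'moderate' | 'permissive';

function isLocalhost(host: string): boolean {
  return host === 'localhost' || host.startsWith('127.') || host === '::1';
}

function isPrivateIp(host: string): boolean {
  // RFC 1918 ranges, simplified for illustration.
  return /^10\./.test(host)
    || /^192\.168\./.test(host)
    || /^172\.(1[6-9]|2\d|3[01])\./.test(host);
}

function isCloudMetadata(host: string): boolean {
  // AWS/GCP/Azure-style metadata endpoints; blocked in every mode.
  return host === '169.254.169.254' || host === 'metadata.google.internal';
}

function isWebhookTargetAllowed(url: string, mode: SecurityMode): boolean {
  const host = new URL(url).hostname;
  if (isCloudMetadata(host)) return false;              // blocked everywhere
  if (isLocalhost(host)) return mode !== 'strict';      // moderate/permissive only
  if (isPrivateIp(host)) return mode === 'permissive';  // permissive only
  return true;                                          // public hosts allowed
}
```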
.github/workflows/docker-build.yml (vendored, 52 changed lines)
@@ -5,8 +5,6 @@ on:
  push:
    branches:
      - main
-   tags:
-     - 'v*'
  paths-ignore:
    - '**.md'
    - '**.txt'

@@ -38,6 +36,12 @@ on:
    - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

+# Prevent concurrent Docker pushes across all workflows (shared with release.yml)
+# This ensures docker-build.yml and release.yml never push to 'latest' simultaneously
+concurrency:
+  group: docker-push-${{ github.ref }}
+  cancel-in-progress: false
+
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

@@ -89,16 +93,54 @@ jobs:
        uses: docker/build-push-action@v5
        with:
          context: .
-         no-cache: true
+         no-cache: false
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false

+     - name: Verify multi-arch manifest for latest tag
+       if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
+       run: |
+         echo "Verifying multi-arch manifest for latest tag..."
+
+         # Retry with exponential backoff (registry propagation can take time)
+         MAX_ATTEMPTS=5
+         ATTEMPT=1
+         WAIT_TIME=2
+
+         while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
+           echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
+
+           MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)
+
+           # Check for both platforms
+           if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
+             echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
+             echo "$MANIFEST"
+             exit 0
+           fi
+
+           if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
+             echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
+             sleep $WAIT_TIME
+             WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
+           fi
+
+           ATTEMPT=$((ATTEMPT + 1))
+         done
+
+         echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
+         echo "$MANIFEST"
+         exit 1
+
  build-railway:
    name: Build Railway Docker Image
    runs-on: ubuntu-latest
    needs: build
    permissions:
      contents: read
      packages: write

@@ -143,11 +185,13 @@ jobs:
        with:
          context: .
          file: ./Dockerfile.railway
-         no-cache: true
+         no-cache: false
          platforms: linux/amd64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta-railway.outputs.tags }}
          labels: ${{ steps.meta-railway.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false

# Nginx build commented out until Phase 2
.github/workflows/release.yml (vendored, 243 changed lines)
@@ -13,9 +13,10 @@ permissions:
  issues: write
  pull-requests: write

-# Prevent concurrent releases
+# Prevent concurrent Docker pushes across all workflows (shared with docker-build.yml)
+# This ensures release.yml and docker-build.yml never push to 'latest' simultaneously
concurrency:
-  group: release
+  group: docker-push-${{ github.ref }}
  cancel-in-progress: false

env:

@@ -79,53 +80,111 @@ jobs:
            echo "ℹ️ No version change detected"
          fi

-  extract-changelog:
-    name: Extract Changelog
+     - name: Validate version against npm registry
+       if: steps.check.outputs.changed == 'true'
+       run: |
+         CURRENT_VERSION="${{ steps.check.outputs.version }}"
+
+         # Get latest version from npm (handle package not found)
+         NPM_VERSION=$(npm view n8n-mcp version 2>/dev/null || echo "0.0.0")
+
+         echo "Current version: $CURRENT_VERSION"
+         echo "NPM registry version: $NPM_VERSION"
+
+         # Check if version already exists in npm
+         if [ "$CURRENT_VERSION" = "$NPM_VERSION" ]; then
+           echo "❌ Error: Version $CURRENT_VERSION already published to npm"
+           echo "Please bump the version in package.json before releasing"
+           exit 1
+         fi
+
+         # Simple semver comparison (assumes format: major.minor.patch)
+         # Compare if current version is greater than npm version
+         if [ "$NPM_VERSION" != "0.0.0" ]; then
+           # Sort versions and check if current is not the highest
+           HIGHEST=$(printf '%s\n%s' "$NPM_VERSION" "$CURRENT_VERSION" | sort -V | tail -n1)
+           if [ "$HIGHEST" != "$CURRENT_VERSION" ]; then
+             echo "❌ Error: Version $CURRENT_VERSION is not greater than npm version $NPM_VERSION"
+             echo "Please use a higher version number"
+             exit 1
+           fi
+         fi
+
+         echo "✅ Version $CURRENT_VERSION is valid (higher than npm version $NPM_VERSION)"
+
+  generate-release-notes:
+    name: Generate Release Notes
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'
    outputs:
-     release-notes: ${{ steps.extract.outputs.notes }}
-     has-notes: ${{ steps.extract.outputs.has-notes }}
+     release-notes: ${{ steps.generate.outputs.notes }}
+     has-notes: ${{ steps.generate.outputs.has-notes }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
-     - name: Extract changelog for version
-       id: extract
+       with:
+         fetch-depth: 0 # Need full history for git log
+
+     - name: Generate release notes from commits
+       id: generate
        run: |
-         VERSION="${{ needs.detect-version-change.outputs.new-version }}"
-         CHANGELOG_FILE="docs/CHANGELOG.md"
-
-         if [ ! -f "$CHANGELOG_FILE" ]; then
-           echo "Changelog file not found at $CHANGELOG_FILE"
-           echo "has-notes=false" >> $GITHUB_OUTPUT
-           echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
-           exit 0
-         fi
-
-         # Use the extracted changelog script
-         if NOTES=$(node scripts/extract-changelog.js "$VERSION" "$CHANGELOG_FILE" 2>/dev/null); then
+         CURRENT_VERSION="${{ needs.detect-version-change.outputs.new-version }}"
+         CURRENT_TAG="v$CURRENT_VERSION"
+
+         # Get the previous tag (excluding the current tag which doesn't exist yet)
+         PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v "^$CURRENT_TAG$" | head -1)
+
+         echo "Current version: $CURRENT_VERSION"
+         echo "Current tag: $CURRENT_TAG"
+         echo "Previous tag: $PREVIOUS_TAG"
+
+         if [ -z "$PREVIOUS_TAG" ]; then
+           echo "ℹ️ No previous tag found, this might be the first release"
+
+           # Generate initial release notes using script
+           if NOTES=$(node scripts/generate-initial-release-notes.js "$CURRENT_VERSION" 2>/dev/null); then
+             echo "✅ Successfully generated initial release notes for version $CURRENT_VERSION"
+           else
+             echo "⚠️ Could not generate initial release notes for version $CURRENT_VERSION"
+             NOTES="Initial release v$CURRENT_VERSION"
+           fi
+
           echo "has-notes=true" >> $GITHUB_OUTPUT

           # Use heredoc to properly handle multiline content
           {
             echo "notes<<EOF"
             echo "$NOTES"
             echo "EOF"
           } >> $GITHUB_OUTPUT

-          echo "✅ Successfully extracted changelog for version $VERSION"
         else
-          echo "has-notes=false" >> $GITHUB_OUTPUT
-          echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
-          echo "⚠️ Could not extract changelog for version $VERSION"
+          echo "✅ Previous tag found: $PREVIOUS_TAG"
+
+          # Generate release notes between tags
+          if NOTES=$(node scripts/generate-release-notes.js "$PREVIOUS_TAG" "HEAD" 2>/dev/null); then
+            echo "has-notes=true" >> $GITHUB_OUTPUT
+
+            # Use heredoc to properly handle multiline content
+            {
+              echo "notes<<EOF"
+              echo "$NOTES"
+              echo "EOF"
+            } >> $GITHUB_OUTPUT
+
+            echo "✅ Successfully generated release notes from $PREVIOUS_TAG to $CURRENT_TAG"
+          else
+            echo "has-notes=false" >> $GITHUB_OUTPUT
+            echo "notes=Failed to generate release notes for version $CURRENT_VERSION" >> $GITHUB_OUTPUT
+            echo "⚠️ Could not generate release notes for version $CURRENT_VERSION"
+          fi
         fi

  create-release:
    name: Create GitHub Release
    runs-on: ubuntu-latest
-   needs: [detect-version-change, extract-changelog]
+   needs: [detect-version-change, generate-release-notes]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    outputs:
      release-id: ${{ steps.create.outputs.id }}

@@ -156,7 +215,7 @@ jobs:
          cat > release_body.md << 'EOF'
          # Release v${{ needs.detect-version-change.outputs.new-version }}

-         ${{ needs.extract-changelog.outputs.release-notes }}
+         ${{ needs.generate-release-notes.outputs.release-notes }}

          ---

@@ -206,8 +265,8 @@ jobs:
          echo "id=$RELEASE_ID" >> $GITHUB_OUTPUT
          echo "upload_url=https://uploads.github.com/repos/${{ github.repository }}/releases/$RELEASE_ID/assets{?name,label}" >> $GITHUB_OUTPUT

-  build-and-test:
-    name: Build and Test
+  build-and-verify:
+    name: Build and Verify
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'

@@ -226,22 +285,28 @@ jobs:

      - name: Build project
        run: npm run build

-     - name: Rebuild database
-       run: npm run rebuild
-
-     - name: Run tests
-       run: npm test
-       env:
-         CI: true
+     # Database is already built and committed during development
+     # Rebuilding here causes segfault due to memory pressure (exit code 139)
+     - name: Verify database exists
+       run: |
+         if [ ! -f "data/nodes.db" ]; then
+           echo "❌ Error: data/nodes.db not found"
+           echo "Please run 'npm run rebuild' locally and commit the database"
+           exit 1
+         fi
+         echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"
+
+     # Skip tests - they already passed in PR before merge
+     # Running them again on the same commit adds no safety, only time (~6-7 min)

      - name: Run type checking
        run: npm run typecheck

  publish-npm:
    name: Publish to NPM
    runs-on: ubuntu-latest
-   needs: [detect-version-change, build-and-test, create-release]
+   needs: [detect-version-change, build-and-verify, create-release]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    steps:
      - name: Checkout repository

@@ -259,10 +324,16 @@ jobs:

      - name: Build project
        run: npm run build

-     - name: Rebuild database
-       run: npm run rebuild
+     # Database is already built and committed during development
+     - name: Verify database exists
+       run: |
+         if [ ! -f "data/nodes.db" ]; then
+           echo "❌ Error: data/nodes.db not found"
+           exit 1
+         fi
+         echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"

      - name: Sync runtime version
        run: npm run sync:runtime-version

@@ -290,6 +361,15 @@ jobs:
            const pkg = require('./package.json');
            pkg.name = 'n8n-mcp';
            pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
+           pkg.main = 'dist/index.js';
+           pkg.types = 'dist/index.d.ts';
+           pkg.exports = {
+             '.': {
+               types: './dist/index.d.ts',
+               require: './dist/index.js',
+               import: './dist/index.js'
+             }
+           };
            pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
            pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
            pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];

@@ -324,7 +404,7 @@ jobs:
  build-docker:
    name: Build and Push Docker Images
    runs-on: ubuntu-latest
-   needs: [detect-version-change, build-and-test]
+   needs: [detect-version-change, build-and-verify]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    permissions:
      contents: read

@@ -382,7 +462,76 @@ jobs:
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

+     - name: Verify multi-arch manifest for latest tag
+       run: |
+         echo "Verifying multi-arch manifest for latest tag..."
+
+         # Retry with exponential backoff (registry propagation can take time)
+         MAX_ATTEMPTS=5
+         ATTEMPT=1
+         WAIT_TIME=2
+
+         while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
+           echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
+
+           MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)
+
+           # Check for both platforms
+           if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
+             echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
+             echo "$MANIFEST"
+             exit 0
+           fi
+
+           if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
+             echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
+             sleep $WAIT_TIME
+             WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
+           fi
+
+           ATTEMPT=$((ATTEMPT + 1))
+         done
+
+         echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
+         echo "$MANIFEST"
+         exit 1
+
+     - name: Verify multi-arch manifest for version tag
+       run: |
+         VERSION="${{ needs.detect-version-change.outputs.new-version }}"
+         echo "Verifying multi-arch manifest for version tag :$VERSION (without 'v' prefix)..."
+
+         # Retry with exponential backoff (registry propagation can take time)
+         MAX_ATTEMPTS=5
+         ATTEMPT=1
+         WAIT_TIME=2
+
+         while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
+           echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
+
+           MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:$VERSION 2>&1 || true)
+
+           # Check for both platforms
+           if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
+             echo "✅ Multi-arch manifest verified for $VERSION: both amd64 and arm64 present"
+             echo "$MANIFEST"
+             exit 0
+           fi
+
+           if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
+             echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
+             sleep $WAIT_TIME
+             WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
+           fi
+
+           ATTEMPT=$((ATTEMPT + 1))
+         done
+
+         echo "❌ ERROR: Multi-arch manifest incomplete for version $VERSION after $MAX_ATTEMPTS attempts!"
+         echo "$MANIFEST"
+         exit 1
+
      - name: Extract metadata for Railway image
        id: meta-railway
        uses: docker/metadata-action@v5
.github/workflows/test.yml (vendored, 6 changed lines)
@@ -72,6 +72,12 @@ jobs:
        run: npm run test:integration -- --reporter=default --reporter=junit
        env:
          CI: true
+         N8N_API_URL: ${{ secrets.N8N_API_URL }}
+         N8N_API_KEY: ${{ secrets.N8N_API_KEY }}
+         N8N_TEST_WEBHOOK_GET_URL: ${{ secrets.N8N_TEST_WEBHOOK_GET_URL }}
+         N8N_TEST_WEBHOOK_POST_URL: ${{ secrets.N8N_TEST_WEBHOOK_POST_URL }}
+         N8N_TEST_WEBHOOK_PUT_URL: ${{ secrets.N8N_TEST_WEBHOOK_PUT_URL }}
+         N8N_TEST_WEBHOOK_DELETE_URL: ${{ secrets.N8N_TEST_WEBHOOK_DELETE_URL }}

      # Generate test summary
      - name: Generate test summary
.gitignore (vendored, 3 changed lines)
@@ -93,6 +93,9 @@ tmp/
docs/batch_*.jsonl
**/batch_*_error.jsonl

+# Local documentation and analysis files
+docs/local/
+
-# Database files
+# Database files - nodes.db is now tracked directly
# data/*.db
ANALYSIS_QUICK_REFERENCE.md (new file, 209 lines)
@@ -0,0 +1,209 @@
# N8N-MCP Validation Analysis: Quick Reference

**Analysis Date**: November 8, 2025 | **Data Period**: 90 days | **Sample Size**: 29,218 events

---

## The Core Finding

**Validation is working perfectly. Guidance is the problem.**

- 29,218 validation events successfully prevented bad deployments
- 100% of agents fix errors same-day (proving feedback works)
- 12.6% error rate for advanced users (who attempt complex workflows)
- High error volume = high usage, not broken system

---

## Top 3 Problem Areas (75% of errors)

| Area | Errors | Root Cause | Quick Fix |
|------|--------|-----------|-----------|
| **Workflow Structure** | 1,268 (26%) | JSON malformation | Better error messages with examples |
| **Connections** | 676 (14%) | Syntax unintuitive | Create connections guide with diagrams |
| **Required Fields** | 378 (8%) | Not marked upfront | Add "⚠️ REQUIRED" to tool responses |

---

## Problem Nodes (By Frequency)

```
Webhook/Trigger ......... 127 failures (40 users)
Slack ................... 73 failures (2 users)
AI Agent ................ 36 failures (20 users)
OpenAI .................. 35 failures (8 users)
HTTP Request ............ 31 failures (13 users)
```

---

## Top 5 Validation Errors

1. **"Duplicate node ID: undefined"** (179)
   - Fix: Point to exact location + show example format (see the sketch after this list)

2. **"Single-node workflows only valid for webhooks"** (58)
   - Fix: Create webhook guide explaining rule

3. **"responseNode requires onError: continueRegularOutput"** (57)
   - Fix: Same guide + inline error context

4. **"Required property X cannot be empty"** (25)
   - Fix: Mark required fields before validation

5. **"Duplicate node name: undefined"** (61)
   - Fix: Related to structural issues, same solution as #1
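
Fix #1 asks for errors that point to the exact location and show an example format. A hedged sketch of what such an enriched validation error could look like; the field names and error shape are illustrative, not the project's actual error format:

```typescript
// Illustrative enriched validation error, per fix #1 above.
// Field names are assumptions, not the project's real error shape.
interface ValidationError {
  code: string;
  message: string;
  location: string;  // JSON path to the offending element
  example: string;   // minimal valid snippet the agent can copy
}

const duplicateNodeIdError: ValidationError = {
  code: 'DUPLICATE_NODE_ID',
  message: 'Duplicate node ID: undefined. Every node needs a unique string "id".',
  location: 'workflow.nodes[2].id',
  example: '{ "id": "a1b2c3", "name": "Send Message", "type": "n8n-nodes-base.slack" }',
};
```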
---

## Success Indicators

✓ **Agents learn from errors**: 100% same-day correction rate
✓ **Validation catches issues**: Prevents bad deployments
✓ **Feedback is clear**: Quick fixes show error messages work
✓ **No systemic failures**: No "unfixable" errors

---

## What Works Well

- Error messages lead to immediate corrections
- Agents retry and succeed same-day
- Validation prevents broken workflows
- 9,021 users actively using the system

---

## What Needs Improvement

1. Required fields not marked in tool responses
2. Error messages don't show valid options for enums
3. Workflow structure documentation lacks examples
4. Connection syntax unintuitive/undocumented (see the sketch after this list)
5. Some error messages too generic
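
Item 4 flags n8n's connection syntax. For context, a minimal hand-written fragment showing the shape a connections guide would need to explain: n8n keys connections by the source node's display name, then by output type, then an array of output slots, each holding an array of targets. The node names and IDs here are made up for illustration:

```typescript
// Minimal n8n workflow fragment illustrating the connections shape.
const workflowFragment = {
  nodes: [
    { id: '1', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [0, 0], parameters: {} },
    { id: '2', name: 'Slack', type: 'n8n-nodes-base.slack', position: [250, 0], parameters: {} },
  ],
  connections: {
    // Keyed by the SOURCE node's display name, not its id.
    Webhook: {
      main: [
        // Outer array: one entry per output slot of the source node.
        // Inner array: the targets wired to that slot.
        [{ node: 'Slack', type: 'main', index: 0 }],
      ],
    },
  },
};
```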
---

## Implementation Plan

### Phase 1 (2 weeks): Quick Wins
- Enhanced error messages (location + example)
- Required field markers in tools
- Webhook configuration guide
- **Expected Impact**: 25-30% failure reduction

### Phase 2 (2 weeks): Documentation
- Enum value suggestions in validation
- Workflow connections guide
- Error handler configuration guide
- AI Agent validation improvements
- **Expected Impact**: Additional 15-20% reduction

### Phase 3 (2 weeks): Advanced Features
- Improved search with config hints
- Node type fuzzy matching
- KPI tracking setup
- Test coverage
- **Expected Impact**: Additional 10-15% reduction

**Total Impact**: 50-65% failure reduction (target: 6-7% error rate)

---

## Key Metrics

| Metric | Current | Target | Timeline |
|--------|---------|--------|----------|
| Validation failure rate | 12.6% | 6-7% | 6 weeks |
| First-attempt success | ~77% | 85%+ | 6 weeks |
| Retry success | 100% | 100% | N/A |
| Webhook failures | 127 | <30 | Week 2 |
| Connection errors | 676 | <270 | Week 4 |

---

## Files Delivered

1. **VALIDATION_ANALYSIS_REPORT.md** (27KB)
   - Complete analysis with 16 SQL queries
   - Detailed findings by category
   - 8 actionable recommendations

2. **VALIDATION_ANALYSIS_SUMMARY.md** (13KB)
   - Executive summary (one-page)
   - Key metrics scorecard
   - Top recommendations with ROI

3. **IMPLEMENTATION_ROADMAP.md** (4.3KB)
   - 6-week implementation plan
   - Phase-by-phase breakdown
   - Code locations and effort estimates

4. **ANALYSIS_QUICK_REFERENCE.md** (this file)
   - Quick lookup reference
   - Top problems at a glance
   - Decision-making summary

---

## Next Steps

1. **Week 1**: Review analysis + get team approval
2. **Week 2**: Start Phase 1 (error messages + markers)
3. **Week 4**: Deploy Phase 1 + start Phase 2
4. **Week 6**: Deploy Phase 2 + start Phase 3
5. **Week 8**: Deploy Phase 3 + measure impact
6. **Week 9+**: Monitor KPIs + iterate

---

## Key Recommendations Priority

### HIGH (Do First - Week 1-2)
1. Enhance structure error messages
2. Add required field markers to tools
3. Create webhook configuration guide

### MEDIUM (Do Next - Week 3-4)
4. Add enum suggestions to validation responses
5. Create workflow connections guide
6. Add AI Agent node validation

### LOW (Do Later - Week 5-6)
7. Enhance search with config hints
8. Build fuzzy node matcher
9. Set up KPI tracking

---

## Discussion Points

**Q: Why don't we just weaken validation?**
A: Validation prevents 29,218 bad deployments. That's its job. We improve guidance instead.

**Q: Are agents really learning from errors?**
A: Yes, 100% same-day recovery across 661 user-date pairs with errors.

**Q: Why do documentation readers have higher error rates?**
A: They attempt more complex workflows (6.8x more attempts). Success rate is still 87.4%.

**Q: Which node needs the most help?**
A: Webhook/Trigger configuration (127 failures). Most urgent fix.

**Q: Can we hit 50% reduction in 6 weeks?**
A: Yes, analysis shows 50-65% reduction is achievable with these changes.

---

## Contact & Questions

For detailed information:
- Full analysis: `VALIDATION_ANALYSIS_REPORT.md`
- Executive summary: `VALIDATION_ANALYSIS_SUMMARY.md`
- Implementation plan: `IMPLEMENTATION_ROADMAP.md`

---

**Report Status**: Complete and Ready for Action
**Confidence Level**: High (9,021 users, 29,218 events, comprehensive analysis)
**Generated**: November 8, 2025
CHANGELOG.md (5893 changed lines): diff suppressed because it is too large
CLAUDE.md (44 changed lines)
@@ -28,8 +28,15 @@ src/
│   ├── enhanced-config-validator.ts # Operation-aware validation (NEW in v2.4.2)
│   ├── node-specific-validators.ts  # Node-specific validation logic (NEW in v2.4.2)
│   ├── property-dependencies.ts     # Dependency analysis (NEW in v2.4)
│   ├── type-structure-service.ts    # Type structure validation (NEW in v2.22.21)
│   ├── expression-validator.ts      # n8n expression syntax validation (NEW in v2.5.0)
│   └── workflow-validator.ts        # Complete workflow validation (NEW in v2.5.0)
├── types/
│   ├── type-structures.ts           # Type structure definitions (NEW in v2.22.21)
│   ├── instance-context.ts          # Multi-tenant instance configuration
│   └── session-state.ts             # Session persistence types (NEW in v2.24.1)
├── constants/
│   └── type-structures.ts           # 22 complete type structures (NEW in v2.22.21)
├── templates/
│   ├── template-fetcher.ts          # Fetches templates from n8n.io API (NEW in v2.4.1)
│   ├── template-repository.ts       # Template database operations (NEW in v2.4.1)

@@ -40,6 +47,7 @@ src/
│   ├── test-nodes.ts                # Critical node tests
│   ├── test-essentials.ts           # Test new essentials tools (NEW in v2.4)
│   ├── test-enhanced-validation.ts  # Test enhanced validation (NEW in v2.4.2)
│   ├── test-structure-validation.ts # Test type structure validation (NEW in v2.22.21)
│   ├── test-workflow-validation.ts  # Test workflow validation (NEW in v2.5.0)
│   ├── test-ai-workflow-validation.ts # Test AI workflow validation (NEW in v2.5.1)
│   ├── test-mcp-tools.ts            # Test MCP tool enhancements (NEW in v2.5.1)

@@ -58,7 +66,9 @@ src/
│   ├── console-manager.ts           # Console output isolation (NEW in v2.3.1)
│   └── logger.ts                    # Logging utility with HTTP awareness
├── http-server-single-session.ts    # Single-session HTTP server (NEW in v2.3.1)
│                                    # Session persistence API (NEW in v2.24.1)
├── mcp-engine.ts                    # Clean API for service integration (NEW in v2.3.1)
│                                    # Session persistence wrappers (NEW in v2.24.1)
└── index.ts                         # Library exports
```

@@ -76,6 +86,7 @@ npm run test:unit # Run unit tests only
npm run test:integration # Run integration tests
npm run test:coverage # Run tests with coverage report
npm run test:watch # Run tests in watch mode
npm run test:structure-validation # Test type structure validation (Phase 3)

# Run a single test file
npm test -- tests/unit/services/property-filter.test.ts

@@ -126,6 +137,7 @@ npm run test:templates # Test template functionality
4. **Service Layer** (`services/`)
   - **Property Filter**: Reduces node properties to AI-friendly essentials
   - **Config Validator**: Multi-profile validation system
   - **Type Structure Service**: Validates complex type structures (filter, resourceMapper, etc.)
   - **Expression Validator**: Validates n8n expression syntax
   - **Workflow Validator**: Complete workflow structure validation

@@ -183,6 +195,35 @@ The MCP server exposes tools in several categories:
### Development Best Practices
- Run typecheck and lint after every code change

### Session Persistence Feature (v2.24.1)

**Location:**
- Types: `src/types/session-state.ts`
- Implementation: `src/http-server-single-session.ts` (lines 698-702, 1444-1584)
- Wrapper: `src/mcp-engine.ts` (lines 123-169)
- Tests: `tests/unit/http-server/session-persistence.test.ts`, `tests/unit/mcp-engine/session-persistence.test.ts`

**Key Features:**
- **Export/Restore API**: `exportSessionState()` and `restoreSessionState()` methods (usage sketch below)
- **Multi-tenant support**: Enables zero-downtime deployments for SaaS platforms
- **Security-first**: API keys exported as plaintext - downstream MUST encrypt
- **Dormant sessions**: Restored sessions recreate transports on first request
- **Automatic expiration**: Respects `sessionTimeout` setting (default 30 min)
- **MAX_SESSIONS limit**: Caps at 100 concurrent sessions

**Important Implementation Notes:**
- Only exports sessions with valid n8nApiUrl and n8nApiKey in context
- Skips expired sessions during both export and restore
- Uses `validateInstanceContext()` for data integrity checks
- Handles null/invalid session gracefully with warnings
- Session metadata (timestamps) and context (credentials) are persisted
- Transport and server objects are NOT persisted (recreated on-demand)
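
A hedged usage sketch of the export/restore cycle described above, for example around a zero-downtime deploy. Only the `exportSessionState()`/`restoreSessionState()` method names come from the feature itself; the engine shape, async signatures, and the `encrypt`/`decrypt` helpers are assumptions standing in for the host platform's own pieces:

```typescript
// Illustrative handoff using the export/restore API named above.
// The engine shape, async signatures, and encrypt/decrypt helpers are
// placeholders; only the two method names come from the feature.
declare const engine: {
  exportSessionState(): Promise<unknown>;
  restoreSessionState(state: unknown): Promise<void>;
};
declare function encrypt(plaintext: string): string;  // host platform's KMS
declare function decrypt(ciphertext: string): string; // host platform's KMS

// Old instance, before shutdown: export, then encrypt before storing,
// because API keys leave exportSessionState() as plaintext.
async function handOff(): Promise<string> {
  const state = await engine.exportSessionState();
  return encrypt(JSON.stringify(state));
}

// New instance, on startup: decrypt and restore. Restored sessions stay
// dormant until the first request recreates their transports; expired
// sessions are skipped and the MAX_SESSIONS cap (100) still applies.
async function takeOver(blob: string): Promise<void> {
  await engine.restoreSessionState(JSON.parse(decrypt(blob)));
}
```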
**Testing:**
- 22 unit tests covering export, restore, edge cases, and round-trip cycles
- Tests use current timestamps to avoid expiration issues
- Integration with multi-tenant backends documented in README.md

# important-instruction-reminders
Do what has been asked; nothing more, nothing less.
NEVER create files unless they're absolutely necessary for achieving your goal.

@@ -192,4 +233,5 @@ NEVER proactively create documentation files (*.md) or README files. Only create
- When the user asks to review issues, you should use GH CLI to get the issue and all the comments
- When the task can be divided into separated subtasks, you should spawn separate sub-agents to handle them in parallel
- Use the best sub-agent for the task as per their descriptions
- Do not use hyperbolic or dramatic language in comments and documentation
+- Add to every commit and PR: Conceived by Romuald Członkowski - and then link to www.aiadvisors.pl/en. Don't add it in conversations
Dockerfile (12 changed lines)
@@ -34,9 +34,13 @@ RUN apk add --no-cache curl su-exec && \
# Copy runtime-only package.json
COPY package.runtime.json package.json

-# Install runtime dependencies with cache mount
+# Install runtime dependencies with better-sqlite3 compilation
+# Build tools (python3, make, g++) are installed, used for compilation, then removed
+# This enables native SQLite (better-sqlite3) instead of sql.js, preventing memory leaks
RUN --mount=type=cache,target=/root/.npm \
-    npm install --production --no-audit --no-fund
+    apk add --no-cache python3 make g++ && \
+    npm install --production --no-audit --no-fund && \
+    apk del python3 make g++

# Copy built application
COPY --from=builder /app/dist ./dist

@@ -78,7 +82,7 @@ ENV IS_DOCKER=true
# To opt-out, uncomment the following line:
# ENV N8N_MCP_TELEMETRY_DISABLED=true

-# Expose HTTP port
+# Expose HTTP port (default 3000, configurable via PORT environment variable at runtime)
EXPOSE 3000

# Set stop signal to SIGTERM (default, but explicit is better)

@@ -86,7 +90,7 @@ STOPSIGNAL SIGTERM

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-    CMD curl -f http://127.0.0.1:3000/health || exit 1
+    CMD sh -c 'curl -f http://127.0.0.1:${PORT:-3000}/health || exit 1'

# Optimized entrypoint
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
@@ -25,16 +25,20 @@ RUN npm run build
FROM node:22-alpine AS runtime
WORKDIR /app

-# Install system dependencies
-RUN apk add --no-cache curl python3 make g++ && \
+# Install runtime dependencies
+RUN apk add --no-cache curl && \
    rm -rf /var/cache/apk/*

# Copy runtime-only package.json
COPY package.runtime.json package.json

-# Install only production dependencies
-RUN npm install --production --no-audit --no-fund && \
-    npm cache clean --force
+# Install production dependencies with temporary build tools
+# Build tools (python3, make, g++) enable better-sqlite3 compilation (native SQLite)
+# They are removed after installation to reduce image size and attack surface
+RUN apk add --no-cache python3 make g++ && \
+    npm install --production --no-audit --no-fund && \
+    npm cache clean --force && \
+    apk del python3 make g++

# Copy built application from builder stage
COPY --from=builder /app/dist ./dist
@@ -1,5 +1,87 @@
# n8n Update Process - Quick Reference

## ⚡ Recommended Fast Workflow (2025-11-04)

**CRITICAL FIRST STEP**: Check existing releases to avoid version conflicts!

```bash
# 1. CHECK EXISTING RELEASES FIRST (prevents version conflicts!)
gh release list | head -5
# Look at the latest version - your new version must be higher!

# 2. Switch to main and pull
git checkout main && git pull

# 3. Check for updates (dry run)
npm run update:n8n:check

# 4. Run update and skip tests (we'll test in CI)
yes y | npm run update:n8n

# 5. Create feature branch
git checkout -b update/n8n-X.X.X

# 6. Update version in package.json (must be HIGHER than latest release!)
# Edit: "version": "2.XX.X" (not the version from the release list!)

# 7. Update CHANGELOG.md
# - Change version number to match package.json
# - Update date to today
# - Update dependency versions

# 8. Update README badge
# Edit line 8: Change n8n version badge to new n8n version

# 9. Commit and push
git add -A
git commit -m "chore: update n8n to X.X.X and bump version to 2.XX.X

- Updated n8n from X.X.X to X.X.X
- Updated n8n-core from X.X.X to X.X.X
- Updated n8n-workflow from X.X.X to X.X.X
- Updated @n8n/n8n-nodes-langchain from X.X.X to X.X.X
- Rebuilt node database with XXX nodes (XXX from n8n-nodes-base, XXX from @n8n/n8n-nodes-langchain)
- Updated README badge with new n8n version
- Updated CHANGELOG with dependency changes

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>"

git push -u origin update/n8n-X.X.X

# 10. Create PR
gh pr create --title "chore: update n8n to X.X.X" --body "Updates n8n and all related dependencies to the latest versions..."

# 11. After PR is merged, verify release triggered
gh release list | head -1
# If the new version appears, you're done!
# If not, the version might have already been released - bump version again and create new PR
```

### Why This Workflow?

✅ **Fast**: Skip local tests (2-3 min saved) - CI runs them anyway
✅ **Safe**: Unit tests in CI verify compatibility
✅ **Clean**: All changes in one PR with proper tracking
✅ **Automatic**: Release workflow triggers on merge if version is new

### Common Issues

**Problem**: Release workflow doesn't trigger after merge
**Cause**: Version number was already released (check `gh release list`)
**Solution**: Create new PR bumping version by one patch number

**Problem**: Integration tests fail in CI with "unauthorized"
**Cause**: n8n test instance credentials expired (infrastructure issue)
**Solution**: Ignore if unit tests pass - this is not a code problem

**Problem**: CI takes 8+ minutes
**Reason**: Integration tests need live n8n instance (slow)
**Normal**: Unit tests (~2 min) + integration tests (~6 min) = ~8 min total

## Quick One-Command Update

For a complete update with tests and publish preparation:

@@ -99,12 +181,14 @@ This command:

## Important Notes

-1. **Always run on main branch** - Make sure you're on main and it's clean
-2. **The update script is smart** - It automatically syncs all n8n dependencies to compatible versions
-3. **Tests are required** - The publish script now runs tests automatically
-4. **Database rebuild is automatic** - The update script handles this for you
-5. **Template sanitization is automatic** - Any API tokens in workflow templates are replaced with placeholders
-6. **Docker image builds automatically** - Pushing to GitHub triggers the workflow
+1. **ALWAYS check existing releases first** - Use `gh release list` to see what versions are already released. Your new version must be higher!
+2. **Release workflow only triggers on version CHANGE** - If you merge a PR with an already-released version (e.g., 2.22.8), the workflow won't run. You'll need to bump to a new version (e.g., 2.22.9) and create another PR.
+3. **Integration test failures in CI are usually infrastructure issues** - If unit tests pass but integration tests fail with "unauthorized", this is typically because the test n8n instance credentials need updating. The code itself is fine.
+4. **Skip local tests - let CI handle them** - Running tests locally adds 2-3 minutes with no benefit since CI runs them anyway. The fast workflow skips local tests.
+5. **The update script is smart** - It automatically syncs all n8n dependencies to compatible versions
+6. **Database rebuild is automatic** - The update script handles this for you
+7. **Template sanitization is automatic** - Any API tokens in workflow templates are replaced with placeholders
+8. **Docker image builds automatically** - Pushing to GitHub triggers the workflow

## GitHub Push Protection

@@ -115,11 +199,27 @@ As of July 2025, GitHub's push protection may block database pushes if they cont
3. If push is still blocked, use the GitHub web interface to review and allow the push

## Time Estimate

### Fast Workflow (Recommended)
- Local work: ~2-3 minutes
  - npm install and database rebuild: ~2-3 minutes
  - File edits (CHANGELOG, README, package.json): ~30 seconds
  - Git operations (commit, push, create PR): ~30 seconds
- CI testing after PR creation: ~8-10 minutes (runs automatically)
  - Unit tests: ~2 minutes
  - Integration tests: ~6 minutes (may fail with infrastructure issues - ignore if unit tests pass)
  - Other checks: ~1 minute

**Total hands-on time: ~3 minutes** (then wait for CI)

### Full Workflow with Local Tests
- Total time: ~5-7 minutes
  - Test suite: ~2.5 minutes
  - npm install and database rebuild: ~2-3 minutes
  - The rest: seconds

**Note**: The fast workflow is recommended since CI runs the same tests anyway.

## Troubleshooting

If tests fail:
P0-R3-TEST-PLAN.md (new file, 484 lines)
@@ -0,0 +1,484 @@
# P0-R3 Feature Test Coverage Plan

## Executive Summary

This document outlines comprehensive test coverage for the P0-R3 feature (Template-based Configuration Examples). The feature adds real-world configuration examples from popular templates to node search and essentials tools.

**Feature Overview:**
- New database table: `template_node_configs` (197 pre-extracted configurations)
- Enhanced tools: `search_nodes({includeExamples: true})` and `get_node_essentials({includeExamples: true})` (see the client sketch after this list)
- Breaking changes: Removed `get_node_for_task` tool
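
A hedged sketch of calling the enhanced tools from an MCP client. The `client.callTool({ name, arguments })` shape matches the snippets later in this plan; the query values and the response-shape comments are inferred from the test descriptions below, not taken from the implementation:

```typescript
// Illustrative MCP client calls for the enhanced tools.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';

async function fetchExamples(client: Client): Promise<void> {
  // search_nodes: with includeExamples true, each result may carry
  // up to 2 real-world configurations (top-2 limit, per the tests below).
  const search = await client.callTool({
    name: 'search_nodes',
    arguments: { query: 'slack', includeExamples: true },
  });

  // get_node_essentials: up to 3 examples, each with a configuration
  // object, source (template, views, complexity), useCases, and metadata.
  const essentials = await client.callTool({
    name: 'get_node_essentials',
    arguments: { nodeType: 'n8n-nodes-base.slack', includeExamples: true },
  });

  console.log(search, essentials);
}
```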
## Test Files Created

### Unit Tests

#### 1. `/tests/unit/scripts/fetch-templates-extraction.test.ts` ✅
**Purpose:** Test template extraction logic from `fetch-templates.ts`

**Coverage:**
- `extractNodeConfigs()` - 90%+ coverage
  - Valid workflows with multiple nodes
  - Empty workflows
  - Malformed compressed data
  - Invalid JSON
  - Nodes without parameters
  - Sticky note filtering
  - Credential handling
  - Expression detection
  - Special characters
  - Large workflows (100 nodes)

- `detectExpressions()` - 100% coverage (see the sketch after this list)
  - `={{...}}` syntax detection
  - `$json` references
  - `$node` references
  - Nested objects
  - Arrays
  - Null/undefined handling
  - Multiple expression types
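
Based on the patterns listed above (`={{...}}`, `$json`, `$node`, nested objects, arrays, null handling), a minimal sketch of what a `detectExpressions()` helper plausibly does; this is a reconstruction from the coverage list, not the script's actual code:

```typescript
// Reconstructed from the coverage list above; not the script's real code.
// Walks a parameters value and reports whether any string looks like an
// n8n expression (={{...}}, $json, or $node references).
function detectExpressions(value: unknown): boolean {
  if (value === null || value === undefined) return false;
  if (typeof value === 'string') {
    return value.includes('={{') || value.includes('$json') || value.includes('$node');
  }
  if (Array.isArray(value)) {
    return value.some(detectExpressions);
  }
  if (typeof value === 'object') {
    return Object.values(value as Record<string, unknown>).some(detectExpressions);
  }
  return false;
}
```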
**Test Count:** 27 tests
|
||||
**Expected Coverage:** 92%+
|
||||
|
||||
---
|
||||
|
||||
#### 2. `/tests/unit/mcp/search-nodes-examples.test.ts` ✅
|
||||
**Purpose:** Test `search_nodes` tool with includeExamples parameter
|
||||
|
||||
**Coverage:**
|
||||
- includeExamples parameter behavior
|
||||
- false: no examples returned
|
||||
- undefined: no examples returned (default)
|
||||
- true: examples returned
|
||||
- Example data structure validation
|
||||
- Top 2 limit enforcement
|
||||
- Backward compatibility
|
||||
- Performance (<100ms)
|
||||
- Error handling (malformed JSON, database errors)
|
||||
- searchNodesLIKE integration
|
||||
- searchNodesFTS integration
|
||||
|
||||
**Test Count:** 12 tests
|
||||
**Expected Coverage:** 85%+
|
||||
|
||||
---
|
||||
|
||||
#### 3. `/tests/unit/mcp/get-node-essentials-examples.test.ts` ✅
|
||||
**Purpose:** Test `get_node_essentials` tool with includeExamples parameter
|
||||
|
||||
**Coverage:**
|
||||
- includeExamples parameter behavior
|
||||
- Full metadata structure
|
||||
- configuration object
|
||||
- source (template, views, complexity)
|
||||
- useCases (limited to 2)
|
||||
- metadata (hasCredentials, hasExpressions)
|
||||
- Cache key differentiation
|
||||
- Backward compatibility
|
||||
- Performance (<100ms)
|
||||
- Error handling
|
||||
- Top 3 limit enforcement
|
||||
|
||||
**Test Count:** 13 tests
|
||||
**Expected Coverage:** 88%+
|
||||
|
||||
---
|
||||
|
||||
### Integration Tests
|
||||
|
||||
#### 4. `/tests/integration/database/template-node-configs.test.ts` ✅
|
||||
**Purpose:** Test database schema, migrations, and operations
|
||||
|
||||
**Coverage:**
|
||||
- Schema validation
|
||||
- Table creation
|
||||
- All columns present
|
||||
- Correct types and constraints
|
||||
- CHECK constraint on complexity
|
||||
- Indexes
|
||||
- idx_config_node_type_rank
|
||||
- idx_config_complexity
|
||||
- idx_config_auth
|
||||
- View: ranked_node_configs
|
||||
- Top 5 per node_type
|
||||
- Correct ordering
|
||||
- Foreign key constraints
|
||||
- CASCADE delete
|
||||
- Referential integrity
|
||||
- Data operations
|
||||
- INSERT with all fields
|
||||
- Nullable fields
|
||||
- Rank updates
|
||||
- Delete rank > 10
|
||||
- Performance
|
||||
- 1000 records < 10ms queries
|
||||
- Migration idempotency
|
||||
|
||||
**Test Count:** 19 tests
|
||||
**Expected Coverage:** 95%+
|
||||
|
||||
---
|
||||
|
||||
#### 5. `/tests/integration/mcp/template-examples-e2e.test.ts` ✅

**Purpose:** End-to-end integration testing

**Coverage:**
- Direct SQL queries
  - Top 2 examples for search_nodes
  - Top 3 examples with metadata for get_node_essentials
- Data structure validation
  - Valid JSON in all fields
  - Credentials when has_credentials=1
- Ranked view functionality
- Performance with 100+ configs
  - Query performance < 5ms
- Complexity filtering
- Edge cases
  - Non-existent node types
  - Long parameters_json (100 params)
  - Special characters (Unicode, emojis, symbols)
- Data integrity
  - Foreign key constraints
  - Cascade deletes

**Test Count:** 14 tests
**Expected Coverage:** 90%+
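
A sketch of the kind of direct SQL these e2e tests run against the ranked view (illustrative only; the view itself is defined in the migration):

```typescript
// Hypothetical sketch - fetches the top-N ranked examples for one node type.
import Database from 'better-sqlite3';

function topExamples(db: Database.Database, nodeType: string, limit: 2 | 3) {
  return db
    .prepare(
      `SELECT node_type, parameters_json, complexity, rank
       FROM ranked_node_configs
       WHERE node_type = ?
       ORDER BY rank
       LIMIT ?`
    )
    .all(nodeType, limit);
}
```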
---
### Test Fixtures

#### 6. `/tests/fixtures/template-configs.ts` ✅

**Purpose:** Reusable test data

**Provides:**
- `sampleConfigs`: 7 realistic node configurations
  - simpleWebhook
  - webhookWithAuth
  - httpRequestBasic
  - httpRequestWithExpressions
  - slackMessage
  - codeNodeTransform
  - codeNodeWithExpressions
- `sampleWorkflows`: 3 complete workflows
  - webhookToSlack
  - apiWorkflow
  - complexWorkflow
- **Helper Functions:**
  - `compressWorkflow()` - Compress to base64
  - `createTemplateMetadata()` - Generate metadata
  - `createConfigBatch()` - Batch create configs
  - `getConfigByComplexity()` - Filter by complexity
  - `getConfigsWithExpressions()` - Filter with expressions
  - `getConfigsWithCredentials()` - Filter with credentials
  - `createInsertStatement()` - SQL insert helper
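
Typical usage in a test, assuming the exports named above (the return shape of `createInsertStatement` is an assumption):

```typescript
// Hypothetical sketch - seeds an in-memory database from the shared fixtures.
// In the real suite the schema comes from the TestDatabase helper.
import { describe, beforeEach, it, expect } from 'vitest';
import Database from 'better-sqlite3';
import { sampleConfigs, createInsertStatement } from '../fixtures/template-configs';

describe('template config queries', () => {
  let db: Database.Database;

  beforeEach(() => {
    db = new Database(':memory:');
    // (migration applied here via the TestDatabase helper in the real suite)
    for (const config of Object.values(sampleConfigs)) {
      const { sql, params } = createInsertStatement(config); // assumed shape
      db.prepare(sql).run(params);
    }
  });

  it('seeds all 7 sample configurations', () => {
    const row = db.prepare('SELECT COUNT(*) AS n FROM template_node_configs').get() as { n: number };
    expect(row.n).toBe(7);
  });
});
```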
---
## Existing Tests Requiring Updates

### High Priority

#### 1. `tests/unit/mcp/parameter-validation.test.ts`

**Line 480:** Remove `get_node_for_task` from the `legacyValidationTools` array

```typescript
// REMOVE THIS:
{ name: 'get_node_for_task', args: {}, expected: 'Missing required parameters for get_node_for_task: task' },
```

**Status:** ⚠️ BREAKING CHANGE - Tool removed

---
#### 2. `tests/unit/mcp/tools.test.ts`

**Update:** Remove `get_node_for_task` from the templates category

```typescript
// BEFORE:
templates: ['list_tasks', 'get_node_for_task', 'search_templates', ...]

// AFTER:
templates: ['list_tasks', 'search_templates', ...]
```

**Add:** Tests for the new includeExamples parameter in tool definitions

```typescript
it('should have includeExamples parameter in search_nodes', () => {
  const searchNodesTool = tools.find(t => t.name === 'search_nodes');
  expect(searchNodesTool.inputSchema.properties.includeExamples).toBeDefined();
  expect(searchNodesTool.inputSchema.properties.includeExamples.type).toBe('boolean');
  expect(searchNodesTool.inputSchema.properties.includeExamples.default).toBe(false);
});

it('should have includeExamples parameter in get_node_essentials', () => {
  const essentialsTool = tools.find(t => t.name === 'get_node_essentials');
  expect(essentialsTool.inputSchema.properties.includeExamples).toBeDefined();
});
```

**Status:** ⚠️ REQUIRED UPDATE

---
#### 3. `tests/integration/mcp-protocol/session-management.test.ts`

**Remove:** Test case calling `get_node_for_task` with invalid task

```typescript
// REMOVE THIS TEST:
client.callTool({ name: 'get_node_for_task', arguments: { task: 'invalid_task' } }).catch(e => e)
```

**Status:** ⚠️ BREAKING CHANGE

---
#### 4. `tests/integration/mcp-protocol/tool-invocation.test.ts`

**Remove:** Entire `get_node_for_task` describe block

**Add:** Tests for new includeExamples functionality

```typescript
describe('search_nodes with includeExamples', () => {
  it('should return examples when includeExamples is true', async () => {
    const response = await client.callTool({
      name: 'search_nodes',
      arguments: { query: 'webhook', includeExamples: true }
    });

    expect(response.results).toBeDefined();
    // Examples may or may not be present depending on database
  });

  it('should not return examples when includeExamples is false', async () => {
    const response = await client.callTool({
      name: 'search_nodes',
      arguments: { query: 'webhook', includeExamples: false }
    });

    expect(response.results).toBeDefined();
    response.results.forEach(node => {
      expect(node.examples).toBeUndefined();
    });
  });
});

describe('get_node_essentials with includeExamples', () => {
  it('should return examples with metadata when includeExamples is true', async () => {
    const response = await client.callTool({
      name: 'get_node_essentials',
      arguments: { nodeType: 'nodes-base.webhook', includeExamples: true }
    });

    expect(response.nodeType).toBeDefined();
    // Examples may or may not be present depending on database
  });
});
```

**Status:** ⚠️ REQUIRED UPDATE

---
### Medium Priority

#### 5. `tests/unit/services/task-templates.test.ts`

**Status:** ✅ No changes needed (TaskTemplates marked as deprecated but not removed)

**Note:** TaskTemplates remains for backward compatibility. Tests should continue to pass.

---
## Test Execution Plan

### Phase 1: Unit Tests
```bash
# Run new unit tests
npm test tests/unit/scripts/fetch-templates-extraction.test.ts
npm test tests/unit/mcp/search-nodes-examples.test.ts
npm test tests/unit/mcp/get-node-essentials-examples.test.ts

# Expected: All pass, 52 tests
```

### Phase 2: Integration Tests
```bash
# Run new integration tests
npm test tests/integration/database/template-node-configs.test.ts
npm test tests/integration/mcp/template-examples-e2e.test.ts

# Expected: All pass, 33 tests
```

### Phase 3: Update Existing Tests
```bash
# Update files as outlined above, then run:
npm test tests/unit/mcp/parameter-validation.test.ts
npm test tests/unit/mcp/tools.test.ts
npm test tests/integration/mcp-protocol/session-management.test.ts
npm test tests/integration/mcp-protocol/tool-invocation.test.ts

# Expected: All pass after updates
```

### Phase 4: Full Test Suite
```bash
# Run all tests
npm test

# Run with coverage
npm run test:coverage

# Expected coverage improvements:
# - src/scripts/fetch-templates.ts: +20% (60% → 80%)
# - src/mcp/server.ts: +5% (75% → 80%)
# - Overall project: +2%
```

---
## Coverage Expectations

### New Code Coverage

| File | Function | Target | Tests |
|------|----------|--------|-------|
| fetch-templates.ts | extractNodeConfigs | 95% | 15 tests |
| fetch-templates.ts | detectExpressions | 100% | 12 tests |
| server.ts | searchNodes (with examples) | 90% | 8 tests |
| server.ts | getNodeEssentials (with examples) | 90% | 10 tests |
| Database migration | template_node_configs | 100% | 19 tests |

### Overall Coverage Goals

- **Unit Tests:** 90%+ coverage for new code
- **Integration Tests:** All happy paths + critical error paths
- **E2E Tests:** Complete feature workflows
- **Performance:** All queries <10ms (database), <100ms (MCP)

---
## Test Infrastructure

### Dependencies Required
All dependencies already present in `package.json`:
- vitest (test runner)
- better-sqlite3 (database)
- @vitest/coverage-v8 (coverage)

### Test Utilities Used
- TestDatabase helper (from existing test utils)
- createTestDatabaseAdapter (from existing test utils)
- Standard vitest matchers

### No New Dependencies Required ✅

---
## Regression Prevention

### Critical Paths Protected

1. **Backward Compatibility**
   - Tools work without includeExamples parameter
   - Existing workflows unchanged
   - Cache keys differentiated (see the sketch below)

2. **Performance**
   - No degradation when includeExamples=false
   - Indexed queries <10ms
   - Example fetch errors don't break responses

3. **Data Integrity**
   - Foreign key constraints enforced
   - JSON validation in all fields
   - Rank calculations correct
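
Cache-key differentiation can be checked with something as small as the following; the key format is an assumption for illustration:

```typescript
// Hypothetical sketch - responses with and without examples must never
// share a cache entry, so the flag is folded into the key.
function essentialsCacheKey(nodeType: string, includeExamples: boolean): string {
  return `essentials:${nodeType}:examples=${includeExamples}`;
}

// Distinct keys for the two variants of the same node:
// essentialsCacheKey('nodes-base.webhook', true) !== essentialsCacheKey('nodes-base.webhook', false)
```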
---
## CI/CD Integration

### GitHub Actions Updates
No changes required. The existing test commands will run the new tests:

```yaml
- run: npm test
- run: npm run test:coverage
```

### Coverage Thresholds
Current thresholds are maintained. Expected improvements:
- Lines: +2%
- Functions: +3%
- Branches: +2%

---
## Manual Testing Checklist

### Pre-Deployment Verification

- [ ] Run `npm run rebuild` - Verify migration applies cleanly
- [ ] Run `npm run fetch:templates --extract-only` - Verify extraction works
- [ ] Check database: `SELECT COUNT(*) FROM template_node_configs` - Should be ~197 (see sketch below)
- [ ] Test MCP tool: `search_nodes({query: "webhook", includeExamples: true})`
- [ ] Test MCP tool: `get_node_essentials({nodeType: "nodes-base.webhook", includeExamples: true})`
- [ ] Verify backward compatibility: Tools work without includeExamples parameter
- [ ] Performance test: Query 100 nodes with examples < 200ms
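
The row-count check can also be scripted; a sketch using better-sqlite3 (the database path is an assumption):

```typescript
// Hypothetical sketch - sanity-checks the extracted config count (~197).
import Database from 'better-sqlite3';

const db = new Database('data/nodes.db', { readonly: true }); // path assumed
const row = db.prepare('SELECT COUNT(*) AS n FROM template_node_configs').get() as { n: number };
console.log(`template_node_configs rows: ${row.n}`); // expect ~197
```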
---
## Rollback Plan

If issues are detected:

1. **Database Rollback:**
   ```sql
   DROP VIEW IF EXISTS ranked_node_configs;
   DROP TABLE IF EXISTS template_node_configs;
   ```

2. **Code Rollback:**
   - Revert server.ts changes
   - Revert tools.ts changes
   - Restore the get_node_for_task tool (if critical)

3. **Test Rollback:**
   - Revert parameter-validation.test.ts
   - Revert tools.test.ts
   - Revert tool-invocation.test.ts

---
## Success Metrics

### Test Metrics
- ✅ 85+ new tests added
- ✅ 0 tests failing after updates
- ✅ Coverage increase of 2%+
- ✅ All performance tests pass

### Feature Metrics
- ✅ 197 template configs extracted
- ✅ Top 2/3 examples returned correctly
- ✅ Query performance <10ms
- ✅ No backward compatibility breaks

---
## Conclusion

This test plan provides **comprehensive coverage** for the P0-R3 feature with:
- **85+ new tests** across unit, integration, and E2E levels
- **Complete coverage** of extraction, storage, and retrieval
- **Backward compatibility** protection
- **Performance validation** (<10ms queries)
- **Clear migration path** for existing tests

**All test files are ready for execution.** Update the 4 existing test files as outlined, then run the full test suite.

**Estimated Total Implementation Time:** 2-3 hours for updating existing tests + validation
PRIVACY.md
@@ -54,6 +54,10 @@ Collected data is used solely to:
- Identify common error patterns
- Improve tool performance and reliability
- Guide development priorities
- Train machine learning models for workflow generation

All ML training uses sanitized, anonymized data only.
Users can opt out at any time with `npx n8n-mcp telemetry disable`

## Data Retention
- Data is retained for analysis purposes
@@ -66,4 +70,4 @@ We may update this privacy policy from time to time. Updates will be reflected i
For questions about telemetry or privacy, please open an issue on GitHub:
https://github.com/czlonkowski/n8n-mcp/issues

Last updated: 2025-09-25
Last updated: 2025-11-06
README.md
@@ -4,22 +4,24 @@

[](https://github.com/czlonkowski/n8n-mcp)
[](https://www.npmjs.com/package/n8n-mcp)
[](https://codecov.io/gh/czlonkowski/n8n-mcp)
[](https://github.com/czlonkowski/n8n-mcp/actions)
[](https://github.com/n8n-io/n8n)
[](https://github.com/czlonkowski/n8n-mcp/actions)
[](https://github.com/n8n-io/n8n)
[](https://github.com/czlonkowski/n8n-mcp/pkgs/container/n8n-mcp)
[](https://railway.com/deploy/n8n-mcp?referralCode=n8n-mcp)

A Model Context Protocol (MCP) server that provides AI assistants with comprehensive access to n8n node documentation, properties, and operations. Deploy in minutes to give Claude and other AI assistants deep knowledge about n8n's 525+ workflow automation nodes.
A Model Context Protocol (MCP) server that provides AI assistants with comprehensive access to n8n node documentation, properties, and operations. Deploy in minutes to give Claude and other AI assistants deep knowledge about n8n's 545 workflow automation nodes.

## Overview

n8n-MCP serves as a bridge between n8n's workflow automation platform and AI models, enabling them to understand and work with n8n nodes effectively. It provides structured access to:

- 📚 **536 n8n nodes** from both n8n-nodes-base and @n8n/n8n-nodes-langchain
- 📚 **543 n8n nodes** from both n8n-nodes-base and @n8n/n8n-nodes-langchain
- 🔧 **Node properties** - 99% coverage with detailed schemas
- ⚡ **Node operations** - 63.6% coverage of available actions
- 📄 **Documentation** - 90% coverage from official n8n docs (including AI nodes)
- 🤖 **AI tools** - 263 AI-capable nodes detected with full documentation
- 📄 **Documentation** - 87% coverage from official n8n docs (including AI nodes)
- 🤖 **AI tools** - 271 AI-capable nodes detected with full documentation
- 💡 **Real-world examples** - 2,646 pre-extracted configurations from popular templates
- 🎯 **Template library** - 2,709 workflow templates with 100% metadata coverage

## ⚠️ Important Safety Warning
@@ -34,12 +36,31 @@ AI results can be unpredictable. Protect your work!

## 🚀 Quick Start

Get n8n-MCP running in 5 minutes:
### Option 1: Hosted Service (Easiest - No Setup!) ☁️

**The fastest way to try n8n-MCP** - no installation, no configuration:

👉 **[dashboard.n8n-mcp.com](https://dashboard.n8n-mcp.com)**

- ✅ **Free tier**: 100 tool calls/day
- ✅ **Instant access**: Start building workflows immediately
- ✅ **Always up-to-date**: Latest n8n nodes and templates
- ✅ **No infrastructure**: We handle everything

Just sign up, get your API key, and connect your MCP client.

---

## 🏠 Self-Hosting Options

Prefer to run n8n-MCP yourself? Choose your deployment method:

### Option A: npx (Quick Local Setup) 🚀

Get n8n-MCP running in minutes:

[](https://youtu.be/5CccjiLLyaY?si=Z62SBGlw9G34IQnQ&t=343)

### Option 1: npx (Fastest - No Installation!) 🚀

**Prerequisites:** [Node.js](https://nodejs.org/) installed on your system

```bash
npx n8n-mcp
```
@@ -49,6 +70,8 @@ npx n8n-mcp

Add to Claude Desktop config:

> ⚠️ **Important**: The `MCP_MODE: "stdio"` environment variable is **required** for Claude Desktop. Without it, you will see JSON parsing errors like `"Unexpected token..."` in the UI. This variable ensures that only JSON-RPC messages are sent to stdout, preventing debug logs from interfering with the protocol.

**Basic configuration (documentation tools only):**
```json
{
```
@@ -94,7 +117,7 @@ Add to Claude Desktop config:

**Restart Claude Desktop after updating configuration** - That's it! 🎉

### Option 2: Docker (Easy & Isolated) 🐳
### Option B: Docker (Isolated & Reproducible) 🐳

**Prerequisites:** Docker installed on your system
@@ -196,10 +219,36 @@ Add to Claude Desktop config:
```json
}
```

>💡 Tip: If you’re running n8n locally on the same machine (e.g., via Docker), use http://host.docker.internal:5678 as the N8N_API_URL.
>💡 Tip: If you're running n8n locally on the same machine (e.g., via Docker), use http://host.docker.internal:5678 as the N8N_API_URL.

> **Note**: The n8n API credentials are optional. Without them, you'll have access to all documentation and validation tools. With them, you'll additionally get workflow management capabilities (create, update, execute workflows).

### 🏠 Local n8n Instance Configuration

If you're running n8n locally (e.g., `http://localhost:5678` or Docker), you need to allow localhost webhooks:

```json
{
  "mcpServers": {
    "n8n-mcp": {
      "command": "docker",
      "args": [
        "run", "-i", "--rm", "--init",
        "-e", "MCP_MODE=stdio",
        "-e", "LOG_LEVEL=error",
        "-e", "DISABLE_CONSOLE_OUTPUT=true",
        "-e", "N8N_API_URL=http://host.docker.internal:5678",
        "-e", "N8N_API_KEY=your-api-key",
        "-e", "WEBHOOK_SECURITY_MODE=moderate",
        "ghcr.io/czlonkowski/n8n-mcp:latest"
      ]
    }
  }
}
```

> ⚠️ **Important:** Set `WEBHOOK_SECURITY_MODE=moderate` to allow webhooks to your local n8n instance. This is safe for local development while still blocking private networks and cloud metadata.

**Important:** The `-i` flag is required for MCP stdio communication.

> 🔧 If you encounter any issues with Docker, check our [Docker Troubleshooting Guide](./docs/DOCKER_TROUBLESHOOTING.md).
@@ -256,6 +305,65 @@ environment:
```yaml
  N8N_MCP_TELEMETRY_DISABLED: "true"
```

## ⚙️ Database & Memory Configuration

### Database Adapters

n8n-mcp uses SQLite for storing node documentation. Two adapters are available, selected with a fallback at startup (sketched below):

1. **better-sqlite3** (Default in Docker)
   - Native C++ bindings for best performance
   - Direct disk writes (no memory overhead)
   - **Now enabled by default** in Docker images (v2.20.2+)
   - Memory usage: ~100-120 MB stable

2. **sql.js** (Fallback)
   - Pure JavaScript implementation
   - In-memory database with periodic saves
   - Used when better-sqlite3 compilation fails
   - Memory usage: ~150-200 MB stable
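
A minimal sketch of that fallback, assuming dynamic imports of both adapters (the real factory in the codebase may differ):

```typescript
// Hypothetical sketch - try native better-sqlite3 first, fall back to sql.js.
async function createDatabaseAdapter(dbPath: string): Promise<unknown> {
  try {
    // Native C++ bindings: fastest, writes directly to disk.
    const Database = (await import('better-sqlite3')).default;
    return new Database(dbPath);
  } catch {
    // Pure-JS fallback: in-memory database persisted periodically.
    const initSqlJs = (await import('sql.js')).default;
    const SQL = await initSqlJs();
    return new SQL.Database();
  }
}
```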
### Memory Optimization (sql.js)

If using the sql.js fallback, you can configure the save interval to balance data safety against memory efficiency (the interval acts as a debounce, sketched below):

**Environment Variable:**
```bash
SQLJS_SAVE_INTERVAL_MS=5000  # Default: 5000ms (5 seconds)
```

**Usage:**
- Controls how long to wait after database changes before saving to disk
- Lower values = more frequent saves = higher memory churn
- Higher values = less frequent saves = lower memory usage
- Minimum: 100ms
- Recommended: 5000-10000ms for production
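
A sketch of the debounce idea (the names are assumptions, not the actual implementation):

```typescript
// Hypothetical sketch - coalesces rapid mutations into one save per quiet period.
const SAVE_INTERVAL_MS = Math.max(100, Number(process.env.SQLJS_SAVE_INTERVAL_MS ?? 5000));

let saveTimer: NodeJS.Timeout | undefined;

function scheduleSave(persist: () => void): void {
  if (saveTimer) clearTimeout(saveTimer); // reset on every change
  saveTimer = setTimeout(persist, SAVE_INTERVAL_MS);
}
```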
**Docker Configuration:**
```json
{
  "mcpServers": {
    "n8n-mcp": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "--init",
        "-e", "SQLJS_SAVE_INTERVAL_MS=10000",
        "ghcr.io/czlonkowski/n8n-mcp:latest"
      ]
    }
  }
}
```

**docker-compose:**
```yaml
environment:
  SQLJS_SAVE_INTERVAL_MS: "10000"
```

## 💖 Support This Project

<div align="center">
@@ -276,7 +384,7 @@ Every sponsorship directly translates to hours invested in making n8n-mcp better

---

### Option 3: Local Installation (For Development)
### Option C: Local Installation (For Development)

**Prerequisites:** [Node.js](https://nodejs.org/) installed on your system
@@ -334,7 +442,7 @@ Add to Claude Desktop config:

> 💡 Tip: If you’re running n8n locally on the same machine (e.g., via Docker), use http://host.docker.internal:5678 as the N8N_API_URL.

### Option 4: Railway Cloud Deployment (One-Click Deploy) ☁️
### Option D: Railway Cloud Deployment (One-Click Deploy) ☁️

**Prerequisites:** Railway account (free tier available)
@@ -393,184 +501,392 @@ Complete guide for integrating n8n-MCP with Windsurf using project rules.
### [Codex](./docs/CODEX_SETUP.md)
Complete guide for integrating n8n-MCP with Codex.

## 🎓 Add Claude Skills (Optional)

Supercharge your n8n workflow building with specialized skills that teach AI how to build production-ready workflows!

[](https://www.youtube.com/watch?v=e6VvRqmUY2Y)

Learn more: [n8n-skills repository](https://github.com/czlonkowski/n8n-skills)

## 🤖 Claude Project Setup

For the best results when using n8n-MCP with Claude Projects, use these enhanced system instructions:
````markdown
You are an expert in n8n automation software using n8n-MCP tools. Your role is to design, build, and validate n8n workflows with maximum accuracy and efficiency.

## Core Workflow Process
## Core Principles

1. **ALWAYS start new conversation with**: `tools_documentation()` to understand best practices and available tools.
### 1. Silent Execution
CRITICAL: Execute tools without commentary. Only respond AFTER all tools complete.

2. **Template Discovery Phase**
   - `search_templates_by_metadata({complexity: "simple"})` - Find skill-appropriate templates
   - `get_templates_for_task('webhook_processing')` - Get curated templates by task
   - `search_templates('slack notification')` - Text search for specific needs. Start by quickly searching with "id" and "name" to find the template you are looking for; only then dive deeper into the template details, adding "description" to your search query.
   - `list_node_templates(['n8n-nodes-base.slack'])` - Find templates using specific nodes

   **Template filtering strategies**:
   - **For beginners**: `complexity: "simple"` and `maxSetupMinutes: 30`
   - **By role**: `targetAudience: "marketers"` or `"developers"` or `"analysts"`
   - **By time**: `maxSetupMinutes: 15` for quick wins
   - **By service**: `requiredService: "openai"` to find compatible templates
❌ BAD: "Let me search for Slack nodes... Great! Now let me get details..."
✅ GOOD: [Execute search_nodes and get_node in parallel, then respond]

3. **Discovery Phase** - Find the right nodes (if no suitable template):
   - Think deeply about the user request and the logic you are going to build to fulfill it. Ask follow-up questions to clarify the user's intent if something is unclear. Then proceed with the rest of your instructions.
   - `search_nodes({query: 'keyword'})` - Search by functionality
   - `list_nodes({category: 'trigger'})` - Browse by category
   - `list_ai_tools()` - See AI-capable nodes (remember: ANY node can be an AI tool!)
### 2. Parallel Execution
When operations are independent, execute them in parallel for maximum performance.

4. **Configuration Phase** - Get node details efficiently:
   - `get_node_essentials(nodeType)` - Start here! Only 10-20 essential properties
   - `search_node_properties(nodeType, 'auth')` - Find specific properties
   - `get_node_for_task('send_email')` - Get pre-configured templates
   - `get_node_documentation(nodeType)` - Human-readable docs when needed
   - It is good common practice to show a visual representation of the workflow architecture to the user and ask for their opinion before moving forward.
✅ GOOD: Call search_nodes, list_nodes, and search_templates simultaneously
❌ BAD: Sequential tool calls (await each one before the next)

5. **Pre-Validation Phase** - Validate BEFORE building:
   - `validate_node_minimal(nodeType, config)` - Quick required fields check
   - `validate_node_operation(nodeType, config, profile)` - Full operation-aware validation
   - Fix any validation errors before proceeding
### 3. Templates First
ALWAYS check templates before building from scratch (2,709 available).

6. **Building Phase** - Create or customize the workflow:
### 4. Multi-Level Validation
Use the validate_node(mode='minimal') → validate_node(mode='full') → validate_workflow pattern.

### 5. Never Trust Defaults
⚠️ CRITICAL: Default parameter values are the #1 source of runtime failures.
ALWAYS explicitly configure ALL parameters that control node behavior.
## Workflow Process

1. **Start**: Call `tools_documentation()` for best practices

2. **Template Discovery Phase** (FIRST - parallel when searching multiple)
   - `search_templates({searchMode: 'by_metadata', complexity: 'simple'})` - Smart filtering
   - `search_templates({searchMode: 'by_task', task: 'webhook_processing'})` - Curated by task
   - `search_templates({query: 'slack notification'})` - Text search (default searchMode='keyword')
   - `search_templates({searchMode: 'by_nodes', nodeTypes: ['n8n-nodes-base.slack']})` - By node type

   **Filtering strategies**:
   - Beginners: `complexity: "simple"` + `maxSetupMinutes: 30`
   - By role: `targetAudience: "marketers"` | `"developers"` | `"analysts"`
   - By time: `maxSetupMinutes: 15` for quick wins
   - By service: `requiredService: "openai"` for compatibility

3. **Node Discovery** (if no suitable template - parallel execution)
   - Think deeply about requirements. Ask clarifying questions if unclear.
   - `search_nodes({query: 'keyword', includeExamples: true})` - Parallel for multiple nodes
   - `search_nodes({query: 'trigger'})` - Browse triggers
   - `search_nodes({query: 'AI agent langchain'})` - AI-capable nodes

4. **Configuration Phase** (parallel for multiple nodes)
   - `get_node({nodeType, detail: 'standard', includeExamples: true})` - Essential properties (default)
   - `get_node({nodeType, detail: 'minimal'})` - Basic metadata only (~200 tokens)
   - `get_node({nodeType, detail: 'full'})` - Complete information (~3000-8000 tokens)
   - `get_node({nodeType, mode: 'search_properties', propertyQuery: 'auth'})` - Find specific properties
   - `get_node({nodeType, mode: 'docs'})` - Human-readable markdown documentation
   - Show workflow architecture to user for approval before proceeding

5. **Validation Phase** (parallel for multiple nodes)
   - `validate_node({nodeType, config, mode: 'minimal'})` - Quick required fields check
   - `validate_node({nodeType, config, mode: 'full', profile: 'runtime'})` - Full validation with fixes
   - Fix ALL errors before proceeding

6. **Building Phase**
   - If using template: `get_template(templateId, {mode: "full"})`
   - **MANDATORY ATTRIBUTION**: When using a template, ALWAYS inform the user:
     - "This workflow is based on a template by **[author.name]** (@[author.username])"
     - "View the original template at: [url]"
     - Example: "This workflow is based on a template by **David Ashby** (@cfomodz). View the original at: https://n8n.io/workflows/2414"
   - Customize template or build from validated configurations
   - **MANDATORY ATTRIBUTION**: "Based on template by **[author.name]** (@[username]). View at: [url]"
   - Build from validated configurations
   - ⚠️ EXPLICITLY set ALL parameters - never rely on defaults
   - Connect nodes with proper structure
   - Add error handling where appropriate
   - Use expressions like $json, $node["NodeName"].json
   - Build the workflow in an artifact for easy editing downstream (unless the user asked to create in the n8n instance)
   - Add error handling
   - Use n8n expressions: $json, $node["NodeName"].json
   - Build in artifact (unless deploying to n8n instance)

7. **Workflow Validation Phase** - Validate complete workflow:
   - `validate_workflow(workflow)` - Complete validation including connections
   - `validate_workflow_connections(workflow)` - Check structure and AI tool connections
   - `validate_workflow_expressions(workflow)` - Validate all n8n expressions
   - Fix any issues found before deployment
7. **Workflow Validation** (before deployment)
   - `validate_workflow(workflow)` - Complete validation
   - `validate_workflow_connections(workflow)` - Structure check
   - `validate_workflow_expressions(workflow)` - Expression validation
   - Fix ALL issues before deployment

8. **Deployment Phase** (if n8n API configured):
   - `n8n_create_workflow(workflow)` - Deploy validated workflow
   - `n8n_validate_workflow({id: 'workflow-id'})` - Post-deployment validation
   - `n8n_update_partial_workflow()` - Make incremental updates using diffs
   - `n8n_trigger_webhook_workflow()` - Test webhook workflows
8. **Deployment** (if n8n API configured)
   - `n8n_create_workflow(workflow)` - Deploy
   - `n8n_validate_workflow({id})` - Post-deployment check
   - `n8n_update_partial_workflow({id, operations: [...]})` - Batch updates
   - `n8n_trigger_webhook_workflow()` - Test webhooks
## Key Insights
## Critical Warnings

- **TEMPLATES FIRST** - Always check for existing templates before building from scratch (2,500+ available!)
- **ATTRIBUTION REQUIRED** - Always credit template authors with name, username, and link to n8n.io
- **SMART FILTERING** - Use metadata filters to find templates matching user skill level and time constraints
- **USE CODE NODE ONLY WHEN NECESSARY** - Always prefer standard nodes over the code node. Use the code node only when you are sure you need it.
- **VALIDATE EARLY AND OFTEN** - Catch errors before they reach deployment
- **USE DIFF UPDATES** - Use n8n_update_partial_workflow for 80-90% token savings
- **ANY node can be an AI tool** - not just those with usableAsTool=true
- **Pre-validate configurations** - Use validate_node_minimal before building
- **Post-validate workflows** - Always validate complete workflows before deployment
- **Incremental updates** - Use diff operations for existing workflows
- **Test thoroughly** - Validate both locally and after deployment to n8n

### ⚠️ Never Trust Defaults
Default values cause runtime failures. Example:
```json
// ❌ FAILS at runtime
{resource: "message", operation: "post", text: "Hello"}

// ✅ WORKS - all parameters explicit
{resource: "message", operation: "post", select: "channel", channelId: "C123", text: "Hello"}
```

### ⚠️ Example Availability
`includeExamples: true` returns real configurations from workflow templates.
- Coverage varies by node popularity
- When no examples are available, use `get_node` + `validate_node({mode: 'minimal'})`
## Validation Strategy

### Before Building:
1. validate_node_minimal() - Check required fields
2. validate_node_operation() - Full configuration validation
3. Fix all errors before proceeding
### Level 1 - Quick Check (before building)
`validate_node({nodeType, config, mode: 'minimal'})` - Required fields only (<100ms)

### After Building:
1. validate_workflow() - Complete workflow validation
2. validate_workflow_connections() - Structure validation
3. validate_workflow_expressions() - Expression syntax check
### Level 2 - Comprehensive (before building)
`validate_node({nodeType, config, mode: 'full', profile: 'runtime'})` - Full validation with fixes

### After Deployment:
1. n8n_validate_workflow({id}) - Validate deployed workflow
2. n8n_autofix_workflow({id}) - Auto-fix common errors (expressions, typeVersion, webhooks)
3. n8n_list_executions() - Monitor execution status
4. n8n_update_partial_workflow() - Fix issues using diffs
### Level 3 - Complete (after building)
`validate_workflow(workflow)` - Connections, expressions, AI tools

## Response Structure
### Level 4 - Post-Deployment
1. `n8n_validate_workflow({id})` - Validate deployed workflow
2. `n8n_autofix_workflow({id})` - Auto-fix common errors
3. `n8n_executions({action: 'list'})` - Monitor execution status

1. **Discovery**: Show available nodes and options
2. **Pre-Validation**: Validate node configurations first
3. **Configuration**: Show only validated, working configs
4. **Building**: Construct workflow with validated components
5. **Workflow Validation**: Full workflow validation results
6. **Deployment**: Deploy only after all validations pass
7. **Post-Validation**: Verify deployment succeeded
## Response Format
### Initial Creation
```
[Silent tool execution in parallel]

Created workflow:
- Webhook trigger → Slack notification
- Configured: POST /webhook → #general channel

Validation: ✅ All checks passed
```

### Modifications
```
[Silent tool execution]

Updated workflow:
- Added error handling to HTTP node
- Fixed required Slack parameters

Changes validated successfully.
```

## Batch Operations

Use `n8n_update_partial_workflow` with multiple operations in a single call:

✅ GOOD - Batch multiple operations:
```json
n8n_update_partial_workflow({
  id: "wf-123",
  operations: [
    {type: "updateNode", nodeId: "slack-1", changes: {...}},
    {type: "updateNode", nodeId: "http-1", changes: {...}},
    {type: "cleanStaleConnections"}
  ]
})
```

❌ BAD - Separate calls:
```json
n8n_update_partial_workflow({id: "wf-123", operations: [{...}]})
n8n_update_partial_workflow({id: "wf-123", operations: [{...}]})
```
### CRITICAL: addConnection Syntax

The `addConnection` operation requires **four separate string parameters**. Common mistakes cause misleading errors.

❌ WRONG - Object format (fails with "Expected string, received object"):
```json
{
  "type": "addConnection",
  "connection": {
    "source": {"nodeId": "node-1", "outputIndex": 0},
    "destination": {"nodeId": "node-2", "inputIndex": 0}
  }
}
```

❌ WRONG - Combined string (fails with "Source node not found"):
```json
{
  "type": "addConnection",
  "source": "node-1:main:0",
  "target": "node-2:main:0"
}
```

✅ CORRECT - Four separate string parameters:
```json
{
  "type": "addConnection",
  "source": "node-id-string",
  "target": "target-node-id-string",
  "sourcePort": "main",
  "targetPort": "main"
}
```

**Reference**: [GitHub Issue #327](https://github.com/czlonkowski/n8n-mcp/issues/327)

### ⚠️ CRITICAL: IF Node Multi-Output Routing

IF nodes have **two outputs** (TRUE and FALSE). Use the **`branch` parameter** to route to the correct output:

✅ CORRECT - Route to TRUE branch (when condition is met):
```json
{
  "type": "addConnection",
  "source": "if-node-id",
  "target": "success-handler-id",
  "sourcePort": "main",
  "targetPort": "main",
  "branch": "true"
}
```

✅ CORRECT - Route to FALSE branch (when condition is NOT met):
```json
{
  "type": "addConnection",
  "source": "if-node-id",
  "target": "failure-handler-id",
  "sourcePort": "main",
  "targetPort": "main",
  "branch": "false"
}
```

**Common Pattern** - Complete IF node routing:
```json
n8n_update_partial_workflow({
  id: "workflow-id",
  operations: [
    {type: "addConnection", source: "If Node", target: "True Handler", sourcePort: "main", targetPort: "main", branch: "true"},
    {type: "addConnection", source: "If Node", target: "False Handler", sourcePort: "main", targetPort: "main", branch: "false"}
  ]
})
```

**Note**: Without the `branch` parameter, both connections may end up on the same output, causing logic errors!

### removeConnection Syntax

Use the same four-parameter format:
```json
{
  "type": "removeConnection",
  "source": "source-node-id",
  "target": "target-node-id",
  "sourcePort": "main",
  "targetPort": "main"
}
```
## Example Workflow

### Smart Template-First Approach
### Template-First Approach

#### 1. Find existing templates
// Find simple Slack templates for marketers
const templates = search_templates_by_metadata({
```
// STEP 1: Template Discovery (parallel execution)
[Silent execution]
search_templates({
  searchMode: 'by_metadata',
  requiredService: 'slack',
  complexity: 'simple',
  targetAudience: 'marketers',
  maxSetupMinutes: 30
  targetAudience: 'marketers'
})
search_templates({searchMode: 'by_task', task: 'slack_integration'})

// Or search by text
search_templates('slack notification')

// Or get curated templates
get_templates_for_task('slack_integration')

#### 2. Use and customize template
const workflow = get_template(templates.items[0].id, {mode: 'full'})
// STEP 2: Use template
get_template(templateId, {mode: 'full'})
validate_workflow(workflow)

### Building from Scratch (if no suitable template)
// Response after all tools complete:
"Found template by **David Ashby** (@cfomodz).
View at: https://n8n.io/workflows/2414

#### 1. Discovery & Configuration
search_nodes({query: 'slack'})
get_node_essentials('n8n-nodes-base.slack')
Validation: ✅ All checks passed"
```

#### 2. Pre-Validation
validate_node_minimal('n8n-nodes-base.slack', {resource:'message', operation:'send'})
validate_node_operation('n8n-nodes-base.slack', fullConfig, 'runtime')
### Building from Scratch (if no template)

#### 3. Build Workflow
// Create workflow JSON with validated configs
```
// STEP 1: Discovery (parallel execution)
[Silent execution]
search_nodes({query: 'slack', includeExamples: true})
search_nodes({query: 'communication trigger'})

#### 4. Workflow Validation
// STEP 2: Configuration (parallel execution)
[Silent execution]
get_node({nodeType: 'n8n-nodes-base.slack', detail: 'standard', includeExamples: true})
get_node({nodeType: 'n8n-nodes-base.webhook', detail: 'standard', includeExamples: true})

// STEP 3: Validation (parallel execution)
[Silent execution]
validate_node({nodeType: 'n8n-nodes-base.slack', config, mode: 'minimal'})
validate_node({nodeType: 'n8n-nodes-base.slack', config: fullConfig, mode: 'full', profile: 'runtime'})

// STEP 4: Build
// Construct workflow with validated configs
// ⚠️ Set ALL parameters explicitly

// STEP 5: Validate
[Silent execution]
validate_workflow(workflowJson)
validate_workflow_connections(workflowJson)
validate_workflow_expressions(workflowJson)

#### 5. Deploy (if configured)
n8n_create_workflow(validatedWorkflow)
n8n_validate_workflow({id: createdWorkflowId})
// Response after all tools complete:
"Created workflow: Webhook → Slack
Validation: ✅ Passed"
```
#### 6. Update Using Diffs
### Batch Updates

```json
// ONE call with multiple operations
n8n_update_partial_workflow({
  workflowId: id,
  id: "wf-123",
  operations: [
    {type: 'updateNode', nodeId: 'slack1', changes: {position: [100, 200]}}
    {type: "updateNode", nodeId: "slack-1", changes: {position: [100, 200]}},
    {type: "updateNode", nodeId: "http-1", changes: {position: [300, 200]}},
    {type: "cleanStaleConnections"}
  ]
})
```

## Important Rules

- ALWAYS check for existing templates before building from scratch
- LEVERAGE metadata filters to find skill-appropriate templates
- **ALWAYS ATTRIBUTE TEMPLATES**: When using any template, you MUST share the author's name, username, and link to the original template on n8n.io
- VALIDATE templates before deployment (they may need updates)
- USE diff operations for updates (80-90% token savings)
- STATE validation results clearly
- FIX all errors before proceeding
### Core Behavior
1. **Silent execution** - No commentary between tools
2. **Parallel by default** - Execute independent operations simultaneously
3. **Templates first** - Always check before building (2,709 available)
4. **Multi-level validation** - Quick check → Full validation → Workflow validation
5. **Never trust defaults** - Explicitly configure ALL parameters

## Template Discovery Tips
### Attribution & Credits
- **MANDATORY TEMPLATE ATTRIBUTION**: Share author name, username, and n8n.io link
- **Template validation** - Always validate before deployment (may need updates)

- **97.5% of templates have metadata** - Use smart filtering!
- **Filter combinations work best** - Combine complexity + setup time + service
- **Templates save 70-90% development time** - Always check first
- **Metadata is AI-generated** - Occasionally imprecise but highly useful
- **Use `includeMetadata: false` for fast browsing** - Add metadata only when needed
```
### Performance
- **Batch operations** - Use diff operations with multiple changes in one call
- **Parallel execution** - Search, validate, and configure simultaneously
- **Template metadata** - Use smart filtering for faster discovery

### Code Node Usage
- **Avoid when possible** - Prefer standard nodes
- **Only when necessary** - Use the code node as a last resort
- **AI tool capability** - ANY node can be an AI tool (not just marked ones)

### Most Popular n8n Nodes (for get_node):

1. **n8n-nodes-base.code** - JavaScript/Python scripting
2. **n8n-nodes-base.httpRequest** - HTTP API calls
3. **n8n-nodes-base.webhook** - Event-driven triggers
4. **n8n-nodes-base.set** - Data transformation
5. **n8n-nodes-base.if** - Conditional routing
6. **n8n-nodes-base.manualTrigger** - Manual workflow execution
7. **n8n-nodes-base.respondToWebhook** - Webhook responses
8. **n8n-nodes-base.scheduleTrigger** - Time-based triggers
9. **@n8n/n8n-nodes-langchain.agent** - AI agents
10. **n8n-nodes-base.googleSheets** - Spreadsheet integration
11. **n8n-nodes-base.merge** - Data merging
12. **n8n-nodes-base.switch** - Multi-branch routing
13. **n8n-nodes-base.telegram** - Telegram bot integration
14. **@n8n/n8n-nodes-langchain.lmChatOpenAi** - OpenAI chat models
15. **n8n-nodes-base.splitInBatches** - Batch processing
16. **n8n-nodes-base.openAi** - OpenAI legacy node
17. **n8n-nodes-base.gmail** - Email automation
18. **n8n-nodes-base.function** - Custom functions
19. **n8n-nodes-base.stickyNote** - Workflow documentation
20. **n8n-nodes-base.executeWorkflowTrigger** - Sub-workflow calls

**Note:** LangChain nodes use the `@n8n/n8n-nodes-langchain.` prefix; core nodes use `n8n-nodes-base.`
````

Save these instructions in your Claude Project for optimal n8n workflow assistance with intelligent template discovery.
@@ -588,11 +904,16 @@ This tool was created to benefit everyone in the n8n community without friction.
## Features

- **🔍 Smart Node Search**: Find nodes by name, category, or functionality
- **📖 Essential Properties**: Get only the 10-20 properties that matter (NEW in v2.4.0)
- **🎯 Task Templates**: Pre-configured settings for common automation tasks
- **📖 Essential Properties**: Get only the 10-20 properties that matter
- **💡 Real-World Examples**: 2,646 pre-extracted configurations from popular templates
- **✅ Config Validation**: Validate node configurations before deployment
- **🤖 AI Workflow Validation**: Comprehensive validation for AI Agent workflows (NEW in v2.17.0!)
  - Missing language model detection
  - AI tool connection validation
  - Streaming mode constraints
  - Memory and output parser checks
- **🔗 Dependency Analysis**: Understand property relationships and conditions
- **💡 Working Examples**: Real-world examples for immediate use
- **🎯 Template Discovery**: 2,500+ workflow templates with smart filtering
- **⚡ Fast Response**: Average query time ~12ms with optimized SQLite
- **🌐 Universal Compatibility**: Works with any Node.js version
@@ -604,7 +925,7 @@ When Claude, Anthropic's AI assistant, tested n8n-MCP, the results were transfor

**Without MCP:** "I was basically playing a guessing game. 'Is it `scheduleTrigger` or `schedule`? Does it take `interval` or `rule`?' I'd write what seemed logical, but n8n has its own conventions that you can't just intuit. I made six different configuration errors in a simple HackerNews scraper."

**With MCP:** "Everything just... worked. Instead of guessing, I could ask `get_node_essentials()` and get exactly what I needed - not a 100KB JSON dump, but the actual 5-10 properties that matter. What took 45 minutes now takes 3 minutes."
**With MCP:** "Everything just... worked. Instead of guessing, I could ask `get_node()` and get exactly what I needed - not a 100KB JSON dump, but the actual properties that matter. What took 45 minutes now takes 3 minutes."

**The Real Value:** "It's about confidence. When you're building automation workflows, uncertainty is expensive. One wrong parameter and your workflow fails at 3 AM. With MCP, I could validate my configuration before deployment. That's not just time saved - that's peace of mind."
@@ -614,86 +935,107 @@ When Claude, Anthropic's AI assistant, tested n8n-MCP, the results were transfor

Once connected, Claude can use these powerful tools:

### Core Tools
### Core Tools (7 tools)
- **`tools_documentation`** - Get documentation for any MCP tool (START HERE!)
- **`list_nodes`** - List all n8n nodes with filtering options
- **`get_node_info`** - Get comprehensive information about a specific node
- **`get_node_essentials`** - Get only essential properties with examples (10-20 properties instead of 200+)
- **`search_nodes`** - Full-text search across all node documentation
- **`search_node_properties`** - Find specific properties within nodes
- **`list_ai_tools`** - List all AI-capable nodes (ANY node can be used as AI tool!)
- **`get_node_as_tool_info`** - Get guidance on using any node as an AI tool
- **`search_nodes`** - Full-text search across all nodes. Use `includeExamples: true` for real-world configurations
- **`get_node`** - Unified node information tool with multiple modes (v2.26.0):
  - **Info mode** (default): `detail: 'minimal'|'standard'|'full'`, `includeExamples: true`
  - **Docs mode**: `mode: 'docs'` - Human-readable markdown documentation
  - **Property search**: `mode: 'search_properties'`, `propertyQuery: 'auth'`
  - **Versions**: `mode: 'versions'|'compare'|'breaking'|'migrations'`
- **`validate_node`** - Unified node validation (v2.26.0):
  - `mode: 'minimal'` - Quick required fields check (<100ms)
  - `mode: 'full'` - Comprehensive validation with profiles (minimal, runtime, ai-friendly, strict)
- **`validate_workflow`** - Complete workflow validation including AI Agent validation
- **`search_templates`** - Unified template search (v2.26.0):
  - `searchMode: 'keyword'` (default) - Text search with `query` parameter
  - `searchMode: 'by_nodes'` - Find templates using specific `nodeTypes`
  - `searchMode: 'by_task'` - Curated templates for common `task` types
  - `searchMode: 'by_metadata'` - Filter by `complexity`, `requiredService`, `targetAudience`
- **`get_template`** - Get complete workflow JSON (modes: nodes_only, structure, full)

### Template Tools
- **`list_templates`** - Browse all templates with descriptions and optional metadata (2,500+ templates)
- **`search_templates`** - Text search across template names and descriptions
- **`search_templates_by_metadata`** - Advanced filtering by complexity, setup time, services, audience
- **`list_node_templates`** - Find templates using specific nodes
- **`get_template`** - Get complete workflow JSON for import
- **`get_templates_for_task`** - Curated templates for common automation tasks

### Advanced Tools
- **`get_node_for_task`** - Pre-configured node settings for common tasks
- **`list_tasks`** - Discover available task templates
- **`validate_node_operation`** - Validate node configurations (operation-aware, profiles support)
- **`validate_node_minimal`** - Quick validation for just required fields
- **`validate_workflow`** - Complete workflow validation including AI tool connections
- **`validate_workflow_connections`** - Check workflow structure and AI tool connections
- **`validate_workflow_expressions`** - Validate n8n expressions including $fromAI()
- **`get_property_dependencies`** - Analyze property visibility conditions
- **`get_node_documentation`** - Get parsed documentation from n8n-docs
- **`get_database_statistics`** - View database metrics and coverage

### n8n Management Tools (Optional - Requires API Configuration)
These powerful tools allow you to manage n8n workflows directly from Claude. They're only available when you provide `N8N_API_URL` and `N8N_API_KEY` in your configuration.
### n8n Management Tools (12 tools - Requires API Configuration)
These tools require `N8N_API_URL` and `N8N_API_KEY` in your configuration.

#### Workflow Management
- **`n8n_create_workflow`** - Create new workflows with nodes and connections
- **`n8n_get_workflow`** - Get complete workflow by ID
- **`n8n_get_workflow_details`** - Get workflow with execution statistics
- **`n8n_get_workflow_structure`** - Get simplified workflow structure
- **`n8n_get_workflow_minimal`** - Get minimal workflow info (ID, name, active status)
- **`n8n_get_workflow`** - Unified workflow retrieval (v2.26.0):
  - `mode: 'full'` (default) - Complete workflow JSON
  - `mode: 'details'` - Include execution statistics
  - `mode: 'structure'` - Nodes and connections topology only
  - `mode: 'minimal'` - Just ID, name, active status
- **`n8n_update_full_workflow`** - Update entire workflow (complete replacement)
- **`n8n_update_partial_workflow`** - Update workflow using diff operations (NEW in v2.7.0!)
- **`n8n_update_partial_workflow`** - Update workflow using diff operations
- **`n8n_delete_workflow`** - Delete workflows permanently
- **`n8n_list_workflows`** - List workflows with filtering and pagination
- **`n8n_validate_workflow`** - Validate workflows already in n8n by ID (NEW in v2.6.3)
- **`n8n_autofix_workflow`** - Automatically fix common workflow errors (NEW in v2.13.0!)
- **`n8n_validate_workflow`** - Validate workflows in n8n by ID
- **`n8n_autofix_workflow`** - Automatically fix common workflow errors
- **`n8n_workflow_versions`** - Manage version history and rollback

#### Execution Management
- **`n8n_trigger_webhook_workflow`** - Trigger workflows via webhook URL
- **`n8n_get_execution`** - Get execution details by ID
- **`n8n_list_executions`** - List executions with status filtering
- **`n8n_delete_execution`** - Delete execution records
- **`n8n_executions`** - Unified execution management (v2.26.0):
  - `action: 'list'` - List executions with status filtering
  - `action: 'get'` - Get execution details by ID
  - `action: 'delete'` - Delete execution records

#### System Tools
- **`n8n_health_check`** - Check n8n API connectivity and features
- **`n8n_diagnostic`** - Troubleshoot management tools visibility and configuration issues
- **`n8n_list_available_tools`** - List all available management tools

### Example Usage

```typescript
// Get essentials for quick configuration
get_node_essentials("nodes-base.httpRequest")
// Get node info with different detail levels
get_node({
  nodeType: "nodes-base.httpRequest",
  detail: "standard",     // Default: Essential properties
  includeExamples: true   // Include real-world examples from templates
})

// Find nodes for a specific task
search_nodes({ query: "send email gmail" })
// Get documentation
get_node({
  nodeType: "nodes-base.slack",
  mode: "docs"  // Human-readable markdown documentation
})

// Get pre-configured settings
get_node_for_task("send_email")
// Search for specific properties
get_node({
  nodeType: "nodes-base.httpRequest",
  mode: "search_properties",
  propertyQuery: "authentication"
})

// Validate before deployment
validate_node_operation({
// Version history and breaking changes
get_node({
  nodeType: "nodes-base.httpRequest",
  mode: "versions"  // View all versions with summary
})

// Search nodes with configuration examples
search_nodes({
  query: "send email gmail",
  includeExamples: true  // Returns top 2 configs per node
})

// Validate node configuration
validate_node({
  nodeType: "nodes-base.httpRequest",
  config: { method: "POST", url: "..." },
  profile: "runtime"  // or "minimal", "ai-friendly", "strict"
  mode: "full",
  profile: "runtime"  // or "minimal", "ai-friendly", "strict"
})

// Quick required field check
validate_node_minimal({
validate_node({
  nodeType: "nodes-base.slack",
  config: { resource: "message", operation: "send" }
  config: { resource: "message", operation: "send" },
  mode: "minimal"
})

// Search templates by task
search_templates({
  searchMode: "by_task",
  task: "webhook_processing"
})
```
@@ -772,48 +1114,21 @@ npm run dev:http # HTTP dev mode

## 📊 Metrics & Coverage

Current database coverage (n8n v1.117.2):

- ✅ **541/541** nodes loaded (100%)
- ✅ **541** nodes with properties (100%)
- ✅ **470** nodes with documentation (87%)
- ✅ **271** AI-capable tools detected
- ✅ **2,646** pre-extracted template configurations
- ✅ **2,709** workflow templates available (100% metadata coverage)
- ✅ **AI Agent & LangChain nodes** fully documented
- ⚡ **Average response time**: ~12ms
- 💾 **Database size**: ~68MB (includes templates with metadata)

## 🔄 Recent Updates

See [CHANGELOG.md](./docs/CHANGELOG.md) for full version history and recent changes.

## ⚠️ Known Issues

### Claude Desktop Container Management

#### Container Accumulation (Fixed in v2.7.20+)

Previous versions had an issue where containers would not clean up properly when Claude Desktop sessions ended. This has been fixed in v2.7.20+ with proper signal handling.

**For best container lifecycle management:**

1. **Use the `--init` flag** (recommended) - Docker's init system ensures proper signal handling:
   ```json
   {
     "mcpServers": {
       "n8n-mcp": {
         "command": "docker",
         "args": [
           "run", "-i", "--rm", "--init",
           "ghcr.io/czlonkowski/n8n-mcp:latest"
         ]
       }
     }
   }
   ```

2. **Ensure you're using v2.7.20 or later** - Check your version:
   ```bash
   docker run --rm ghcr.io/czlonkowski/n8n-mcp:latest --version
   ```

See [CHANGELOG.md](./CHANGELOG.md) for complete version history and recent changes.

## 🧪 Testing

@@ -853,22 +1168,24 @@ npm run test:bench # Performance benchmarks

### Testing Architecture

**Total: 3,336 tests** across unit and integration test suites

- **Unit Tests** (2,766 tests): Isolated component testing with mocks
  - Services layer: Enhanced validation, property filtering, workflow validation
  - Parsers: Node parsing, property extraction, documentation mapping
  - Database: Repositories, adapters, migrations, FTS5 search
  - MCP tools: Tool definitions, documentation system
  - HTTP server: Multi-tenant support, security, configuration

- **Benchmarks**: Performance testing for critical paths
  - Database queries
  - Node loading
  - Search operations

- **Integration Tests** (570 tests): Full system behavior validation
  - **n8n API Integration** (172 tests): All 18 MCP handler tools tested against a real n8n instance
    - Workflow management: Create, read, update, delete, list, validate, autofix
    - Execution management: Trigger, retrieve, list, delete
    - System tools: Health check, tool listing, diagnostics
  - **MCP Protocol** (119 tests): Protocol compliance, session management, error handling
  - **Database** (226 tests): Repository operations, transactions, performance, FTS5 search
  - **Templates** (35 tests): Template fetching, storage, metadata operations
  - **Docker** (18 tests): Configuration, entrypoint, security validation

For detailed testing documentation, see [Testing Architecture](./docs/testing-architecture.md).

README_ANALYSIS.md (new file, 318 lines)
@@ -0,0 +1,318 @@
# N8N-MCP Validation Analysis: Complete Report

**Date**: November 8, 2025
**Dataset**: 29,218 validation events | 9,021 unique users | 90 days
**Status**: Complete and ready for action

---

## Analysis Documents

### 1. ANALYSIS_QUICK_REFERENCE.md (5.8KB)
**Best for**: Quick decisions, meetings, slide presentations

START HERE if you want the key points in 5 minutes.

**Contains**:
- One-paragraph core finding
- Top 3 problem areas with root causes
- 5 most common errors
- Implementation plan summary
- Key metrics & targets
- FAQ section

---

### 2. VALIDATION_ANALYSIS_SUMMARY.md (13KB)
**Best for**: Executive stakeholders, team leads, decision makers

Read this for a comprehensive but concise overview.

**Contains**:
- One-page executive summary
- Health scorecard with key metrics
- Detailed problem area breakdown
- Error category distribution
- Agent behavior insights
- Tool usage patterns
- Documentation impact findings
- Top 5 recommendations with ROI estimates
- 50-65% improvement projection

---

### 3. VALIDATION_ANALYSIS_REPORT.md (27KB)
**Best for**: Technical deep-dive, implementation planning, root cause analysis

Complete reference document with all findings.

**Contains**:
- All 16 SQL queries (reproducible)
- Node-specific difficulty ranking (top 20)
- Top 25 unique validation error messages
- Error categorization with root causes
- Tool usage patterns before failures
- Search query analysis
- Documentation effectiveness study
- Retry success rate analysis
- Property-level difficulty matrix
- 8 detailed recommendations with implementation guides
- Phase-by-phase action items
- KPI tracking setup
- Complete appendix with error message reference

---

### 4. IMPLEMENTATION_ROADMAP.md (4.3KB)
**Best for**: Project managers, development team, sprint planning

Actionable roadmap for the next 6 weeks.

**Contains**:
- Phase 1-3 breakdown (2 weeks each)
- Specific file locations to modify
- Effort estimates per task
- Success criteria for each phase
- Expected impact projections
- Code examples (before/after)
- Key changes documentation

---

## Reading Paths

### Path A: Decision Maker (30 minutes)
1. Read: ANALYSIS_QUICK_REFERENCE.md
2. Review: Key metrics in VALIDATION_ANALYSIS_SUMMARY.md
3. Decision: Approve IMPLEMENTATION_ROADMAP.md

### Path B: Product Manager (1 hour)
1. Read: VALIDATION_ANALYSIS_SUMMARY.md
2. Skim: Top recommendations in VALIDATION_ANALYSIS_REPORT.md
3. Review: IMPLEMENTATION_ROADMAP.md
4. Check: Success metrics and timelines

### Path C: Technical Lead (2-3 hours)
1. Read: ANALYSIS_QUICK_REFERENCE.md
2. Deep-dive: VALIDATION_ANALYSIS_REPORT.md
3. Study: IMPLEMENTATION_ROADMAP.md
4. Review: Code examples and SQL queries
5. Plan: Ticket creation and sprint allocation

### Path D: Developer (3-4 hours)
1. Skim: ANALYSIS_QUICK_REFERENCE.md for context
2. Read: VALIDATION_ANALYSIS_REPORT.md sections 3-8
3. Study: IMPLEMENTATION_ROADMAP.md thoroughly
4. Review: All code locations and examples
5. Plan: First task implementation

---

## Key Findings Overview

### The Core Insight
Validation failures are not a sign of breakage; they are evidence the system is working. 29,218 validation events prevented bad deployments. The real challenge is **guidance gaps** that cause first-attempt failures.

### Success Evidence
- 100% same-day error recovery rate
- 100% retry success rate
- All agents fix errors when given feedback
- Zero "unfixable" errors

### Problem Areas (75% of errors)
1. **Workflow structure** (26%) - Malformed JSON
2. **Connections** (14%) - Unintuitive syntax (see the sketch below)
3. **Required fields** (8%) - Not marked upfront
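
The connection errors in area 2 mostly come from n8n's deeply nested connections shape: source node name, then output type, then output index, then a list of targets. A minimal sketch of a valid shape (node names are illustrative):

```typescript
// n8n workflow "connections" object: Webhook output 0 feeds Slack input 0.
const connections = {
  "Webhook": {
    main: [ // output type
      [ // output index 0 (multi-output nodes like IF have several of these)
        { node: "Slack", type: "main", index: 0 } // target node + input index
      ]
    ]
  }
};
```

Flattening the double array, or pointing at a node name that does not exist in the workflow, both fall into the malformation class that validation catches.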

### Most Problematic Nodes
- Webhook/Trigger (127 failures)
- Slack (73 failures)
- AI Agent (36 failures)
- OpenAI (35 failures)
- HTTP Request (31 failures)

### Solution Strategy
- Phase 1: Better error messages + required field markers (25-30% reduction)
- Phase 2: Documentation + validation improvements (additional 15-20%)
- Phase 3: Advanced features + monitoring (additional 10-15%)
- **Target**: 50-65% total failure reduction in 6 weeks

---

## Critical Numbers

```
Validation Events ............. 29,218
Unique Users .................. 9,021
Data Quality .................. 100% (all marked as errors)

Current Metrics:
  Error Rate (doc users) ....... 12.6%
  Error Rate (non-doc users) ... 10.8%
  First-attempt success ........ ~77%
  Retry success ................ 100%
  Same-day recovery ............ 100%

Target Metrics (after 6 weeks):
  Error Rate ................... 6-7% (-50%)
  First-attempt success ........ 85%+
  Retry success ................ 100%
  Implementation effort ........ 60-80 hours
```

---

## Implementation Timeline

```
Week 1-2: Phase 1 (Error messages, field markers, webhook guide)
          Expected: 25-30% failure reduction

Week 3-4: Phase 2 (Enum suggestions, connection guide, AI validation)
          Expected: Additional 15-20% reduction

Week 5-6: Phase 3 (Search improvements, fuzzy matching, KPI setup)
          Expected: Additional 10-15% reduction

Target: 50-65% total reduction by Week 6
```

---

## How to Use These Documents

### For Review & Approval
1. Start with ANALYSIS_QUICK_REFERENCE.md
2. Check key metrics in VALIDATION_ANALYSIS_SUMMARY.md
3. Review IMPLEMENTATION_ROADMAP.md for feasibility
4. Decision: Approve phases 1-3

### For Team Planning
1. Read IMPLEMENTATION_ROADMAP.md
2. Create GitHub issues from each task
3. Assign based on effort estimates
4. Schedule sprints for phases 1-3

### For Development
1. Review specific recommendations in VALIDATION_ANALYSIS_REPORT.md
2. Find code locations in IMPLEMENTATION_ROADMAP.md
3. Study code examples (before/after)
4. Implement and test

### For Measurement
1. Record baseline metrics (current state)
2. Deploy Phase 1 and measure impact
3. Use the KPI queries from VALIDATION_ANALYSIS_REPORT.md (a minimal sketch follows this list)
4. Adjust strategy based on actual results
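
For the measurement step, a weekly error-rate query could look like the following sketch. The table and column names (`telemetry_events`, `event`, `created_at`) are assumptions for illustration; the authoritative queries live in VALIDATION_ANALYSIS_REPORT.md.

```typescript
import { Client } from "pg";

// Hypothetical KPI query: weekly share of validation errors among all events.
async function weeklyErrorRate(connectionString: string): Promise<void> {
  const client = new Client({ connectionString });
  await client.connect();
  const { rows } = await client.query(`
    SELECT date_trunc('week', created_at)                        AS week,
           count(*) FILTER (WHERE event = 'validation_details')  AS errors,
           count(*)                                              AS total,
           round(100.0 * count(*) FILTER (WHERE event = 'validation_details')
                 / count(*), 1)                                  AS error_rate_pct
    FROM telemetry_events
    GROUP BY 1
    ORDER BY 1;
  `);
  console.table(rows); // compare against the 12.6% / 10.8% baselines above
  await client.end();
}
```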

---

## Key Recommendations (Priority Order)

### IMMEDIATE (Week 1-2)
1. **Enhance error messages** - Add location + examples (see the sketch below)
2. **Mark required fields** - Add "⚠️ REQUIRED" to tools
3. **Create webhook guide** - Document configuration rules
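
To make recommendation 1 concrete, here is a hypothetical before/after for a connection error; the exact wording and the suggestion logic are illustrative, not the shipped messages:

```typescript
// Hypothetical error-message upgrade: add location, cause, and a correct example.
const before = "Invalid connection";

const after = [
  'Invalid connection at connections["Webhook"].main[0][0]:',
  'target node "Slak" does not exist in this workflow.',
  'Did you mean "Slack"?',
  'Expected shape: { node: "Slack", type: "main", index: 0 }',
].join(" ");
```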

### HIGH (Week 3-4)
4. **Add enum suggestions** - Show valid values in errors
5. **Create connections guide** - Document syntax + examples
6. **Add AI Agent validation** - Detect missing LLM connections

### MEDIUM (Week 5-6)
7. **Improve search results** - Add configuration hints
8. **Build fuzzy matcher** - Suggest similar node types (sketched below)
9. **Set up KPI tracking** - Monitor improvement
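
A minimal sketch of recommendation 8, suggesting the closest known node type for a typo such as "nodes-base.htpRequest". This uses plain Levenshtein distance; the real implementation may weight prefixes or use a library instead:

```typescript
function levenshtein(a: string, b: string): number {
  // dp[i][j] = edit distance between a[0..i) and b[0..j)
  const dp = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
  );
  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      dp[i][j] = Math.min(
        dp[i - 1][j] + 1,                                   // deletion
        dp[i][j - 1] + 1,                                   // insertion
        dp[i - 1][j - 1] + (a[i - 1] === b[j - 1] ? 0 : 1)  // substitution
      );
    }
  }
  return dp[a.length][b.length];
}

function suggestNodeType(input: string, known: string[], maxDistance = 3): string | undefined {
  const [best] = known
    .map((k) => ({ k, d: levenshtein(input.toLowerCase(), k.toLowerCase()) }))
    .sort((x, y) => x.d - y.d);
  return best && best.d <= maxDistance ? best.k : undefined;
}

// suggestNodeType("nodes-base.htpRequest", ["nodes-base.httpRequest", "nodes-base.slack"])
// => "nodes-base.httpRequest"
```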

---

## Questions & Answers

**Q: Why so many validation failures?**
A: High usage (9,021 users, complex workflows). The system is working as intended, preventing bad deployments.

**Q: Shouldn't we just allow invalid configurations?**
A: No. Validation prevented 29,218 broken workflows from deploying. We improve guidance instead.

**Q: Do agents actually learn from errors?**
A: Yes. The 100% same-day recovery rate shows the feedback loop works.

**Q: Can we really reduce failures by 50-65%?**
A: Yes. The analysis shows these specific improvements target the actual root causes.

**Q: How long will this take?**
A: 60-80 developer-hours across 6 weeks. Work can start immediately.

**Q: What's the biggest win?**
A: Marking required fields (378 errors) + better structure messages (1,268 errors).

---

## Next Steps

1. **This Week**: Review all documents and get approval
2. **Week 1**: Create GitHub issues from IMPLEMENTATION_ROADMAP.md
3. **Week 2**: Assign to team, start Phase 1
4. **Week 4**: Deploy Phase 1, start Phase 2
5. **Week 6**: Deploy Phase 2, start Phase 3
6. **Week 8**: Deploy Phase 3, begin monitoring
7. **Week 9+**: Review metrics, iterate

---

## File Structure

```
/Users/romualdczlonkowski/Pliki/n8n-mcp/n8n-mcp/
├── ANALYSIS_QUICK_REFERENCE.md ........... Quick lookup (5.8KB)
├── VALIDATION_ANALYSIS_SUMMARY.md ........ Executive summary (13KB)
├── VALIDATION_ANALYSIS_REPORT.md ......... Complete analysis (27KB)
├── IMPLEMENTATION_ROADMAP.md ............. Action plan (4.3KB)
└── README_ANALYSIS.md .................... This file
```

**Total Documentation**: 50KB of analysis, recommendations, and implementation guidance

---

## Contact & Support

For specific questions:
- **Why?** → See VALIDATION_ANALYSIS_REPORT.md sections 2-8
- **How?** → See IMPLEMENTATION_ROADMAP.md for code locations
- **When?** → See IMPLEMENTATION_ROADMAP.md for the timeline
- **Metrics?** → See the key metrics section of VALIDATION_ANALYSIS_SUMMARY.md

---

## Metadata

| Item | Value |
|------|-------|
| Analysis Date | November 8, 2025 |
| Data Period | Sept 26 - Nov 8, 2025 (90 days) |
| Sample Size | 29,218 validation events |
| Users Analyzed | 9,021 unique users |
| SQL Queries | 16 comprehensive queries |
| Confidence Level | HIGH |
| Status | Complete & Ready for Implementation |

---

## Analysis Methodology

1. **Data Collection**: Extracted all validation_details events from PostgreSQL
2. **Categorization**: Grouped errors by type, node, and message pattern
3. **Pattern Analysis**: Identified root causes for each error category
4. **User Behavior**: Tracked tool usage before/after failures
5. **Recovery Analysis**: Measured success rates and correction time
6. **Recommendation Development**: Mapped solutions to specific problems
7. **Impact Projection**: Estimated improvement from each solution
8. **Roadmap Creation**: Phased implementation plan with effort estimates

**Data Quality**: 100% of validation events properly categorized, with no data loss or corruption

---

**Analysis Complete** | **Ready for Review** | **Awaiting Approval to Proceed**

data/nodes.db (binary file not shown)
data/templates.db (new file, empty)
@@ -20,19 +20,19 @@ services:
    image: n8n-mcp:latest
    container_name: n8n-mcp
    ports:
      - "${PORT:-3000}:${PORT:-3000}"
    environment:
      - MCP_MODE=${MCP_MODE:-http}
      - AUTH_TOKEN=${AUTH_TOKEN}
      - NODE_ENV=${NODE_ENV:-production}
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - PORT=${PORT:-3000}
    volumes:
      # Mount data directory for persistence
      - ./data:/app/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://localhost:$${PORT:-3000}/health"]
      interval: 30s
      timeout: 10s
      retries: 3

@@ -37,11 +37,12 @@ services:
    container_name: n8n-mcp
    restart: unless-stopped
    ports:
      - "${MCP_PORT:-3000}:${MCP_PORT:-3000}"
    environment:
      - NODE_ENV=production
      - N8N_MODE=true
      - MCP_MODE=http
      - PORT=${MCP_PORT:-3000}
      - N8N_API_URL=http://n8n:5678
      - N8N_API_KEY=${N8N_API_KEY}
      - MCP_AUTH_TOKEN=${MCP_AUTH_TOKEN}

@@ -56,7 +57,7 @@ services:
      n8n:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://localhost:$${MCP_PORT:-3000}/health"]
      interval: 30s
      timeout: 10s
      retries: 3

@@ -41,7 +41,7 @@ services:

    # Port mapping
    ports:
      - "${PORT:-3000}:${PORT:-3000}"

    # Resource limits
    deploy:

@@ -53,7 +53,7 @@ services:

    # Health check
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://127.0.0.1:$${PORT:-3000}/health"]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -5,6 +5,107 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased] - Phase 0: Connection Operations Critical Fixes

### Fixed
- **🐛 CRITICAL: Fixed `addConnection` sourceIndex handling (Issue #272, discovered in hands-on testing)**
  - Multi-output nodes (IF, Switch) now work correctly with the sourceIndex parameter
  - Changed from the `||` to the `??` operator to properly handle explicit 0 values (see the sketch after this list)
  - Added defensive array validation before accessing indices
  - Improves the rating from 3/10 to 8/10 for multi-output node scenarios
  - **Impact**: IF nodes, Switch nodes, and all conditional routing are now reliable
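
A minimal illustration of why the operator change matters (variable names are illustrative, not the actual source):

```typescript
// Before: with ||, an explicit sourceIndex of 0 is falsy and silently
// falls back to the default output, which is wrong for an IF node's first output.
const sourceIndex = 0;
const broken = sourceIndex || 1; // evaluates to 1

// After: with ??, only null/undefined trigger the fallback, so output 0 is respected.
const fixed = sourceIndex ?? 1;  // evaluates to 0
```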

- **🐛 CRITICAL: Added runtime validation for `updateConnection` (Issues #272, #204)**
  - Prevents server crashes when the `updates` object is missing
  - Provides a helpful error message with:
    - A clear explanation of what's wrong
    - A correct format example
    - A suggestion to use removeConnection + addConnection for rewiring
  - Validates that `updates` is an object, not a string or other type (see the guard sketch after this list)
  - **Impact**: No more cryptic "Cannot read properties of undefined" crashes
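
A sketch of what such a runtime guard can look like; the exact message and types are inferred from this changelog entry, not copied from the source:

```typescript
interface UpdateConnectionOperation {
  type: "updateConnection";
  source: string;
  target: string;
  updates?: unknown; // validated at runtime below
}

function assertUpdates(
  op: UpdateConnectionOperation
): asserts op is UpdateConnectionOperation & { updates: Record<string, unknown> } {
  if (typeof op.updates !== "object" || op.updates === null || Array.isArray(op.updates)) {
    throw new Error(
      'updateConnection requires an "updates" object, e.g. ' +
      '{ type: "updateConnection", source: "IF", target: "Slack", updates: { sourceIndex: 1 } }. ' +
      "To rewire a connection to a different node, use removeConnection + addConnection instead."
    );
  }
}
```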

### Enhanced
- **Error Messages**: `updateConnection` errors now include actionable guidance
  - An example format is shown in the error
  - Alternative approaches are suggested (removeConnection + addConnection)
  - Clear explanation that updateConnection modifies properties, not targets

### Testing
- Added 8 comprehensive tests for the Phase 0 fixes
  - 2 tests for updateConnection validation (missing updates, invalid type)
  - 5 tests for sourceIndex handling (IF nodes, parallel execution, Switch nodes, explicit 0)
  - 1 test for complex multi-output routing scenarios
- All 126 existing tests still pass

### Documentation
- Updated tool documentation to clarify:
  - `addConnection` now properly handles sourceIndex (Phase 0 fix noted)
  - `updateConnection` REQUIRES an 'updates' object (Phase 0 validation noted)
- Added pitfalls about updateConnection limitations
- Clarified that updateConnection modifies properties, NOT connection targets

### Developer Experience
- More defensive programming throughout connection operations
- Better use of nullish coalescing (`??`) vs. logical OR (`||`)
- Clear inline comments explaining expected behavior
- Improved type safety with runtime guards

### References
- Comprehensive analysis: `docs/local/connection-operations-deep-dive-and-improvement-plan.md`
- Based on hands-on testing with the n8n-mcp-tester agent
- Overall experience rating improved from 4.5/10 to an estimated 6/10

## [2.14.4] - 2025-09-30

### Added
- **Workflow Cleanup Operations**: Two new operations for `n8n_update_partial_workflow` to handle broken workflow recovery
  - `cleanStaleConnections`: Automatically removes all connections referencing non-existent nodes
    - Essential after node renames or deletions that leave dangling connection references
    - Supports `dryRun: true` mode to preview what would be removed (example request after this list)
    - Removes both source and target stale connections
  - `replaceConnections`: Replaces the entire connections object in a single operation
    - Faster than crafting many individual connection operations
    - Useful for bulk connection rewiring
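
A hypothetical request body showing these operations together; the operation shapes are inferred from this changelog entry rather than quoted from the tool schema:

```typescript
// Preview stale-connection cleanup, then rewire in one call.
const diffRequest = {
  id: "workflow-123", // workflow ID (illustrative)
  operations: [
    { type: "cleanStaleConnections", dryRun: true },
    {
      type: "replaceConnections",
      connections: {
        "Webhook": { main: [[{ node: "Slack", type: "main", index: 0 }]] },
      },
    },
  ],
};
```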

- **Graceful Error Handling for Connection Operations**: Enhanced `removeConnection` operation
  - New `ignoreErrors` flag: when `true`, the operation succeeds even if the connection doesn't exist
  - Useful for cleanup scenarios where you're not sure whether connections exist
  - Maintains backwards compatibility (defaults to `false` for strict validation)

- **Best-Effort Mode**: New `continueOnError` mode for `WorkflowDiffRequest`
  - Applies valid operations even if some fail
  - Returns detailed results with `applied` and `failed` operation indices
  - Intentionally breaks atomic guarantees for bulk cleanup scenarios
  - Atomic mode remains the default for safety

### Enhanced
- **Tool Documentation**: Updated `n8n_update_partial_workflow` documentation
  - Added examples for cleanup scenarios
  - Documented the new operation types and modes
  - Added best practices for workflow recovery
  - Clarified atomic vs. best-effort behavior

- **Type System**: Extended workflow diff types
  - Added the `CleanStaleConnectionsOperation` interface
  - Added the `ReplaceConnectionsOperation` interface
  - Extended `WorkflowDiffResult` with `applied`, `failed`, and `staleConnectionsRemoved` fields
  - Updated type guards for the new connection operations

### Testing
- Added a comprehensive test suite for the v2.14.4 features
  - 15 new tests covering all new operations and modes
  - Tests for cleanStaleConnections with various stale scenarios
  - Tests for replaceConnections validation
  - Tests for ignoreErrors flag behavior
  - Tests for continueOnError mode with mixed success/failure
  - Backwards compatibility verification tests

### Impact
- **Time Saved**: Reduces broken-workflow fix time from 10-15 minutes to about 30 seconds
- **Token Efficiency**: `cleanStaleConnections` is 1 operation vs. 10+ manual operations
- **User Experience**: Dramatically improved workflow recovery capabilities
- **Backwards Compatibility**: 100% - all additions are optional and default to existing behavior

## [2.13.2] - 2025-01-24

### Added

docs/CI_TEST_INFRASTRUCTURE.md (new file, 111 lines)
@@ -0,0 +1,111 @@

# CI Test Infrastructure - Known Issues

## Integration Test Failures for External Contributor PRs

### Issue Summary

Integration tests fail for external contributor PRs with "No response from n8n server" errors, even when the code changes are correct. This is a **test infrastructure issue**, not a code quality issue.

### Root Cause

1. **GitHub Actions security**: External contributor PRs don't get access to repository secrets (`N8N_API_URL`, `N8N_API_KEY`, etc.)
2. **MSW mock server**: Mock Service Worker (MSW) is not properly intercepting HTTP requests in the CI environment
3. **Test configuration**: Integration tests expect `http://localhost:3001/mock-api`, but the mock server isn't responding

### Evidence

From the CI logs (PR #343):
```
[CI-DEBUG] Global setup complete, N8N_API_URL: http://localhost:3001/mock-api
❌ No response from n8n server (repeated 60+ times across 20 tests)
```

The tests ARE using the correct mock URL, but MSW isn't intercepting the requests.

### Why This Happens

**For external PRs:**
- GitHub Actions doesn't expose repository secrets, for security reasons
- This prevents malicious PRs from exfiltrating secrets
- MSW setup runs, but requests don't get intercepted in CI

**Test configuration:**
- `.env.test` line 19: `N8N_API_URL=http://localhost:3001/mock-api`
- `.env.test` line 67: `MSW_ENABLED=true`
- CI workflow lines 75-80: Secrets are set but empty for external PRs

### Impact

- ✅ **Code quality**: NOT affected - the actual code changes are correct
- ✅ **Local testing**: Works fine - MSW intercepts requests locally
- ❌ **CI for external PRs**: Integration tests fail (infrastructure issue)
- ✅ **CI for internal PRs**: Works fine (has access to secrets)

### Current Workarounds

1. **For maintainers**: Use the `--admin` flag to merge despite failing tests when the code is verified correct
2. **For contributors**: Run tests locally, where MSW works properly
3. **For CI**: Unit tests pass (they don't require the n8n API); integration tests fail

### Files Affected

- `tests/integration/setup/integration-setup.ts` - MSW server setup
- `tests/setup/msw-setup.ts` - MSW configuration
- `tests/mocks/n8n-api/handlers.ts` - Mock request handlers
- `.github/workflows/test.yml` - CI configuration
- `.env.test` - Test environment configuration

### Potential Solutions (Not Implemented)

1. **Separate unit/integration runs**
   - Run integration tests only for internal PRs
   - Skip integration tests for external PRs
   - Rely on unit tests for external PR validation

2. **MSW CI debugging** (see the sketch after this list)
   - Add extensive logging to the MSW setup
   - Check whether the MSW server actually starts in CI
   - Verify that request interception is working

3. **Mock server process**
   - Start an actual HTTP server in CI instead of MSW
   - More reliable, but adds complexity
   - Would require test infrastructure refactoring

4. **Public test instance**
   - Use a publicly accessible test n8n instance
   - Exposes test data; security concerns
   - Would work for external PRs
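
One way to approach option 2 is to make MSW loud about what it sees. A sketch using MSW's life-cycle events (the handler URL is this repo's mock endpoint; the logging choices are illustrative):

```typescript
import { setupServer } from "msw/node";
import { http, HttpResponse } from "msw";

const server = setupServer(
  http.get("http://localhost:3001/mock-api/*", () => HttpResponse.json({ ok: true }))
);

// Fail fast on anything the handlers don't cover, instead of silently passing through.
server.listen({ onUnhandledRequest: "error" });

// Log every request MSW observes: if nothing prints in CI, interception never engaged.
server.events.on("request:start", ({ request }) => {
  console.log(`[MSW] ${request.method} ${request.url}`);
});
server.events.on("request:unhandled", ({ request }) => {
  console.warn(`[MSW] UNHANDLED ${request.method} ${request.url}`);
});
```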

### Decision

**Status**: Documented but not fixed

**Rationale**:
- Refactoring the integration test infrastructure is a separate concern from code quality
- External PRs are relatively rare compared to internal development
- Unit tests provide sufficient coverage for most changes
- Maintainers can verify integration tests locally before merging

### Testing Strategy

**For external contributor PRs:**
1. ✅ Unit tests must pass
2. ✅ TypeScript compilation must pass
3. ✅ The build must succeed
4. ⚠️ Integration test failures are expected (infrastructure issue)
5. ✅ A maintainer verifies locally before merge

**For internal PRs:**
1. ✅ All tests must pass (unit + integration)
2. ✅ Full CI validation

### References

- PR #343: First occurrence of this issue
- PR #345: Documented the infrastructure issue
- Issue: External PRs don't get secrets (GitHub Actions security)

### Last Updated

2025-10-21 - Documented as part of the PR #345 investigation
@@ -4,7 +4,9 @@ Connect n8n-MCP to Claude Code CLI for enhanced n8n workflow development from th

## Quick Setup via CLI

### Basic configuration (documentation tools only)

**For Linux, macOS, or Windows (WSL/Git Bash):**
```bash
claude mcp add n8n-mcp \
  -e MCP_MODE=stdio \
@@ -13,9 +15,21 @@ claude mcp add n8n-mcp
  -- npx n8n-mcp
```

**For native Windows PowerShell:**
```powershell
# Note: The backtick ` is PowerShell's line continuation character.
claude mcp add n8n-mcp `
  '-e MCP_MODE=stdio' `
  '-e LOG_LEVEL=error' `
  '-e DISABLE_CONSOLE_OUTPUT=true' `
  -- npx n8n-mcp
```



### Full configuration (with n8n management tools)

**For Linux, macOS, or Windows (WSL/Git Bash):**
```bash
claude mcp add n8n-mcp \
  -e MCP_MODE=stdio \
@@ -26,6 +40,18 @@ claude mcp add n8n-mcp
  -- npx n8n-mcp
```

**For native Windows PowerShell:**
```powershell
# Note: The backtick ` is PowerShell's line continuation character.
claude mcp add n8n-mcp `
  '-e MCP_MODE=stdio' `
  '-e LOG_LEVEL=error' `
  '-e DISABLE_CONSOLE_OUTPUT=true' `
  '-e N8N_API_URL=https://your-n8n-instance.com' `
  '-e N8N_API_KEY=your-api-key' `
  -- npx n8n-mcp
```

Make sure to replace `https://your-n8n-instance.com` with your actual n8n URL and `your-api-key` with your n8n API key.

## Alternative Setup Methods

@@ -80,15 +106,64 @@ Remove the server:
```bash
claude mcp remove n8n-mcp
```

## 🎓 Add Claude Skills (Optional)

Supercharge your n8n workflow building with specialized Claude Code skills! The [n8n-skills](https://github.com/czlonkowski/n8n-skills) repository provides 7 complementary skills that teach AI assistants how to build production-ready n8n workflows.

### What You Get

- ✅ **n8n Expression Syntax** - Correct {{}} patterns and common mistakes
- ✅ **n8n MCP Tools Expert** - How to use n8n-mcp tools effectively
- ✅ **n8n Workflow Patterns** - 5 proven architectural patterns
- ✅ **n8n Validation Expert** - Interpret and fix validation errors
- ✅ **n8n Node Configuration** - Operation-aware setup guidance
- ✅ **n8n Code JavaScript** - Write effective JavaScript in Code nodes
- ✅ **n8n Code Python** - Python patterns with limitation awareness

### Installation

**Method 1: Plugin Installation** (Recommended)
```bash
/plugin install czlonkowski/n8n-skills
```

**Method 2: Via Marketplace**
```bash
# Add as a marketplace, then browse and install
/plugin marketplace add czlonkowski/n8n-skills

# Then browse available plugins
/plugin install
# Select "n8n-mcp-skills" from the list
```

**Method 3: Manual Installation**
```bash
# 1. Clone the repository
git clone https://github.com/czlonkowski/n8n-skills.git

# 2. Copy skills to your Claude Code skills directory
cp -r n8n-skills/skills/* ~/.claude/skills/

# 3. Reload Claude Code
# Skills will activate automatically
```

For complete installation instructions, configuration options, and usage examples, see the [n8n-skills README](https://github.com/czlonkowski/n8n-skills#-installation).

Skills work seamlessly with n8n-mcp to provide expert guidance throughout the workflow-building process!

## Project Instructions

For optimal results, create a `CLAUDE.md` file in your project root with the instructions from the [main README's Claude Project Setup section](../README.md#-claude-project-setup).

## Tips

- If you're running n8n locally, use `http://localhost:5678` as the `N8N_API_URL`.
- The n8n API credentials are optional. Without them, you'll only have access to documentation and validation tools. With credentials, you get full workflow management capabilities.
- **Scope Management:**
  - By default, `claude mcp add` uses `--scope local` (also called "user scope"), which saves the configuration to your global user settings and keeps API keys private.
  - To share the configuration with your team, use `--scope project`. This saves the configuration to a `.mcp.json` file in your project's root directory.
  - **Switching Scope:** The cleanest method is to `remove` the server and then `add` it back with the desired scope flag (e.g., `claude mcp remove n8n-mcp` followed by `claude mcp add n8n-mcp --scope project`).
  - **Manual Switching (Advanced):** You can manually edit your `.claude.json` file (e.g., `C:\Users\YourName\.claude.json`). To switch, cut the `"n8n-mcp": { ... }` block from the top-level `"mcpServers"` object (user scope) and paste it into the nested `"mcpServers"` object under your project's path key (project scope), or vice versa. **Important:** You may need to restart Claude Code for manual changes to take effect.
- Claude Code will automatically start the MCP server when you begin a conversation.
@@ -65,6 +65,9 @@ docker run -d \

| `NODE_ENV` | Environment: `development` or `production` | `production` | No |
| `LOG_LEVEL` | Logging level: `debug`, `info`, `warn`, `error` | `info` | No |
| `NODE_DB_PATH` | Custom database path (v2.7.16+) | `/app/data/nodes.db` | No |
| `AUTH_RATE_LIMIT_WINDOW` | Rate limit window in ms (v2.16.3+) | `900000` (15 min) | No |
| `AUTH_RATE_LIMIT_MAX` | Max auth attempts per window (v2.16.3+) | `20` | No |
| `WEBHOOK_SECURITY_MODE` | SSRF protection: `strict`/`moderate`/`permissive` (v2.16.3+) | `strict` | No |

*Either `AUTH_TOKEN` or `AUTH_TOKEN_FILE` must be set for HTTP mode. If both are set, `AUTH_TOKEN` takes precedence.

@@ -283,7 +286,36 @@ docker ps --format "table {{.Names}}\t{{.Status}}"
```bash
docker inspect n8n-mcp | jq '.[0].State.Health'
```

## 🔒 Security Features (v2.16.3+)

### Rate Limiting

Protects against brute-force authentication attacks:

```bash
# Configure in .env or docker-compose.yml
AUTH_RATE_LIMIT_WINDOW=900000  # 15 minutes in milliseconds
AUTH_RATE_LIMIT_MAX=20         # 20 attempts per IP per window
```

### SSRF Protection

Prevents Server-Side Request Forgery when using webhook triggers:

```bash
# For production (blocks localhost + private IPs + cloud metadata)
WEBHOOK_SECURITY_MODE=strict

# For local development with a local n8n instance
WEBHOOK_SECURITY_MODE=moderate

# For internal testing only (allows private IPs)
WEBHOOK_SECURITY_MODE=permissive
```

**Note:** Cloud metadata endpoints (169.254.169.254, metadata.google.internal, etc.) are ALWAYS blocked, in all modes.

### Authentication
@@ -196,6 +196,41 @@ docker ps -a | grep n8n-mcp | grep Exited | awk '{print $1}' | xargs -r docker r

- Manually clean up containers periodically
- Consider using HTTP mode instead

### Webhooks to Local n8n Fail (v2.16.3+)

**Symptoms:**
- `n8n_trigger_webhook_workflow` fails with an "SSRF protection" error
- Error message: "SSRF protection: Localhost access is blocked"
- Webhooks work from the n8n UI but not from n8n-MCP

**Root Cause:** The default strict SSRF protection blocks localhost access to prevent attacks.

**Solution:** Use moderate security mode for local development:

```bash
# For docker run
docker run -d \
  --name n8n-mcp \
  -e MCP_MODE=http \
  -e AUTH_TOKEN=your-token \
  -e WEBHOOK_SECURITY_MODE=moderate \
  -p 3000:3000 \
  ghcr.io/czlonkowski/n8n-mcp:latest

# For Docker Compose - add to environment:
services:
  n8n-mcp:
    environment:
      WEBHOOK_SECURITY_MODE: moderate
```

**Security Modes Explained:**
- `strict` (default): Blocks localhost + private IPs + cloud metadata (production)
- `moderate`: Allows localhost; blocks private IPs + cloud metadata (local development)
- `permissive`: Allows localhost + private IPs; blocks cloud metadata (testing only)

**Important:** Always use `strict` mode in production. Cloud metadata is blocked in all modes.

### n8n API Connection Issues

**Symptoms:**
docs/FINAL_AI_VALIDATION_SPEC.md (new file, 3491 lines - diff suppressed because it is too large)
@@ -73,6 +73,13 @@ PORT=3000

# Optional: Enable n8n management tools
# N8N_API_URL=https://your-n8n-instance.com
# N8N_API_KEY=your-api-key-here

# Security Configuration (v2.16.3+)
# Rate limiting (default: 20 attempts per 15 minutes)
AUTH_RATE_LIMIT_WINDOW=900000
AUTH_RATE_LIMIT_MAX=20
# SSRF protection mode (default: strict)
# Use 'moderate' for local n8n, 'strict' for production
WEBHOOK_SECURITY_MODE=strict
EOF

# 2. Deploy with Docker

@@ -592,6 +599,67 @@ curl -H "Authorization: Bearer $AUTH_TOKEN" \
}
```

## 🔒 Security Features (v2.16.3+)

### Rate Limiting

Built-in rate limiting protects authentication endpoints from brute-force attacks:

**Configuration:**
```bash
# Defaults (15-minute window, 20 attempts per IP)
AUTH_RATE_LIMIT_WINDOW=900000  # milliseconds
AUTH_RATE_LIMIT_MAX=20
```

**Features:**
- Per-IP rate limiting with a configurable window and maximum attempts
- Standard rate limit headers (RateLimit-Limit, RateLimit-Remaining, RateLimit-Reset)
- JSON-RPC formatted error responses
- Automatic IP tracking behind reverse proxies (requires TRUST_PROXY=1)

**Behavior:**
- First 20 attempts: return 401 Unauthorized for invalid credentials
- Attempts 21+: return 429 Too Many Requests with a Retry-After header
- The counter resets after 15 minutes (configurable)

### SSRF Protection

Prevents Server-Side Request Forgery attacks when using webhook triggers:

**Three Security Modes:**

1. **Strict mode (default)** - Production deployments
   ```bash
   WEBHOOK_SECURITY_MODE=strict
   ```
   - ✅ Blocks localhost (127.0.0.1, ::1)
   - ✅ Blocks private IPs (10.x, 192.168.x, 172.16-31.x)
   - ✅ Blocks cloud metadata (169.254.169.254, metadata.google.internal)
   - ✅ DNS rebinding prevention
   - 🎯 **Use for**: Cloud deployments, production environments

2. **Moderate mode** - Local development with local n8n
   ```bash
   WEBHOOK_SECURITY_MODE=moderate
   ```
   - ✅ Allows localhost (for local n8n instances)
   - ✅ Blocks private IPs
   - ✅ Blocks cloud metadata
   - ✅ DNS rebinding prevention
   - 🎯 **Use for**: Development with n8n on localhost:5678

3. **Permissive mode** - Internal networks only
   ```bash
   WEBHOOK_SECURITY_MODE=permissive
   ```
   - ✅ Allows localhost and private IPs
   - ✅ Blocks cloud metadata (always blocked)
   - ✅ DNS rebinding prevention
   - 🎯 **Use for**: Internal testing (NOT for production)

**Important:** Cloud metadata endpoints are ALWAYS blocked, in all modes, for security.

## 🔒 Security Best Practices

### 1. Token Management

@@ -59,10 +59,10 @@ docker compose up -d
      - n8n-mcp-data:/app/data

    ports:
      - "${PORT:-3000}:${PORT:-3000}"

    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://127.0.0.1:$${PORT:-3000}/health"]
      interval: 30s
      timeout: 10s
      retries: 3

docs/LIBRARY_USAGE.md (new file, 724 lines)
@@ -0,0 +1,724 @@

# Library Usage Guide - Multi-Tenant / Hosted Deployments

This guide covers using n8n-mcp as a library dependency for building multi-tenant hosted services.

## Overview

n8n-mcp can be used as a Node.js library to build multi-tenant backends that provide MCP services to multiple users or instances. The package exports all the components needed for integration into your existing services.

## Installation

```bash
npm install n8n-mcp
```

## Core Concepts

### Library Mode vs CLI Mode

- **CLI Mode** (default): Single-player usage via `npx n8n-mcp` or Docker
- **Library Mode**: Multi-tenant usage by importing and using the `N8NMCPEngine` class

### Instance Context

The `InstanceContext` type allows you to pass per-request configuration to the MCP engine:

```typescript
interface InstanceContext {
  // Instance-specific n8n API configuration
  n8nApiUrl?: string;
  n8nApiKey?: string;
  n8nApiTimeout?: number;
  n8nApiMaxRetries?: number;

  // Instance identification
  instanceId?: string;
  sessionId?: string;

  // Extensible metadata
  metadata?: Record<string, any>;
}
```

## Basic Example

```typescript
import express from 'express';
import { N8NMCPEngine } from 'n8n-mcp';

const app = express();
const mcpEngine = new N8NMCPEngine({
  sessionTimeout: 3600000, // 1 hour
  logLevel: 'info'
});

// Handle MCP requests with per-user context
app.post('/mcp', async (req, res) => {
  const instanceContext = {
    n8nApiUrl: req.user.n8nUrl,
    n8nApiKey: req.user.n8nApiKey,
    instanceId: req.user.id
  };

  await mcpEngine.processRequest(req, res, instanceContext);
});

app.listen(3000);
```

## Multi-Tenant Backend Example

This example shows a complete multi-tenant implementation with user authentication and instance management:

```typescript
import express from 'express';
import { N8NMCPEngine, InstanceContext, validateInstanceContext } from 'n8n-mcp';

const app = express();
const mcpEngine = new N8NMCPEngine({
  sessionTimeout: 3600000, // 1 hour
  logLevel: 'info'
});

// Start the MCP engine
await mcpEngine.start();

// Authentication middleware
const authenticate = async (req, res, next) => {
  const token = req.headers.authorization?.replace('Bearer ', '');
  if (!token) {
    return res.status(401).json({ error: 'Unauthorized' });
  }

  // Verify the token and attach the user to the request
  req.user = await getUserFromToken(token);
  next();
};

// Get instance configuration from the database
const getInstanceConfig = async (instanceId: string, userId: string) => {
  // Your database logic here
  const instance = await db.instances.findOne({
    where: { id: instanceId, userId }
  });

  if (!instance) {
    throw new Error('Instance not found');
  }

  return {
    n8nApiUrl: instance.n8nUrl,
    n8nApiKey: await decryptApiKey(instance.encryptedApiKey),
    instanceId: instance.id
  };
};

// MCP endpoint with per-instance context
app.post('/api/instances/:instanceId/mcp', authenticate, async (req, res) => {
  try {
    // Get the instance configuration
    const instance = await getInstanceConfig(req.params.instanceId, req.user.id);

    // Create the instance context
    const context: InstanceContext = {
      n8nApiUrl: instance.n8nApiUrl,
      n8nApiKey: instance.n8nApiKey,
      instanceId: instance.instanceId,
      metadata: {
        userId: req.user.id,
        userAgent: req.headers['user-agent'],
        ip: req.ip
      }
    };

    // Validate the context before processing
    const validation = validateInstanceContext(context);
    if (!validation.valid) {
      return res.status(400).json({
        error: 'Invalid instance configuration',
        details: validation.errors
      });
    }

    // Process the request with the instance context
    await mcpEngine.processRequest(req, res, context);

  } catch (error) {
    console.error('MCP request error:', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});

// Health endpoint
app.get('/health', async (req, res) => {
  const health = await mcpEngine.healthCheck();
  res.status(health.status === 'healthy' ? 200 : 503).json(health);
});

// Graceful shutdown
process.on('SIGTERM', async () => {
  await mcpEngine.shutdown();
  process.exit(0);
});

app.listen(3000);
```

## API Reference

### N8NMCPEngine

#### Constructor

```typescript
new N8NMCPEngine(options?: {
  sessionTimeout?: number; // Session TTL in ms (default: 1800000 = 30 min)
  logLevel?: 'error' | 'warn' | 'info' | 'debug'; // Default: 'info'
})
```

#### Methods

##### `async processRequest(req, res, context?)`

Process a single MCP request with an optional instance context.

**Parameters:**
- `req`: Express request object
- `res`: Express response object
- `context` (optional): InstanceContext with per-instance configuration

**Example:**
```typescript
const context: InstanceContext = {
  n8nApiUrl: 'https://instance1.n8n.cloud',
  n8nApiKey: 'instance1-key',
  instanceId: 'tenant-123'
};

await engine.processRequest(req, res, context);
```

##### `async healthCheck()`

Get engine health status for monitoring.

**Returns:** `EngineHealth`
```typescript
{
  status: 'healthy' | 'unhealthy';
  uptime: number; // seconds
  sessionActive: boolean;
  memoryUsage: {
    used: number;
    total: number;
    unit: string;
  };
  version: string;
}
```

**Example:**
```typescript
app.get('/health', async (req, res) => {
  const health = await engine.healthCheck();
  res.status(health.status === 'healthy' ? 200 : 503).json(health);
});
```

##### `getSessionInfo()`

Get current session information for debugging.

**Returns:**
```typescript
{
  active: boolean;
  sessionId?: string;
  age?: number; // milliseconds
  sessions?: {
    total: number;
    active: number;
    expired: number;
    max: number;
    sessionIds: string[];
  };
}
```
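
**Example** (following the pattern of the other methods; the log format is illustrative):
```typescript
const info = engine.getSessionInfo();
console.log(`session active=${info.active}, tracked sessions=${info.sessions?.active ?? 0}`);
```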
##### `async start()`
|
||||
|
||||
Start the engine (for standalone mode). Not needed when using `processRequest()` directly.
|
||||
|
||||
##### `async shutdown()`
|
||||
|
||||
Graceful shutdown for service lifecycle management.
|
||||
|
||||
**Example:**
|
||||
```typescript
|
||||
process.on('SIGTERM', async () => {
|
||||
await engine.shutdown();
|
||||
process.exit(0);
|
||||
});
|
||||
```
|
||||
|
||||
### Types
|
||||
|
||||
#### InstanceContext
|
||||
|
||||
Configuration for a specific user instance:
|
||||
|
||||
```typescript
|
||||
interface InstanceContext {
|
||||
n8nApiUrl?: string;
|
||||
n8nApiKey?: string;
|
||||
n8nApiTimeout?: number;
|
||||
n8nApiMaxRetries?: number;
|
||||
instanceId?: string;
|
||||
sessionId?: string;
|
||||
metadata?: Record<string, any>;
|
||||
}
|
||||
```
|
||||
|
||||
#### Validation Functions
|
||||
|
||||
##### `validateInstanceContext(context: InstanceContext)`
|
||||
|
||||
Validate and sanitize instance context.
|
||||
|
||||
**Returns:**
|
||||
```typescript
|
||||
{
|
||||
valid: boolean;
|
||||
errors?: string[];
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```typescript
|
||||
import { validateInstanceContext } from 'n8n-mcp';
|
||||
|
||||
const validation = validateInstanceContext(context);
|
||||
if (!validation.valid) {
|
||||
console.error('Invalid context:', validation.errors);
|
||||
}
|
||||
```
|
||||
|
||||
##### `isInstanceContext(obj: any)`
|
||||
|
||||
Type guard to check if an object is a valid InstanceContext.
|
||||
|
||||
**Example:**
|
||||
```typescript
|
||||
import { isInstanceContext } from 'n8n-mcp';
|
||||
|
||||
if (isInstanceContext(req.body.context)) {
|
||||
// TypeScript knows this is InstanceContext
|
||||
await engine.processRequest(req, res, req.body.context);
|
||||
}
|
||||
```
|
||||
|
||||
## Session Management
|
||||
|
||||
### Session Strategies
|
||||
|
||||
The MCP engine supports flexible session ID formats:
|
||||
|
||||
- **UUIDv4**: Internal n8n-mcp format (default)
|
||||
- **Instance-prefixed**: `instance-{userId}-{hash}-{uuid}` for multi-tenant isolation
|
||||
- **Custom formats**: Any non-empty string for mcp-remote and other proxies
|
||||
|
||||
Session validation happens via transport lookup, not format validation. This ensures compatibility with all MCP clients.
|
||||
|
||||
### Multi-Tenant Configuration
|
||||
|
||||
Set these environment variables for multi-tenant mode:
|
||||
|
||||
```bash
|
||||
# Enable multi-tenant mode
|
||||
ENABLE_MULTI_TENANT=true
|
||||
|
||||
# Session strategy: "instance" (default) or "shared"
|
||||
MULTI_TENANT_SESSION_STRATEGY=instance
|
||||
```
|
||||
|
||||
**Session Strategies:**
|
||||
|
||||
- **instance** (recommended): Each tenant gets isolated sessions
|
||||
- Session ID: `instance-{instanceId}-{configHash}-{uuid}`
|
||||
- Better isolation and security
|
||||
- Easier debugging per tenant
|
||||
|
||||
- **shared**: Multiple tenants share sessions with context switching
|
||||
- More efficient for high tenant count
|
||||
- Requires careful context management
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### API Key Management
|
||||
|
||||
Always encrypt API keys server-side:
|
||||
|
||||
```typescript
|
||||
import { createCipheriv, createDecipheriv } from 'crypto';
|
||||
|
||||
// Encrypt before storing
|
||||
const encryptApiKey = (apiKey: string) => {
|
||||
const cipher = createCipheriv('aes-256-gcm', encryptionKey, iv);
|
||||
return cipher.update(apiKey, 'utf8', 'hex') + cipher.final('hex');
|
||||
};
|
||||
|
||||
// Decrypt before using
|
||||
const decryptApiKey = (encrypted: string) => {
|
||||
const decipher = createDecipheriv('aes-256-gcm', encryptionKey, iv);
|
||||
return decipher.update(encrypted, 'hex', 'utf8') + decipher.final('utf8');
|
||||
};
|
||||
|
||||
// Use decrypted key in context
|
||||
const context: InstanceContext = {
|
||||
n8nApiKey: await decryptApiKey(instance.encryptedApiKey),
|
||||
// ...
|
||||
};
|
||||
```
|
||||
|
||||
### Input Validation
|
||||
|
||||
Always validate instance context before processing:
|
||||
|
||||
```typescript
|
||||
import { validateInstanceContext } from 'n8n-mcp';
|
||||
|
||||
const validation = validateInstanceContext(context);
|
||||
if (!validation.valid) {
|
||||
throw new Error(`Invalid context: ${validation.errors?.join(', ')}`);
|
||||
}
|
||||
```
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
Implement rate limiting per tenant:
|
||||
|
||||
```typescript
|
||||
import rateLimit from 'express-rate-limit';
|
||||
|
||||
const limiter = rateLimit({
|
||||
windowMs: 15 * 60 * 1000, // 15 minutes
|
||||
  max: 100, // limit each IP to 100 requests per windowMs
  keyGenerator: (req) => req.user?.id || req.ip
});

app.post('/api/instances/:instanceId/mcp', authenticate, limiter, async (req, res) => {
  // ...
});
```

## Error Handling

Always wrap MCP requests in try-catch blocks:

```typescript
app.post('/api/instances/:instanceId/mcp', authenticate, async (req, res) => {
  try {
    const context = await getInstanceConfig(req.params.instanceId, req.user.id);
    await mcpEngine.processRequest(req, res, context);
  } catch (error) {
    console.error('MCP error:', error);

    // Don't leak internal errors to clients
    if (error.message.includes('not found')) {
      return res.status(404).json({ error: 'Instance not found' });
    }

    res.status(500).json({ error: 'Internal server error' });
  }
});
```

## Monitoring

### Health Checks

Set up periodic health checks:

```typescript
setInterval(async () => {
  const health = await mcpEngine.healthCheck();

  if (health.status === 'unhealthy') {
    console.error('MCP engine unhealthy:', health);
    // Alert your monitoring system
  }

  // Log metrics
  console.log('MCP engine metrics:', {
    uptime: health.uptime,
    memory: health.memoryUsage,
    sessionActive: health.sessionActive
  });
}, 60000); // Every minute
```

### Session Monitoring

Track active sessions:

```typescript
app.get('/admin/sessions', authenticate, async (req, res) => {
  if (!req.user.isAdmin) {
    return res.status(403).json({ error: 'Forbidden' });
  }

  const sessionInfo = mcpEngine.getSessionInfo();
  res.json(sessionInfo);
});
```

## Testing

### Unit Testing

```typescript
import { N8NMCPEngine, InstanceContext } from 'n8n-mcp';

describe('MCP Engine', () => {
  let engine: N8NMCPEngine;

  beforeEach(() => {
    engine = new N8NMCPEngine({ logLevel: 'error' });
  });

  afterEach(async () => {
    await engine.shutdown();
  });

  it('should process request with context', async () => {
    const context: InstanceContext = {
      n8nApiUrl: 'https://test.n8n.io',
      n8nApiKey: 'test-key',
      instanceId: 'test-instance'
    };

    const mockReq = createMockRequest();
    const mockRes = createMockResponse();

    await engine.processRequest(mockReq, mockRes, context);

    expect(mockRes.status).toBe(200);
  });
});
```

### Integration Testing

```typescript
import request from 'supertest';
import { createApp } from './app';

describe('Multi-tenant MCP API', () => {
  let app;
  let authToken;

  beforeAll(async () => {
    app = await createApp();
    authToken = await getTestAuthToken();
  });

  it('should handle MCP request for instance', async () => {
    const response = await request(app)
      .post('/api/instances/test-instance/mcp')
      .set('Authorization', `Bearer ${authToken}`)
      .send({
        jsonrpc: '2.0',
        method: 'initialize',
        params: {
          protocolVersion: '2024-11-05',
          capabilities: {}
        },
        id: 1
      });

    expect(response.status).toBe(200);
    expect(response.body.result).toBeDefined();
  });
});
```

## Deployment Considerations

### Environment Variables

```bash
# Required for multi-tenant mode
ENABLE_MULTI_TENANT=true
MULTI_TENANT_SESSION_STRATEGY=instance

# Optional: Logging
LOG_LEVEL=info
DISABLE_CONSOLE_OUTPUT=false

# Optional: Session configuration
SESSION_TIMEOUT=1800000  # 30 minutes in milliseconds
MAX_SESSIONS=100

# Optional: Performance
NODE_ENV=production
```
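
If you load these variables in application code, a minimal sketch looks like the following. The variable names match the list above; the `config` object and defaults are illustrative, not part of the n8n-mcp API:

```typescript
// Illustrative sketch: read the variables above with safe defaults.
const config = {
  multiTenant: process.env.ENABLE_MULTI_TENANT === 'true',
  sessionStrategy: process.env.MULTI_TENANT_SESSION_STRATEGY ?? 'instance',
  logLevel: process.env.LOG_LEVEL ?? 'info',
  sessionTimeout: Number(process.env.SESSION_TIMEOUT ?? 1800000),
  maxSessions: Number(process.env.MAX_SESSIONS ?? 100)
};
```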

### Docker Deployment

```dockerfile
FROM node:20-alpine

WORKDIR /app

COPY package*.json ./
RUN npm ci --only=production

COPY . .

ENV NODE_ENV=production
ENV ENABLE_MULTI_TENANT=true
ENV LOG_LEVEL=info

EXPOSE 3000

CMD ["node", "dist/server.js"]
```

### Kubernetes Deployment

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-mcp-backend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: n8n-mcp-backend
  template:
    metadata:
      labels:
        app: n8n-mcp-backend
    spec:
      containers:
        - name: backend
          image: your-registry/n8n-mcp-backend:latest
          ports:
            - containerPort: 3000
          env:
            - name: ENABLE_MULTI_TENANT
              value: "true"
            - name: LOG_LEVEL
              value: "info"
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 10
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 10
```

## Examples

### Complete Multi-Tenant SaaS Example

For a complete implementation example, see:
- [n8n-mcp-backend](https://github.com/czlonkowski/n8n-mcp-backend) - Full hosted service implementation

### Migration from Single-Player

If you're migrating from single-player (CLI/Docker) to multi-tenant:

1. **Keep backward compatibility** - Use environment fallback:
   ```typescript
   const context: InstanceContext = {
     n8nApiUrl: instanceUrl || process.env.N8N_API_URL,
     n8nApiKey: instanceKey || process.env.N8N_API_KEY,
     instanceId: instanceId || 'default'
   };
   ```

2. **Gradual rollout** - Start with a feature flag:
   ```typescript
   const isMultiTenant = process.env.ENABLE_MULTI_TENANT === 'true';

   if (isMultiTenant) {
     const context = await getInstanceConfig(req.params.instanceId);
     await engine.processRequest(req, res, context);
   } else {
     // Legacy single-player mode
     await engine.processRequest(req, res);
   }
   ```

## Troubleshooting

### Common Issues

#### Module Resolution Errors

If you see `Cannot find module 'n8n-mcp'`:

```bash
# Clear node_modules and reinstall
rm -rf node_modules package-lock.json
npm install

# Verify package has types field
npm info n8n-mcp

# Check TypeScript can resolve it
npx tsc --noEmit
```

#### Session ID Validation Errors

If you see `Invalid session ID format` errors:

- Ensure you're using n8n-mcp v2.18.9 or later
- Session IDs can be any non-empty string
- No need to generate UUIDs; you can use your own format (see the sketch below)
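
For example, a minimal custom (non-UUID) session ID scheme. The prefix format here is illustrative; the API only requires a non-empty string:

```typescript
// Illustrative: any non-empty string is accepted as a session ID.
function makeSessionId(instanceId: string): string {
  return `${instanceId}-${Date.now()}-${Math.random().toString(36).slice(2, 10)}`;
}
```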

#### Memory Leaks

If memory usage grows over time:

```typescript
// Ensure proper cleanup
process.on('SIGTERM', async () => {
  await engine.shutdown();
  process.exit(0);
});

// Monitor session count
const sessionInfo = engine.getSessionInfo();
console.log('Active sessions:', sessionInfo.sessions?.active);
```

## Further Reading

- [MCP Protocol Specification](https://modelcontextprotocol.io/docs)
- [n8n API Documentation](https://docs.n8n.io/api/)
- [Express.js Guide](https://expressjs.com/en/guide/routing.html)
- [n8n-mcp Main README](../README.md)

## Support

- **Issues**: [GitHub Issues](https://github.com/czlonkowski/n8n-mcp/issues)
- **Discussions**: [GitHub Discussions](https://github.com/czlonkowski/n8n-mcp/discussions)
- **Security**: For security issues, see [SECURITY.md](../SECURITY.md)

@@ -1,62 +0,0 @@
# PR #104 Test Suite Improvements Summary

## Overview
Based on comprehensive review feedback from PR #104, we've significantly improved the test suite quality, organization, and coverage.

## Test Results
- **Before:** 78 failing tests
- **After:** 0 failing tests (1,356 passed, 19 skipped)
- **Coverage:** 85.34% statements, 85.3% branches

## Key Improvements

### 1. Fixed All Test Failures
- Fixed logger test spy issues by properly handling DEBUG environment variable
- Fixed MSW configuration test by restoring environment variables
- Fixed workflow validator tests by adding proper node connections
- Fixed mock setup issues in edge case tests

### 2. Improved Test Organization
- Split large config-validator.test.ts (1,075 lines) into 4 focused files:
  - config-validator-basic.test.ts
  - config-validator-node-specific.test.ts
  - config-validator-security.test.ts
  - config-validator-edge-cases.test.ts

### 3. Enhanced Test Coverage
- Added comprehensive edge case tests for all major validators
- Added null/undefined handling tests
- Added boundary value tests
- Added performance tests with CI-aware timeouts
- Added security validation tests

### 4. Improved Test Quality
- Fixed test naming conventions (100% compliance with "should X when Y" pattern)
- Added JSDoc comments to test utilities and factories
- Created comprehensive test documentation (tests/README.md)
- Improved test isolation to prevent cross-test pollution

### 5. New Features
- Implemented validateBatch method for ConfigValidator
- Added test factories for better test data management
- Created test utilities for common scenarios

## Files Modified
- 7 existing test files fixed
- 8 new test files created
- 1 source file enhanced (ConfigValidator)
- 4 debug files removed before commit

## Skipped Tests
19 tests remain skipped with documented reasons:
- FTS5 search sync test (database corruption in CI)
- Template clearing (not implemented)
- Mock API configuration tests
- Duplicate edge case tests with mocking issues (working versions exist)

## Next Steps
The only remaining task from the improvement plan is:
- Add performance regression tests and boundaries (low priority, future sprint)

## Conclusion
The test suite is now robust, well-organized, and provides excellent coverage. All critical issues have been resolved, and the codebase is ready for merge.

@@ -105,6 +105,9 @@ These are automatically set by the Railway template:
| `CORS_ORIGIN` | `*` | Allow any origin |
| `HOST` | `0.0.0.0` | Listen on all interfaces |
| `PORT` | (Railway provides) | Don't set manually |
| `AUTH_RATE_LIMIT_WINDOW` | `900000` (15 min) | Rate limit window (v2.16.3+) |
| `AUTH_RATE_LIMIT_MAX` | `20` | Max auth attempts (v2.16.3+) |
| `WEBHOOK_SECURITY_MODE` | `strict` | SSRF protection mode (v2.16.3+) |

### Optional Variables

@@ -284,6 +287,32 @@ Since the Railway template uses a specific Docker image tag, updates are manual:

You could use the `latest` tag, but this may cause unexpected breaking changes.

## 🔒 Security Features (v2.16.3+)

Railway deployments include enhanced security features:

### Rate Limiting
- **Automatic brute force protection** - 20 attempts per 15 minutes per IP
- **Configurable limits** via `AUTH_RATE_LIMIT_WINDOW` and `AUTH_RATE_LIMIT_MAX`
- **Standard rate limit headers** for client awareness

### SSRF Protection
- **Default strict mode** blocks localhost, private IPs, and cloud metadata
- **Cloud metadata always blocked** (169.254.169.254, metadata.google.internal, etc.)
- **Use `moderate` mode only if** connecting to local n8n instance

**Security Configuration:**
```bash
# In Railway Variables tab:
WEBHOOK_SECURITY_MODE=strict    # Production (recommended)
# or
WEBHOOK_SECURITY_MODE=moderate  # If using local n8n with port forwarding

# Rate limiting (defaults are good for most use cases)
AUTH_RATE_LIMIT_WINDOW=900000   # 15 minutes
AUTH_RATE_LIMIT_MAX=20          # 20 attempts per IP
```

## 📝 Best Practices

1. **Always change the default AUTH_TOKEN immediately**

757 docs/SESSION_PERSISTENCE.md Normal file
@@ -0,0 +1,757 @@
# Session Persistence API - Production Guide

## Overview

The Session Persistence API enables zero-downtime container deployments in multi-tenant n8n-mcp environments. It allows you to export active MCP session state before shutdown and restore it after restart, maintaining session continuity across container lifecycle events.

**Version:** 2.24.1+
**Status:** Production-ready
**Use Cases:** Multi-tenant SaaS, Kubernetes deployments, container orchestration, rolling updates

## Architecture

### Session State Components

Each persisted session contains:

1. **Session Metadata**
   - `sessionId`: Unique session identifier (UUID v4)
   - `createdAt`: ISO 8601 timestamp of session creation
   - `lastAccess`: ISO 8601 timestamp of last activity

2. **Instance Context**
   - `n8nApiUrl`: n8n instance API endpoint
   - `n8nApiKey`: n8n API authentication key (plaintext)
   - `instanceId`: Optional tenant/instance identifier
   - `sessionId`: Optional session-specific identifier
   - `metadata`: Optional custom application data

3. **Dormant Session Pattern**
   - Transport and MCP server objects are NOT persisted
   - Recreated automatically on first request after restore
   - Reduces memory footprint during restore
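
Put together, the persisted shape looks roughly like this TypeScript interface. This is a sketch assembled from the fields listed above; the authoritative `SessionState` type ships with n8n-mcp:

```typescript
// Sketch of the persisted session shape, assembled from the fields above.
interface SessionState {
  sessionId: string;                   // UUID v4
  metadata: {
    createdAt: string;                 // ISO 8601
    lastAccess: string;                // ISO 8601
  };
  context: {
    n8nApiUrl: string;
    n8nApiKey: string;                 // plaintext - encrypt before storage
    instanceId?: string;
    sessionId?: string;
    metadata?: Record<string, unknown>;
  };
}
```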

## API Reference

### N8NMCPEngine.exportSessionState()

Exports all active session state for persistence before shutdown.

```typescript
exportSessionState(): SessionState[]
```

**Returns:** Array of session state objects containing metadata and credentials

**Example:**
```typescript
const sessions = engine.exportSessionState();
// sessions = [
//   {
//     sessionId: '550e8400-e29b-41d4-a716-446655440000',
//     metadata: {
//       createdAt: '2025-11-24T10:30:00.000Z',
//       lastAccess: '2025-11-24T17:15:32.000Z'
//     },
//     context: {
//       n8nApiUrl: 'https://tenant1.n8n.cloud',
//       n8nApiKey: 'n8n_api_...',
//       instanceId: 'tenant-123',
//       metadata: { userId: 'user-456' }
//     }
//   }
// ]
```

**Key Behaviors:**
- Exports only non-expired sessions (within sessionTimeout)
- Detects and warns about duplicate session IDs
- Logs security event with session count
- Returns empty array if no active sessions

### N8NMCPEngine.restoreSessionState()

Restores sessions from previously exported state after container restart.

```typescript
restoreSessionState(sessions: SessionState[]): number
```

**Parameters:**
- `sessions`: Array of session state objects from `exportSessionState()`

**Returns:** Number of sessions successfully restored

**Example:**
```typescript
const sessions = await loadFromEncryptedStorage();
const count = engine.restoreSessionState(sessions);
console.log(`Restored ${count} sessions`);
```

**Key Behaviors:**
- Validates session metadata (timestamps, required fields)
- Skips expired sessions (age > sessionTimeout)
- Skips duplicate sessions (idempotent)
- Respects MAX_SESSIONS limit (100 per container)
- Recreates transports/servers lazily on first request
- Logs security events for restore success/failure

## Security Considerations

### Critical: Encrypt Before Storage

**The exported session state contains plaintext n8n API keys.** You MUST encrypt this data before persisting to disk.

```typescript
// ❌ NEVER DO THIS
await fs.writeFile('sessions.json', JSON.stringify(sessions));

// ✅ ALWAYS ENCRYPT
const encrypted = await encryptSessionData(sessions, encryptionKey);
await saveToSecureStorage(encrypted);
```

### Recommended Encryption Approach

```typescript
import crypto from 'crypto';

/**
 * Encrypt session data using AES-256-GCM
 */
async function encryptSessionData(
  sessions: SessionState[],
  encryptionKey: Buffer
): Promise<string> {
  const iv = crypto.randomBytes(16);
  const cipher = crypto.createCipheriv('aes-256-gcm', encryptionKey, iv);

  const json = JSON.stringify(sessions);
  const encrypted = Buffer.concat([
    cipher.update(json, 'utf8'),
    cipher.final()
  ]);

  const authTag = cipher.getAuthTag();

  // Return base64: iv:authTag:encrypted
  return [
    iv.toString('base64'),
    authTag.toString('base64'),
    encrypted.toString('base64')
  ].join(':');
}

/**
 * Decrypt session data
 */
async function decryptSessionData(
  encryptedData: string,
  encryptionKey: Buffer
): Promise<SessionState[]> {
  const [ivB64, authTagB64, encryptedB64] = encryptedData.split(':');

  const iv = Buffer.from(ivB64, 'base64');
  const authTag = Buffer.from(authTagB64, 'base64');
  const encrypted = Buffer.from(encryptedB64, 'base64');

  const decipher = crypto.createDecipheriv('aes-256-gcm', encryptionKey, iv);
  decipher.setAuthTag(authTag);

  const decrypted = Buffer.concat([
    decipher.update(encrypted),
    decipher.final()
  ]);

  return JSON.parse(decrypted.toString('utf8'));
}
```

### Key Management

Store encryption keys securely:
- **Kubernetes:** Use Kubernetes Secrets with encryption at rest
- **AWS:** Use AWS Secrets Manager or Parameter Store with KMS
- **Azure:** Use Azure Key Vault
- **GCP:** Use Secret Manager
- **Local Dev:** Use environment variables (NEVER commit to git)
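
However the key is delivered, load it into memory as a raw 32-byte buffer before calling the helpers above. A minimal sketch, assuming a hex-encoded 256-bit key in an environment variable:

```typescript
// Assumes ENCRYPTION_KEY is 64 hex characters (32 bytes for AES-256).
function loadEncryptionKey(): Buffer {
  const hex = process.env.ENCRYPTION_KEY;
  if (!hex || hex.length !== 64) {
    throw new Error('ENCRYPTION_KEY must be a 64-character hex string');
  }
  return Buffer.from(hex, 'hex');
}
```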

### Security Logging

All session persistence operations are logged with the `[SECURITY]` prefix:

```
[SECURITY] session_export { timestamp, count }
[SECURITY] session_restore { timestamp, sessionId, instanceId }
[SECURITY] session_restore_failed { timestamp, sessionId, reason }
[SECURITY] max_sessions_reached { timestamp, count }
```

Monitor these logs in production for audit trails and security analysis.
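
One simple way to surface them is to scan log lines for the prefix and forward matches to your alerting pipeline. A sketch; the `alert` callback is a placeholder for your own hook:

```typescript
// Illustrative log filter; wire `alert` to your monitoring system.
function forwardSecurityEvents(line: string, alert: (msg: string) => void): void {
  if (!line.startsWith('[SECURITY]')) return;
  // The two events most worth paging on, per the list above.
  if (line.includes('session_restore_failed') || line.includes('max_sessions_reached')) {
    alert(line);
  }
}
```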

## Implementation Examples

### 1. Express.js Multi-Tenant Backend

```typescript
import express from 'express';
import Redis from 'ioredis';
import { N8NMCPEngine } from 'n8n-mcp';

const app = express();
const redis = new Redis(process.env.REDIS_URL); // session storage backend
const engine = new N8NMCPEngine({
  sessionTimeout: 1800000, // 30 minutes
  logLevel: 'info'
});

// Startup: Restore sessions from encrypted storage
async function startup() {
  try {
    const encrypted = await redis.get('mcp:sessions');
    if (encrypted) {
      const sessions = await decryptSessionData(
        encrypted,
        Buffer.from(process.env.ENCRYPTION_KEY, 'hex')
      );
      const count = engine.restoreSessionState(sessions);
      console.log(`Restored ${count} sessions`);
    }
  } catch (error) {
    console.error('Failed to restore sessions:', error);
  }
}

// Shutdown: Export sessions to encrypted storage
async function shutdown() {
  try {
    const sessions = engine.exportSessionState();
    const encrypted = await encryptSessionData(
      sessions,
      Buffer.from(process.env.ENCRYPTION_KEY, 'hex')
    );
    await redis.set('mcp:sessions', encrypted, 'EX', 3600); // 1 hour TTL
    console.log(`Exported ${sessions.length} sessions`);
  } catch (error) {
    console.error('Failed to export sessions:', error);
  }

  await engine.shutdown();
  process.exit(0);
}

// Handle graceful shutdown
process.on('SIGTERM', shutdown);
process.on('SIGINT', shutdown);

// Start server
await startup();
app.listen(3000);
```

### 2. Kubernetes Deployment with Init Container

**deployment.yaml:**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-mcp
spec:
  replicas: 3
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  template:
    spec:
      initContainers:
        - name: restore-sessions
          image: your-app:latest
          command: ['/app/restore-sessions.sh']
          env:
            - name: ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: mcp-secrets
                  key: encryption-key
            - name: REDIS_URL
              valueFrom:
                secretKeyRef:
                  name: mcp-secrets
                  key: redis-url
          volumeMounts:
            - name: sessions
              mountPath: /sessions

      containers:
        - name: mcp-server
          image: your-app:latest
          lifecycle:
            preStop:
              exec:
                command: ['/app/export-sessions.sh']
          env:
            - name: ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: mcp-secrets
                  key: encryption-key
            - name: SESSION_TIMEOUT
              value: "1800000"
          volumeMounts:
            - name: sessions
              mountPath: /sessions

      # Graceful shutdown configuration
      terminationGracePeriodSeconds: 30

      volumes:
        - name: sessions
          emptyDir: {}
```

**restore-sessions.sh:**
```bash
#!/bin/bash
set -e

echo "Restoring sessions from Redis..."

# Fetch encrypted sessions from Redis
ENCRYPTED=$(redis-cli -u "$REDIS_URL" GET "mcp:sessions:${HOSTNAME}")

if [ -n "$ENCRYPTED" ]; then
  echo "$ENCRYPTED" > /sessions/encrypted.txt
  echo "Sessions fetched, will be restored on startup"
else
  echo "No sessions to restore"
fi
```

**export-sessions.sh:**
```bash
#!/bin/bash
set -e

echo "Exporting sessions to Redis..."

# Trigger session export via HTTP endpoint
curl -X POST http://localhost:3000/internal/export-sessions

echo "Sessions exported successfully"
```

### 3. Docker Compose with Redis

**docker-compose.yml:**
```yaml
version: '3.8'

services:
  n8n-mcp:
    build: .
    environment:
      - ENCRYPTION_KEY=${ENCRYPTION_KEY}
      - REDIS_URL=redis://redis:6379
      - SESSION_TIMEOUT=1800000
    depends_on:
      - redis
    volumes:
      - ./data:/data
    deploy:
      replicas: 2
      update_config:
        parallelism: 1
        delay: 10s
        order: start-first
    stop_grace_period: 30s

  redis:
    image: redis:7-alpine
    volumes:
      - redis-data:/data
    command: redis-server --appendonly yes

volumes:
  redis-data:
```

**Application code:**
```typescript
import os from 'os';
import Redis from 'ioredis';
import { N8NMCPEngine } from 'n8n-mcp';

const redis = new Redis(process.env.REDIS_URL);
const engine = new N8NMCPEngine();

// Export endpoint (called by preStop hook); `app` is your Express app
app.post('/internal/export-sessions', async (req, res) => {
  try {
    const sessions = engine.exportSessionState();
    const encrypted = await encryptSessionData(
      sessions,
      Buffer.from(process.env.ENCRYPTION_KEY, 'hex')
    );

    // Store with hostname as key for per-container tracking
    await redis.set(
      `mcp:sessions:${os.hostname()}`,
      encrypted,
      'EX',
      3600
    );

    res.json({ exported: sessions.length });
  } catch (error) {
    console.error('Export failed:', error);
    res.status(500).json({ error: 'Export failed' });
  }
});

// Restore on startup
async function startup() {
  const encrypted = await redis.get(`mcp:sessions:${os.hostname()}`);
  if (encrypted) {
    const sessions = await decryptSessionData(
      encrypted,
      Buffer.from(process.env.ENCRYPTION_KEY, 'hex')
    );
    const count = engine.restoreSessionState(sessions);
    console.log(`Restored ${count} sessions`);
  }
}
```

## Best Practices

### 1. Session Timeout Configuration

Choose appropriate timeout based on use case:

```typescript
const engine = new N8NMCPEngine({
  sessionTimeout: 1800000 // 30 minutes (recommended default)
});

// Development: 5 minutes
// sessionTimeout: 300000

// Production SaaS: 30-60 minutes
// sessionTimeout: 1800000 to 3600000

// Long-running workflows: 2-4 hours
// sessionTimeout: 7200000 to 14400000
```

### 2. Storage Backend Selection

**Redis (Recommended for Production)**
- Fast read/write for session data
- TTL support for automatic cleanup
- Pub/sub for distributed coordination
- Atomic operations for consistency

**Database (PostgreSQL/MySQL)**
- JSONB column for session state
- Good for audit requirements
- Slower than Redis
- Requires periodic cleanup

**S3/Cloud Storage**
- Good for disaster recovery backups
- Not suitable for hot session restore
- High latency
- Good for long-term session archival
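
Whichever backend you pick, it helps to hide it behind a small interface so the export/restore code stays backend-agnostic. A sketch with a Redis implementation; the interface and method names are illustrative, not part of n8n-mcp:

```typescript
import type { Redis } from 'ioredis';

// Illustrative storage abstraction; swap implementations per environment.
interface SessionStore {
  save(key: string, encrypted: string, ttlSeconds: number): Promise<void>;
  load(key: string): Promise<string | null>;
}

class RedisSessionStore implements SessionStore {
  constructor(private redis: Redis) {}

  async save(key: string, encrypted: string, ttlSeconds: number): Promise<void> {
    await this.redis.set(key, encrypted, 'EX', ttlSeconds);
  }

  async load(key: string): Promise<string | null> {
    return this.redis.get(key);
  }
}
```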

### 3. Monitoring and Alerting

Monitor these metrics:

```typescript
// `metrics` is your metrics client (StatsD, Datadog, etc.)

// Session export metrics
const sessions = engine.exportSessionState();
metrics.gauge('mcp.sessions.exported', sessions.length);
metrics.gauge('mcp.sessions.export_size_kb',
  JSON.stringify(sessions).length / 1024
);

// Session restore metrics
const restored = engine.restoreSessionState(sessions);
metrics.gauge('mcp.sessions.restored', restored);
metrics.gauge('mcp.sessions.restore_success_rate',
  restored / sessions.length
);

// Runtime metrics
const info = engine.getSessionInfo();
metrics.gauge('mcp.sessions.active', info.active ? 1 : 0);
metrics.gauge('mcp.sessions.age_seconds', info.age || 0);
```

Alert on:
- Export failures (should be rare)
- Low restore success rate (<95%)
- MAX_SESSIONS limit reached
- High session age (potential leaks)

### 4. Graceful Shutdown Timing

Ensure sufficient time for session export:

```typescript
// Kubernetes: set terminationGracePeriodSeconds: 30 (30 seconds minimum)
// Docker: docker run --stop-timeout 30 your-image

// Process signal handling
process.on('SIGTERM', async () => {
  console.log('SIGTERM received, starting graceful shutdown...');

  // 1. Stop accepting new requests (5s)
  await server.close();

  // 2. Wait for in-flight requests (10s)
  await waitForInFlightRequests(10000);

  // 3. Export sessions (5s)
  const sessions = engine.exportSessionState();
  await saveEncryptedSessions(sessions);

  // 4. Cleanup (5s)
  await engine.shutdown();

  // 5. Exit (5s buffer)
  process.exit(0);
});
```

### 5. Idempotency Handling

Sessions can be restored multiple times safely:

```typescript
// First restore
const count1 = engine.restoreSessionState(sessions);
// count1 = 5

// Second restore (same sessions)
const count2 = engine.restoreSessionState(sessions);
// count2 = 0 (all already exist)
```

This is safe for:
- Init container retries
- Manual recovery operations
- Disaster recovery scenarios

### 6. Multi-Instance Coordination

For multiple container instances:

```typescript
// Option 1: Per-instance storage (simple)
const key = `mcp:sessions:${instance.hostname}`;

// Option 2: Centralized with distributed lock (advanced)
const lock = await acquireLock('mcp:session-export');
try {
  const allSessions = await getAllInstanceSessions();
  await saveToBackup(allSessions);
} finally {
  await lock.release();
}
```

## Performance Considerations

### Memory Usage

```typescript
// Each session: ~1-2 KB in memory
// 100 sessions: ~100-200 KB
// 1000 sessions: ~1-2 MB

// Export serialized size
const sessions = engine.exportSessionState();
const sizeKB = JSON.stringify(sessions).length / 1024;
console.log(`Export size: ${sizeKB.toFixed(2)} KB`);
```

### Export/Restore Speed

```typescript
// Export: O(n) where n = active sessions
// Typical: 50-100 sessions in <10ms

// Restore: O(n) with validation
// Typical: 50-100 sessions in 20-50ms

// Factor in encryption:
// AES-256-GCM: ~1ms per 100 sessions
```

### MAX_SESSIONS Limit

Hard limit: 100 sessions per container

```typescript
// Restore respects limit
const sessions = createSessions(150); // 150 sessions
const restored = engine.restoreSessionState(sessions);
// restored = 100 (only first 100 restored)
```

For >100 sessions per tenant:
- Deploy multiple containers
- Use session routing/sharding
- Implement session affinity

## Troubleshooting

### Issue: No sessions restored

**Symptoms:**
```
Restored 0 sessions
```

**Causes:**
1. All sessions expired (age > sessionTimeout)
2. Invalid date format in metadata
3. Missing required context fields

**Debug:**
```typescript
const sessions = await loadFromEncryptedStorage();
console.log('Loaded sessions:', sessions.length);

// Check individual sessions
sessions.forEach((s, i) => {
  const age = Date.now() - new Date(s.metadata.lastAccess).getTime();
  console.log(`Session ${i}: age=${age}ms, expired=${age > sessionTimeout}`);
});
```

### Issue: Restore fails with "invalid context"

**Symptoms:**
```
[SECURITY] session_restore_failed { sessionId: '...', reason: 'invalid context: ...' }
```

**Causes:**
1. Missing n8nApiUrl or n8nApiKey
2. Invalid URL format
3. Corrupted session data

**Fix:**
```typescript
// Validate before restore
const valid = sessions.filter(s => {
  if (!s.context?.n8nApiUrl || !s.context?.n8nApiKey) {
    console.warn(`Invalid session ${s.sessionId}: missing credentials`);
    return false;
  }
  try {
    new URL(s.context.n8nApiUrl); // Validate URL
    return true;
  } catch {
    console.warn(`Invalid session ${s.sessionId}: malformed URL`);
    return false;
  }
});

const count = engine.restoreSessionState(valid);
```

### Issue: MAX_SESSIONS limit hit

**Symptoms:**
```
Reached MAX_SESSIONS limit (100), skipping remaining sessions
```

**Solutions:**

1. Scale horizontally (more containers)
2. Implement session sharding
3. Reduce sessionTimeout
4. Clean up inactive sessions

```typescript
// Pre-filter by activity
const recentSessions = sessions.filter(s => {
  const age = Date.now() - new Date(s.metadata.lastAccess).getTime();
  return age < 600000; // Only restore sessions active in last 10 min
});

const count = engine.restoreSessionState(recentSessions);
```

### Issue: Duplicate session IDs

**Symptoms:**
```
Duplicate sessionId detected during export: 550e8400-...
```

**Cause:** Bug in session management logic

**Fix:** This is a warning, not an error. The duplicate is automatically skipped. If persistent, investigate session creation logic.

### Issue: High memory usage after restore

**Symptoms:** Container OOM after restoring many sessions

**Cause:** Too many sessions for container resources

**Solution:**
```typescript
// Restore in batches
async function restoreInBatches(sessions: SessionState[], batchSize = 25) {
  let totalRestored = 0;

  for (let i = 0; i < sessions.length; i += batchSize) {
    const batch = sessions.slice(i, i + batchSize);
    const count = engine.restoreSessionState(batch);
    totalRestored += count;

    // Wait for GC between batches
    await new Promise(resolve => setTimeout(resolve, 100));
  }

  return totalRestored;
}
```

## Version Compatibility

| Feature | Version | Status |
|---------|---------|--------|
| exportSessionState() | 2.3.0+ | Stable |
| restoreSessionState() | 2.3.0+ | Stable |
| Security logging | 2.24.1+ | Stable |
| Duplicate detection | 2.24.1+ | Stable |
| Race condition fix | 2.24.1+ | Stable |
| Date validation | 2.24.1+ | Stable |
| Optional instanceId | 2.24.1+ | Stable |

## Additional Resources

- [HTTP Deployment Guide](./HTTP_DEPLOYMENT.md) - Multi-tenant HTTP server setup
- [Library Usage Guide](./LIBRARY_USAGE.md) - Embedding n8n-mcp in your app
- [Docker Guide](./DOCKER_README.md) - Container deployment
- [Flexible Instance Configuration](./FLEXIBLE_INSTANCE_CONFIGURATION.md) - Multi-tenant patterns

## Support

For issues or questions:
- GitHub Issues: https://github.com/czlonkowski/n8n-mcp/issues
- Documentation: https://github.com/czlonkowski/n8n-mcp#readme

---

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

@@ -1,314 +0,0 @@
# Template Metadata Generation

This document describes the template metadata generation system introduced in n8n-MCP v2.10.0, which uses OpenAI's batch API to automatically analyze and categorize workflow templates.

## Overview

The template metadata system analyzes n8n workflow templates to extract structured information about their purpose, complexity, requirements, and target audience. This enables intelligent template discovery through advanced filtering capabilities.

## Architecture

### Components

1. **MetadataGenerator** (`src/templates/metadata-generator.ts`)
   - Interfaces with OpenAI API
   - Generates structured metadata using JSON schemas
   - Provides fallback defaults for error cases

2. **BatchProcessor** (`src/templates/batch-processor.ts`)
   - Manages OpenAI batch API operations
   - Handles parallel batch submission
   - Monitors batch status and retrieves results

3. **Template Repository** (`src/templates/template-repository.ts`)
   - Stores metadata in SQLite database
   - Provides advanced search capabilities
   - Supports JSON extraction queries

## Metadata Schema

Each template's metadata contains:

```typescript
{
  categories: string[]             // Max 5 categories (e.g., "automation", "integration")
  complexity: "simple" | "medium" | "complex"
  use_cases: string[]              // Max 5 primary use cases
  estimated_setup_minutes: number  // 5-480 minutes
  required_services: string[]      // External services needed
  key_features: string[]           // Max 5 main capabilities
  target_audience: string[]        // Max 3 target user types
}
```

## Generation Process

### 1. Initial Setup

```bash
# Set OpenAI API key in .env
OPENAI_API_KEY=your-api-key-here
```

### 2. Generate Metadata for Existing Templates

```bash
# Generate metadata only (no template fetching)
npm run fetch:templates -- --metadata-only

# Generate metadata during update
npm run fetch:templates -- --mode=update --generate-metadata
```

### 3. Batch Processing

The system uses OpenAI's batch API for cost-effective processing:

- **50% cost reduction** compared to synchronous API calls
- **24-hour processing window** for batch completion
- **Parallel batch submission** for faster processing
- **Automatic retry** for failed items
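
In outline, a batch run uploads a JSONL file of chat-completion requests and then submits a batch referencing it. A sketch using the official `openai` Node SDK; the file name is illustrative, and this is not the project's actual BatchProcessor code:

```typescript
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Upload a JSONL file with one /v1/chat/completions request per template.
const file = await client.files.create({
  file: fs.createReadStream('template-requests.jsonl'),
  purpose: 'batch'
});

// Submit the batch with the 24-hour completion window.
const batch = await client.batches.create({
  input_file_id: file.id,
  endpoint: '/v1/chat/completions',
  completion_window: '24h'
});

console.log('Batch submitted:', batch.id, batch.status);
```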

### Configuration Options

Environment variables:
- `OPENAI_API_KEY`: Required for metadata generation
- `OPENAI_MODEL`: Model to use (default: "gpt-4o-mini")
- `OPENAI_BATCH_SIZE`: Templates per batch (default: 100, max: 500)
- `METADATA_LIMIT`: Limit templates to process (for testing)

## How It Works

### 1. Template Analysis

For each template, the generator analyzes:
- Template name and description
- Node types and their frequency
- Workflow structure and connections
- Overall complexity

### 2. Node Summarization

Nodes are grouped into categories:
- HTTP/Webhooks
- Database operations
- Communication (Slack, Email)
- AI/ML operations
- Spreadsheets
- Service-specific nodes

### 3. Metadata Generation

The AI model receives:
```
Template: [name]
Description: [description]
Nodes Used (X): [summarized node list]
Workflow has X nodes with Y connections
```

And generates structured metadata following the JSON schema.

### 4. Storage and Indexing

Metadata is stored as JSON in SQLite and indexed for fast querying:

```sql
-- Example query for simple automation templates
SELECT * FROM templates
WHERE json_extract(metadata, '$.complexity') = 'simple'
AND json_extract(metadata, '$.categories') LIKE '%automation%'
```

## MCP Tool Integration

### search_templates_by_metadata

Advanced filtering tool with multiple parameters:

```typescript
search_templates_by_metadata({
  category: "automation",        // Filter by category
  complexity: "simple",          // Skill level
  maxSetupMinutes: 30,           // Time constraint
  targetAudience: "marketers",   // Role-based
  requiredService: "slack"       // Service dependency
})
```

### list_templates

Enhanced to include metadata:

```typescript
list_templates({
  includeMetadata: true,  // Include full metadata
  limit: 20,
  offset: 0
})
```

## Usage Examples

### Finding Beginner-Friendly Templates

```typescript
const templates = await search_templates_by_metadata({
  complexity: "simple",
  maxSetupMinutes: 15
});
```

### Role-Specific Templates

```typescript
const marketingTemplates = await search_templates_by_metadata({
  targetAudience: "marketers",
  category: "communication"
});
```

### Service Integration Templates

```typescript
const openaiTemplates = await search_templates_by_metadata({
  requiredService: "openai",
  complexity: "medium"
});
```

## Performance Metrics

- **Coverage**: 97.5% of templates have metadata (2,534/2,598)
- **Generation Time**: ~2-4 hours for full database (using batch API)
- **Query Performance**: <100ms for metadata searches
- **Storage Overhead**: ~2MB additional database size

## Troubleshooting

### Common Issues

1. **Batch Processing Stuck**
   - Check batch status: The API provides status updates
   - Batches auto-expire after 24 hours
   - Monitor using the batch ID in logs

2. **Missing Metadata**
   - ~2.5% of templates may fail metadata generation
   - Fallback defaults are provided
   - Can regenerate with `--metadata-only` flag

3. **API Rate Limits**
   - Batch API has generous limits (50,000 requests/batch)
   - Cost is 50% of synchronous API
   - Processing happens within 24-hour window

### Monitoring Batch Status

```bash
# Check current batch status (if logged)
curl https://api.openai.com/v1/batches/[batch-id] \
  -H "Authorization: Bearer $OPENAI_API_KEY"
```

## Cost Analysis

### Batch API Pricing (gpt-4o-mini)

- Input: $0.075 per 1M tokens (50% of standard)
- Output: $0.30 per 1M tokens (50% of standard)
- Average template: ~300 input tokens, ~200 output tokens
- Total cost for 2,500 templates: ~$0.50

### Comparison with Synchronous API

- Synchronous cost: ~$1.00 for same volume
- Time saved: Parallel processing vs sequential
- Reliability: Automatic retries included

## Future Enhancements

### Planned Improvements

1. **Incremental Updates**
   - Only generate metadata for new templates
   - Track metadata version for updates

2. **Enhanced Analysis**
   - Workflow complexity scoring
   - Dependency graph analysis
   - Performance impact estimates

3. **User Feedback Loop**
   - Collect accuracy feedback
   - Refine categorization over time
   - Community-driven corrections

4. **Alternative Models**
   - Support for local LLMs
   - Claude API integration
   - Configurable model selection

## Implementation Details

### Database Schema

```sql
-- Metadata stored as JSON column
ALTER TABLE templates ADD COLUMN metadata TEXT;

-- Indexes for common queries
CREATE INDEX idx_templates_complexity ON templates(
  json_extract(metadata, '$.complexity')
);
CREATE INDEX idx_templates_setup_time ON templates(
  json_extract(metadata, '$.estimated_setup_minutes')
);
```

### Error Handling

The system provides robust error handling:

1. **API Failures**: Fallback to default metadata
2. **Parsing Errors**: Logged with template ID
3. **Batch Failures**: Individual item retry
4. **Validation Errors**: Zod schema enforcement
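
For the first case, the fallback produces conservative defaults rather than missing metadata. Roughly, a sketch; the `TemplateMetadata` type name and the specific default values here are illustrative, following the schema shown earlier:

```typescript
// Illustrative fallback used when metadata generation fails for a template.
function getDefaultMetadata(): TemplateMetadata {
  return {
    categories: ['automation'],       // safe, broadly applicable category
    complexity: 'medium',             // neutral skill level
    use_cases: [],
    estimated_setup_minutes: 30,      // middle of the 5-480 range
    required_services: [],
    key_features: [],
    target_audience: []
  };
}
```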

## Maintenance

### Regenerating Metadata

```bash
# Full regeneration (caution: costs ~$0.50)
npm run fetch:templates -- --mode=rebuild --generate-metadata

# Partial regeneration (templates without metadata)
npm run fetch:templates -- --metadata-only
```

### Database Backup

```bash
# Backup before regeneration
cp data/nodes.db data/nodes.db.backup

# Restore if needed
cp data/nodes.db.backup data/nodes.db
```

## Security Considerations

1. **API Key Management**
   - Store in `.env` file (gitignored)
   - Never commit API keys
   - Use environment variables in CI/CD

2. **Data Privacy**
   - Only template structure is sent to API
   - No user data or credentials included
   - Processing happens in OpenAI's secure environment

## Conclusion

The template metadata system transforms template discovery from simple text search to intelligent, multi-dimensional filtering. By leveraging OpenAI's batch API, we achieve cost-effective, scalable metadata generation that significantly improves the user experience for finding relevant workflow templates.
239 docs/TYPE_STRUCTURE_VALIDATION.md Normal file
@@ -0,0 +1,239 @@
# Type Structure Validation

## Overview

Type Structure Validation is an automatic validation system that ensures complex n8n node configurations conform to their expected data structures. Implemented as part of the n8n-mcp validation system, it provides zero-configuration validation for special n8n types that have complex nested structures.

**Status:** Production (v2.22.21+)
**Performance:** 100% pass rate on 776 real-world validations
**Speed:** 0.01ms average validation time (500x faster than target)

The system automatically validates node configurations without requiring any additional setup or configuration from users or AI assistants.

## Supported Types

The validation system supports four special n8n types that have complex structures:

### 1. **filter** (FilterValue)
Complex filtering conditions with boolean operators, comparison operations, and nested logic.

**Structure:**
- `combinator`: "and" | "or" - How conditions are combined
- `conditions`: Array of filter conditions
- Each condition has: `leftValue`, `operator` (type + operation), `rightValue`
- Supports 40+ operations: equals, contains, exists, notExists, gt, lt, regex, etc.

**Example Usage:** IF node, Switch node condition filtering
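
For reference, a value that satisfies this structure looks like the following; an illustrative configuration, not taken from a specific node:

```typescript
// Illustrative FilterValue shaped per the fields above.
const filter = {
  combinator: 'and',
  conditions: [
    {
      leftValue: '={{ $json.status }}',
      operator: { type: 'string', operation: 'equals' },
      rightValue: 'active'
    }
  ]
};
```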

### 2. **resourceMapper** (ResourceMapperValue)
Data mapping configuration for transforming data between different formats.

**Structure:**
- `mappingMode`: "defineBelow" | "autoMapInputData" | "mapManually"
- `value`: Field mappings or expressions
- `matchingColumns`: Column matching configuration
- `schema`: Target schema definition

**Example Usage:** Google Sheets node, Airtable node data mapping

### 3. **assignmentCollection** (AssignmentCollectionValue)
Variable assignments for setting multiple values at once.

**Structure:**
- `assignments`: Array of name-value pairs
- Each assignment has: `name`, `value`, `type`

**Example Usage:** Set node, Code node variable assignments

### 4. **resourceLocator** (INodeParameterResourceLocator)
Resource selection with multiple lookup modes (ID, name, URL, etc.).

**Structure:**
- `mode`: "id" | "list" | "url" | "name"
- `value`: Resource identifier (string, number, or expression)
- `cachedResultName`: Optional cached display name
- `cachedResultUrl`: Optional cached URL

**Example Usage:** Google Sheets spreadsheet selection, Slack channel selection

## Performance & Results

The validation system was tested against real-world n8n.io workflow templates:

| Metric | Result |
|--------|--------|
| **Templates Tested** | 91 (top by popularity) |
| **Nodes Validated** | 616 nodes with special types |
| **Total Validations** | 776 property validations |
| **Pass Rate** | 100.00% (776/776) |
| **False Positive Rate** | 0.00% |
| **Average Time** | 0.01ms per validation |
| **Max Time** | 1.00ms per validation |
| **Performance vs Target** | 500x faster than 50ms target |

### Type-Specific Results

- `filter`: 93/93 passed (100.00%)
- `resourceMapper`: 69/69 passed (100.00%)
- `assignmentCollection`: 213/213 passed (100.00%)
- `resourceLocator`: 401/401 passed (100.00%)

## How It Works

### Automatic Integration

Structure validation is automatically applied during node configuration validation. When you call `validate_node_operation` or `validate_node_minimal`, the system:

1. **Identifies Special Types**: Detects properties that use filter, resourceMapper, assignmentCollection, or resourceLocator types
2. **Validates Structure**: Checks that the configuration matches the expected structure for that type
3. **Validates Operations**: For filter types, validates that operations are supported for the data type
4. **Provides Context**: Returns specific error messages with property paths and fix suggestions

### Validation Flow

```
User/AI provides node config
        ↓
validate_node_operation (MCP tool)
        ↓
EnhancedConfigValidator.validateWithMode()
        ↓
validateSpecialTypeStructures()  ← Automatic structure validation
        ↓
TypeStructureService.validateStructure()
        ↓
Returns validation result with errors/warnings/suggestions
```

### Edge Cases Handled

**1. Credential-Provided Fields**
- Fields like Google Sheets `sheetId` that come from n8n credentials at runtime are excluded from validation
- No false positives for fields that aren't in the configuration

**2. Filter Operations**
- Universal operations (`exists`, `notExists`, `isNotEmpty`) work across all data types
- Type-specific operations validated (e.g., `regex` only for strings, `gt`/`lt` only for numbers)

**3. Node-Specific Logic**
- Custom validation logic for specific nodes (Google Sheets, Slack, etc.)
- Context-aware error messages that understand the node's operation

## Example Validation Error

### Invalid Filter Structure

**Configuration:**
```json
{
  "conditions": {
    "combinator": "and",
    "conditions": [
      {
        "leftValue": "={{ $json.status }}",
        "rightValue": "active",
        "operator": {
          "type": "string",
          "operation": "invalidOperation"  // ❌ Not a valid operation
        }
      }
    ]
  }
}
```

**Validation Error:**
```json
{
  "valid": false,
  "errors": [
    {
      "type": "invalid_structure",
      "property": "conditions.conditions[0].operator.operation",
      "message": "Unsupported operation 'invalidOperation' for type 'string'",
      "suggestion": "Valid operations for string: equals, notEquals, contains, notContains, startsWith, endsWith, regex, exists, notExists, isNotEmpty"
    }
  ]
}
```

## Technical Details

### Implementation

- **Type Definitions**: `src/types/type-structures.ts` (301 lines)
- **Type Structures**: `src/constants/type-structures.ts` (741 lines, 22 complete type structures)
- **Service Layer**: `src/services/type-structure-service.ts` (427 lines)
- **Validator Integration**: `src/services/enhanced-config-validator.ts` (line 270)
- **Node-Specific Logic**: `src/services/node-specific-validators.ts`

### Test Coverage

- **Unit Tests**:
  - `tests/unit/types/type-structures.test.ts` (14 tests)
  - `tests/unit/constants/type-structures.test.ts` (39 tests)
  - `tests/unit/services/type-structure-service.test.ts` (64 tests)
  - `tests/unit/services/enhanced-config-validator-type-structures.test.ts`

- **Integration Tests**:
  - `tests/integration/validation/real-world-structure-validation.test.ts` (8 tests, 388ms)

- **Validation Scripts**:
  - `scripts/test-structure-validation.ts` - Standalone validation against 100 templates

### Documentation

- **Implementation Plan**: `docs/local/v3/implementation-plan-final.md` - Complete technical specifications
- **Phase Results**: Phases 1-3 completed with 100% success criteria met

## For Developers

### Adding New Type Structures

1. Define the type structure in `src/constants/type-structures.ts`
2. Add validation logic in `TypeStructureService.validateStructure()`
3. Add tests in `tests/unit/constants/type-structures.test.ts`
4. Test against real templates using `scripts/test-structure-validation.ts`

### Testing Structure Validation

**Run Unit Tests:**
```bash
npm run test:unit -- tests/unit/services/enhanced-config-validator-type-structures.test.ts
```

**Run Integration Tests:**
```bash
npm run test:integration -- tests/integration/validation/real-world-structure-validation.test.ts
```

**Run Full Validation:**
```bash
npm run test:structure-validation
```

### Relevant Test Files

- **Type Tests**: `tests/unit/types/type-structures.test.ts`
- **Structure Tests**: `tests/unit/constants/type-structures.test.ts`
- **Service Tests**: `tests/unit/services/type-structure-service.test.ts`
- **Validator Tests**: `tests/unit/services/enhanced-config-validator-type-structures.test.ts`
- **Integration Tests**: `tests/integration/validation/real-world-structure-validation.test.ts`
- **Real-World Validation**: `scripts/test-structure-validation.ts`

## Production Readiness

✅ **All Tests Passing**: 100% pass rate on unit and integration tests
✅ **Performance Validated**: 0.01ms average (500x better than 50ms target)
✅ **Zero Breaking Changes**: Fully backward compatible
✅ **Real-World Validation**: 91 templates, 616 nodes, 776 validations
✅ **Production Deployment**: Successfully deployed in v2.22.21
✅ **Edge Cases Handled**: Credential fields, filter operations, node-specific logic

## Version History

- **v2.22.21** (2025-11-21): Type structure validation system completed (Phases 1-3)
  - 22 complete type structures defined
  - 100% pass rate on real-world validation
  - 0.01ms average validation time
  - Zero false positives

@@ -162,7 +162,7 @@ n8n_validate_workflow({id: createdWorkflowId})
n8n_update_partial_workflow({
  workflowId: id,
  operations: [
    {type: 'updateNode', nodeId: 'slack1', changes: {position: [100, 200]}}
    {type: 'updateNode', nodeId: 'slack1', updates: {position: [100, 200]}}
  ]
})

BIN docs/img/skills.png Normal file (430 KiB; binary file not shown)
@@ -1,162 +0,0 @@
|
||||
# Issue #90: "propertyValues[itemName] is not iterable" Error - Research Findings
|
||||
|
||||
## Executive Summary
|
||||
|
||||
The error "propertyValues[itemName] is not iterable" occurs when AI agents create workflows with incorrect data structures for n8n nodes that use `fixedCollection` properties. This primarily affects Switch Node v2, If Node, and Filter Node. The error prevents workflows from loading in the n8n UI, resulting in empty canvases.
|
||||
|
||||
## Root Cause Analysis
|
||||
|
||||
### 1. Data Structure Mismatch
|
||||
|
||||
The error occurs when n8n's validation engine expects an iterable array but encounters a non-iterable object. This happens with nodes using `fixedCollection` type properties.
|
||||
|
||||
**Incorrect Structure (causes error):**
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"conditions": {
|
||||
"values": [
|
||||
{
|
||||
"value1": "={{$json.status}}",
|
||||
"operation": "equals",
|
||||
"value2": "active"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Correct Structure:**
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"conditions": [
|
||||
{
|
||||
"value1": "={{$json.status}}",
|
||||
"operation": "equals",
|
||||
"value2": "active"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Affected Nodes
|
||||
|
||||
Based on the research and issue comments, the following nodes are affected:
|
||||
|
||||
1. **Switch Node v2** (`n8n-nodes-base.switch` with typeVersion: 2)
|
||||
- Uses `rules` parameter with `conditions` fixedCollection
|
||||
- v3 doesn't have this issue due to restructured schema
|
||||
|
||||
2. **If Node** (`n8n-nodes-base.if` with typeVersion: 1)
|
||||
- Uses `conditions` parameter with nested conditions array
|
||||
- Similar structure to Switch v2
|
||||
|
||||
3. **Filter Node** (`n8n-nodes-base.filter`)
|
||||
- Uses `conditions` parameter
|
||||
- Same fixedCollection pattern
|
||||
|
||||
### 3. Why AI Agents Create Incorrect Structures
|
||||
|
||||
1. **Training Data Issues**: AI models may have been trained on outdated or incorrect n8n workflow examples
|
||||
2. **Nested Object Inference**: AI tends to create unnecessarily nested structures when it sees collection-type parameters
|
||||
3. **Legacy Format Confusion**: Mixing v2 and v3 Switch node formats
|
||||
4. **Schema Misinterpretation**: The term "fixedCollection" may lead AI to create object wrappers
|
||||
|
||||
## Current Impact
|
||||
|
||||
From issue #90 comments:
|
||||
- Multiple users experiencing the issue
|
||||
- Workflows fail to load completely (empty canvas)
|
||||
- Users resort to using Switch Node v3 or direct API calls
|
||||
- The issue appears in "most MCPs" according to user feedback
|
||||
|
||||
## Recommended Actions
|
||||
|
||||
### 1. Immediate Validation Enhancement
|
||||
|
||||
Add specific validation for fixedCollection properties in the workflow validator:
|
||||
|
||||
```typescript
// In workflow-validator.ts or enhanced-config-validator.ts
function validateFixedCollectionParameters(node, result) {
  const problematicNodes = {
    'n8n-nodes-base.switch': { version: 2, fields: ['rules'] },
    'n8n-nodes-base.if': { version: 1, fields: ['conditions'] },
    'n8n-nodes-base.filter': { version: 1, fields: ['conditions'] }
  };

  const nodeConfig = problematicNodes[node.type];
  if (nodeConfig && node.typeVersion === nodeConfig.version) {
    // Validate structure: the nested "conditions" must be an iterable array,
    // not an object wrapper like { conditions: { values: [...] } }
    for (const field of nodeConfig.fields) {
      const value = node.parameters?.[field];
      const conditions = value?.conditions;
      if (conditions !== undefined && !Array.isArray(conditions)) {
        result.errors.push({
          node: node.name,
          message: `"${field}.conditions" must be an array for ${node.type} ` +
            `v${nodeConfig.version}; an object wrapper triggers ` +
            `"propertyValues[itemName] is not iterable"`
        });
      }
    }
  }
}
```
### 2. Enhanced MCP Tool Validation

Update the validation tools to detect and prevent this specific error pattern:

1. **In `validate_node_operation` tool**: Add checks for fixedCollection structures
2. **In `validate_workflow` tool**: Include specific validation for Switch/If nodes
3. **In `n8n_create_workflow` tool**: Pre-validate parameters before submission

### 3. AI-Friendly Examples

Update workflow examples to show correct structures:

```typescript
// In workflow-examples.ts
export const SWITCH_NODE_EXAMPLE = {
  name: "Switch",
  type: "n8n-nodes-base.switch",
  typeVersion: 3, // Prefer v3 over v2
  parameters: {
    // Correct v3 structure
  }
};
```
### 4. Migration Strategy

For existing workflows with Switch v2:

1. Detect Switch v2 nodes in validation
2. Suggest migration to v3
3. Provide an automatic conversion utility (see the sketch below)
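A minimal sketch of what such a conversion utility could look like, based on the incorrect/correct structures documented above. The function name and the exact parameter shape are illustrative assumptions; it only unwraps the object-wrapper mistake and does not attempt a full v2-to-v3 schema migration.

```typescript
// Hypothetical helper: unwrap { conditions: { values: [...] } } into the
// iterable array form that Switch v2 expects. Illustrative sketch only.
interface WorkflowNode {
  name: string;
  type: string;
  typeVersion: number;
  parameters: Record<string, any>;
}

function fixSwitchV2Rules(node: WorkflowNode): boolean {
  if (node.type !== 'n8n-nodes-base.switch' || node.typeVersion !== 2) {
    return false;
  }
  const conditions = node.parameters?.rules?.conditions;
  // Detect the object-wrapper mistake documented in the root cause analysis
  if (conditions && !Array.isArray(conditions) && Array.isArray(conditions.values)) {
    node.parameters.rules.conditions = conditions.values;
    return true; // node was repaired
  }
  return false;
}
```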
### 5. Documentation Updates

1. Add warnings about fixedCollection structures in tool documentation
2. Include specific examples of correct vs incorrect structures
3. Document the Switch v2 to v3 migration path

## Proposed Implementation Priority

1. **High Priority**: Add validation to prevent creation of invalid structures
2. **High Priority**: Update existing validation tools to catch this error
3. **Medium Priority**: Add auto-fix capabilities to correct structures
4. **Medium Priority**: Update examples and documentation
5. **Low Priority**: Create migration utilities for v2 to v3

## Testing Strategy

1. Create test cases for each affected node type
2. Test both correct and incorrect structures
3. Verify validation catches all variants of the error
4. Test auto-fix suggestions work correctly

## Success Metrics

- Zero instances of "propertyValues[itemName] is not iterable" in newly created workflows
- Clear error messages that guide users to correct structures
- Successful validation of all Switch/If node configurations before workflow creation

## Next Steps

1. Implement validation enhancements in the workflow validator
2. Update MCP tools to include these validations
3. Add comprehensive tests
4. Update documentation with clear examples
5. Consider adding a migration tool for existing workflows
1213  docs/local/DEEP_DIVE_ANALYSIS_2025-10-02.md  (new file; diff suppressed because it is too large)
225   docs/local/DEEP_DIVE_ANALYSIS_README.md  (new file)
@@ -0,0 +1,225 @@
# N8N-MCP Deep Dive Analysis - October 2, 2025

## Overview

This directory contains a comprehensive deep-dive analysis of n8n-mcp usage data from September 26 - October 2, 2025.

**Data Volume Analyzed:**
- 212,375 telemetry events
- 5,751 workflow creations
- 2,119 unique users
- 6 days of usage data

## Report Structure

### `DEEP_DIVE_ANALYSIS_2025-10-02.md` (Main Report)
**Sections Covered:**
1. **Executive Summary** - Key findings and recommendations
2. **Tool Performance Analysis** - Success rates, performance metrics, critical findings
3. **Validation Catastrophe** - The node type prefix disaster analysis
4. **Usage Patterns & User Segmentation** - User distribution, daily trends
5. **Tool Sequence Analysis** - How AI agents use tools together
6. **Workflow Creation Patterns** - Complexity distribution, popular nodes
7. **Platform & Version Distribution** - OS, architecture, version adoption
8. **Error Patterns & Root Causes** - TypeErrors, validation errors, discovery failures
9. **P0-P1 Refactoring Recommendations** - Detailed implementation guides

### `Deep_dive_p1_p2.md` (Follow-up Report)

**Sections Covered:**
- Remaining P1 and P2 recommendations
- Architectural refactoring suggestions
- Telemetry enhancements
- CHANGELOG integration
- Final recommendations summary
## Key Findings Summary

### Critical Issues (P0 - Fix Immediately)

1. **Node Type Prefix Validation Catastrophe**
   - 5,000+ validation errors from a single root cause
   - `nodes-base.X` vs `n8n-nodes-base.X` confusion
   - **Solution**: Auto-normalize prefixes (2-4 hours effort)

2. **TypeError in Node Information Tools**
   - 10-18% failure rate in get_node_essentials/info
   - 1,000+ failures affecting hundreds of users
   - **Solution**: Complete null-safety audit (1 day effort)

3. **Task Discovery Failures**
   - `get_node_for_task` failing 28% of the time
   - Worst-performing tool in the entire system
   - **Solution**: Expand task library + fuzzy matching (3 days effort)

### Performance Metrics

**Excellent Reliability (96-100% success):**
- n8n_update_partial_workflow: 98.7%
- search_nodes: 99.8%
- n8n_create_workflow: 96.1%
- All workflow management tools: 100%

**User Distribution:**
- Power Users (12): 2,112 events/user, 33 workflows
- Heavy Users (47): 673 events/user, 18 workflows
- Regular Users (516): 199 events/user, 7 workflows (CORE AUDIENCE)
- Active Users (919): 52 events/user, 2 workflows
- Casual Users (625): 8 events/user, 1 workflow

### Usage Insights

**Most Used Tools:**
1. n8n_update_partial_workflow: 10,177 calls (iterative refinement)
2. search_nodes: 8,839 calls (node discovery)
3. n8n_create_workflow: 6,046 calls (workflow creation)

**Most Common Tool Sequences:**
1. update → update → update (549x) - Iterative refinement pattern
2. create → update (297x) - Create then refine
3. update → get_workflow (265x) - Update then verify

**Most Popular Nodes:**
1. code (53% of workflows) - AI agents love programmatic control
2. httpRequest (47%) - Integration-heavy usage
3. webhook (32%) - Event-driven automation

## SQL Analytical Views Created

15 comprehensive views were created in Supabase for ongoing analysis:

1. `vw_tool_performance` - Performance metrics per tool
2. `vw_error_analysis` - Error patterns and frequencies
3. `vw_validation_analysis` - Validation failure details
4. `vw_tool_sequences` - Tool-to-tool transition patterns
5. `vw_workflow_creation_patterns` - Workflow characteristics
6. `vw_node_usage_analysis` - Node popularity and complexity
7. `vw_node_cooccurrence` - Which nodes are used together
8. `vw_user_activity` - Per-user activity metrics
9. `vw_session_analysis` - Platform/version distribution
10. `vw_workflow_validation_failures` - Workflow validation issues
11. `vw_temporal_patterns` - Time-based usage patterns
12. `vw_tool_funnel` - User progression through tools
13. `vw_search_analysis` - Search behavior
14. `vw_tool_success_summary` - Success/failure rates
15. `vw_user_journeys` - Complete user session reconstruction

## Priority Recommendations

### Immediate Actions (This Week)

✅ **P0-R1**: Auto-normalize node type prefixes → Eliminate 4,800 errors (see the sketch below)
✅ **P0-R2**: Complete null-safety audit → Fix 10-18% TypeError failures
✅ **P0-R3**: Expand get_node_for_task library → 72% → 95% success rate

**Expected Impact**: Reduce error rate from 5-10% to <2% overall
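A minimal sketch of what the P0-R1 prefix normalization could look like; the function name and the mapping table are illustrative assumptions, not the shipped implementation.

```typescript
// Hypothetical normalizer for the P0-R1 prefix confusion: accept the short
// forms AI agents commonly emit and map them to the full package prefixes.
const PREFIX_MAP: Record<string, string> = {
  'nodes-base.': 'n8n-nodes-base.',
  'nodes-langchain.': '@n8n/n8n-nodes-langchain.',
};

function normalizeNodeType(nodeType: string): string {
  for (const [shortPrefix, fullPrefix] of Object.entries(PREFIX_MAP)) {
    if (nodeType.startsWith(shortPrefix)) {
      return fullPrefix + nodeType.slice(shortPrefix.length);
    }
  }
  return nodeType; // already fully qualified (or unknown): pass through
}

// normalizeNodeType('nodes-base.httpRequest') === 'n8n-nodes-base.httpRequest'
```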
### Next Release (2-3 Weeks)

✅ **P1-R4**: Batch workflow operations → Save 30-50% tokens
✅ **P1-R5**: Proactive node suggestions → Reduce search iterations
✅ **P1-R6**: Auto-fix suggestions in errors → Self-service recovery

**Expected Impact**: 40% faster workflow creation, better UX

### Future Roadmap (1-3 Months)

✅ **A1**: Service layer consolidation → Cleaner architecture
✅ **A2**: Repository caching → 50% faster node operations
✅ **R10**: Workflow template library from usage → 80% coverage
✅ **T1-T3**: Enhanced telemetry → Better observability

**Expected Impact**: Scalable foundation for 10x growth

## Methodology

### Data Sources

1. **Supabase Telemetry Database**
   - `telemetry_events` table: 212,375 rows
   - `telemetry_workflows` table: 5,751 rows

2. **Analytical Views**
   - Created 15 SQL views for multi-dimensional analysis
   - Enabled complex queries and pattern recognition

3. **CHANGELOG Review**
   - Analyzed recent changes (v2.14.0 - v2.14.6)
   - Correlated fixes with error patterns

### Analysis Approach

1. **Quantitative Analysis**
   - Success/failure rates per tool
   - Performance metrics (avg, median, p95, p99)
   - User segmentation and cohort analysis
   - Temporal trends and growth patterns

2. **Pattern Recognition**
   - Tool sequence analysis (Markov chains; see the sketch after this list)
   - Node co-occurrence patterns
   - Workflow complexity distribution
   - Error clustering and root cause analysis

3. **Qualitative Insights**
   - CHANGELOG integration
   - Error message analysis
   - User journey reconstruction
   - Best practice identification
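As a rough illustration of the tool-sequence analysis, counting first-order (Markov) transitions between consecutive tool calls takes only a few lines; the event shape below is an assumption for the sketch, not the actual telemetry schema.

```typescript
// Illustrative first-order transition count over per-session tool call logs.
// The `ToolEvent` shape is assumed; the real telemetry_events schema may differ.
interface ToolEvent { sessionId: string; tool: string; timestamp: number; }

function countTransitions(events: ToolEvent[]): Map<string, number> {
  const counts = new Map<string, number>();
  const bySession = new Map<string, ToolEvent[]>();
  for (const e of events) {
    if (!bySession.has(e.sessionId)) bySession.set(e.sessionId, []);
    bySession.get(e.sessionId)!.push(e);
  }
  for (const session of bySession.values()) {
    session.sort((a, b) => a.timestamp - b.timestamp);
    for (let i = 1; i < session.length; i++) {
      const key = `${session[i - 1].tool} → ${session[i].tool}`;
      counts.set(key, (counts.get(key) ?? 0) + 1);
    }
  }
  return counts; // e.g. "create → update" -> 297
}
```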
## How to Use This Analysis

### For Development Priorities

1. Review **P0 Critical Recommendations** (Section 8)
2. Check estimated effort and impact
3. Prioritize based on ROI (impact/effort ratio)
4. Follow implementation guides with code examples

### For Architecture Decisions

1. Review **Architectural Recommendations** (Section 9)
2. Consider service layer consolidation
3. Evaluate repository caching opportunities
4. Plan for 10x scale

### For Product Strategy

1. Review **Usage Patterns** (Sections 3 & 5)
2. Understand user segments (power vs casual)
3. Identify high-value features (most-used tools)
4. Focus on reliability over features (96% success rate target)

### For Telemetry Enhancement

1. Review **Telemetry Enhancements** (Section 10)
2. Add fine-grained timing metrics
3. Track workflow creation funnels
4. Monitor node-level analytics

## Contact & Feedback

For questions about this analysis or to request additional insights:
- Data Analyst: Claude Code with Supabase MCP
- Analysis Date: October 2, 2025
- Data Period: September 26 - October 2, 2025

## Change Log

- **2025-10-02**: Initial comprehensive analysis completed
  - 15 SQL analytical views created
  - 13 sections of detailed findings
  - P0/P1/P2 recommendations with implementation guides
  - Code examples and effort estimates provided

## Next Steps

1. ✅ Review findings with development team
2. ✅ Prioritize P0 recommendations for immediate implementation
3. ✅ Plan P1 features for next release cycle
4. ✅ Set up monitoring for key metrics
5. ✅ Schedule follow-up analysis (weekly recommended)

---

*This analysis represents a snapshot of n8n-mcp usage during the early adoption phase. Patterns may evolve as the user base grows and matures.*
1328  docs/local/Deep_dive_p1_p2.md  (new file; diff suppressed because it is too large)
3396  docs/local/N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md  (new file; diff suppressed because it is too large)
1489  docs/local/P0_IMPLEMENTATION_PLAN.md  (new file; diff suppressed because it is too large)
369   docs/local/TEMPLATE_MINING_ANALYSIS.md  (new file)
@@ -0,0 +1,369 @@
# Template Mining Analysis - Alternative to P0-R3

**Date**: 2025-10-02
**Context**: Analyzing whether to fix `get_node_for_task` (28% failure rate) or replace it with template-based configuration extraction

## Executive Summary

**RECOMMENDATION**: Replace `get_node_for_task` with template-based configuration extraction. The template database contains 2,646 real-world workflows with rich node configurations that far exceed the 31 hardcoded task templates.

## Key Findings

### 1. Template Database Coverage

- **Total Templates**: 2,646 production workflows from n8n.io
- **Unique Node Types**: 543 (covers 103% of our 525 core nodes)
- **Metadata Coverage**: 100% (AI-generated structured metadata)

### 2. Node Type Coverage in Templates

Top node types by template usage:
```
3,820 templates: n8n-nodes-base.httpRequest (144% of total templates!)
3,678 templates: n8n-nodes-base.set
2,445 templates: n8n-nodes-base.code
1,700 templates: n8n-nodes-base.googleSheets
1,471 templates: @n8n/n8n-nodes-langchain.agent
1,269 templates: @n8n/n8n-nodes-langchain.lmChatOpenAi
  792 templates: n8n-nodes-base.telegram
  702 templates: n8n-nodes-base.httpRequestTool
  596 templates: n8n-nodes-base.gmail
  466 templates: n8n-nodes-base.webhook
```

(Counts can exceed the template total because a node type may appear multiple times within a single template.)
**Comparison**:
- Hardcoded task templates: 31 tasks covering 5.9% of nodes
- Real templates: 2,646 templates with 2-3k examples for common nodes

### 3. Database Structure

```sql
CREATE TABLE templates (
  id INTEGER PRIMARY KEY,
  workflow_id INTEGER UNIQUE NOT NULL,
  name TEXT NOT NULL,
  description TEXT,
  -- Node information
  nodes_used TEXT,                -- JSON array: ["n8n-nodes-base.httpRequest", ...]
  workflow_json_compressed TEXT,  -- Base64-encoded gzip of full workflow
  -- Metadata (100% coverage)
  metadata_json TEXT,             -- AI-generated structured metadata
  -- Stats
  views INTEGER DEFAULT 0,
  created_at DATETIME,
  -- ...
);
```

### 4. Real Configuration Examples

#### HTTP Request Node Configurations

**Simple URL fetch**:
```json
{
  "url": "https://api.example.com/data",
  "options": {}
}
```

**With authentication**:
```json
{
  "url": "=https://api.wavespeed.ai/api/v3/predictions/{{ $json.data.id }}/result",
  "options": {},
  "authentication": "genericCredentialType",
  "genericAuthType": "httpHeaderAuth"
}
```

**Complex expressions**:
```json
{
  "url": "=https://image.pollinations.ai/prompt/{{$('Social Media Content Factory').item.json.output.description.replaceAll(' ','-').replaceAll(',','').replaceAll('.','') }}",
  "options": {}
}
```

#### Webhook Node Configurations

**Basic webhook**:
```json
{
  "path": "ytube",
  "options": {},
  "httpMethod": "POST",
  "responseMode": "responseNode"
}
```

**With binary data**:
```json
{
  "path": "your-endpoint",
  "options": {
    "binaryPropertyName": "data"
  },
  "httpMethod": "POST"
}
```

### 5. AI-Generated Metadata

Each template has structured metadata including:

```json
{
  "categories": ["automation", "integration", "data processing"],
  "complexity": "medium",
  "use_cases": [
    "Extract transaction data from Gmail",
    "Automate bookkeeping",
    "Expense tracking"
  ],
  "estimated_setup_minutes": 30,
  "required_services": ["Gmail", "Google Sheets", "Google Gemini"],
  "key_features": [
    "Fetch emails by label",
    "Extract transaction data",
    "Use LLM for structured output"
  ],
  "target_audience": ["Accountants", "Small business owners"]
}
```

## Comparison: Task Templates vs Real Templates

### Current Approach (get_node_for_task)

**Pros**:
- Curated configurations with best practices
- Predictable, stable responses
- Fast lookup (no decompression needed)

**Cons**:
- Only 31 tasks (5.9% node coverage)
- 28% failure rate (users can't find what they need)
- Requires manual maintenance
- Static configurations without real-world context
- Usage ratio 22.5:1 (search_nodes is preferred)

### Template-Based Approach

**Pros**:
- 2,646 real workflows with 2-3k examples for common nodes
- 100% metadata coverage for semantic matching
- Real-world patterns and best practices
- Covers 543 node types (103% coverage)
- Self-updating (templates fetched from n8n.io)
- Rich context (use cases, complexity, setup time)

**Cons**:
- Requires decompression for full workflow access
- May contain template-specific context (but can be filtered)
- Needs ranking/filtering logic for best matches

## Proposed Implementation Strategy

### Phase 1: Extract Node Configurations from Templates

Create a new service: `TemplateConfigExtractor`
```typescript
interface ExtractedNodeConfig {
  nodeType: string;
  configuration: Record<string, any>;
  source: {
    templateId: number;
    templateName: string;
    templateViews: number;
    useCases: string[];
    complexity: 'simple' | 'medium' | 'complex';
  };
  patterns: {
    hasAuthentication: boolean;
    hasExpressions: boolean;
    hasOptionalFields: boolean;
  };
}

class TemplateConfigExtractor {
  async extractConfigsForNode(
    nodeType: string,
    options?: {
      complexity?: 'simple' | 'medium' | 'complex';
      requiresAuth?: boolean;
      limit?: number;
    }
  ): Promise<ExtractedNodeConfig[]> {
    // 1. Query templates containing nodeType
    // 2. Decompress workflow_json_compressed
    // 3. Extract node configurations
    // 4. Rank by popularity + complexity match
    // 5. Return top N configurations
  }
}
```
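A rough sketch of how steps 1-5 could work against the schema above, assuming a better-sqlite3-style database handle; ranking is simplified to view count and error handling is elided. This is illustrative, not the final service.

```typescript
import { gunzipSync } from 'zlib';
import Database from 'better-sqlite3';

// Sketch: pull the top templates that use a node type, decompress each
// workflow, and collect the matching nodes' parameter objects.
// Assumes the `templates` schema shown above; ranking is views-only here.
function extractConfigsSketch(
  db: Database.Database,
  nodeType: string,
  limit = 5
): Array<{ templateId: number; templateName: string; configuration: any }> {
  const rows = db
    .prepare(
      `SELECT id, name, views, workflow_json_compressed
       FROM templates
       WHERE nodes_used LIKE ?
       ORDER BY views DESC
       LIMIT ?`
    )
    .all(`%${nodeType}%`, limit * 2) as any[];

  const results: Array<{ templateId: number; templateName: string; configuration: any }> = [];
  for (const row of rows) {
    if (results.length >= limit) break;
    // workflow_json_compressed is base64-encoded gzip (see schema comment)
    const json = gunzipSync(Buffer.from(row.workflow_json_compressed, 'base64')).toString('utf8');
    const workflow = JSON.parse(json);
    for (const node of workflow.nodes ?? []) {
      if (node.type === nodeType && node.parameters) {
        results.push({ templateId: row.id, templateName: row.name, configuration: node.parameters });
        break; // one example per template keeps the result set diverse
      }
    }
  }
  return results;
}
```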
### Phase 2: Integrate with Existing Tools

**Option A**: Enhance `get_node_essentials`
- Add `includeExamples: boolean` parameter
- Return 2-3 real configurations from templates
- Preserve existing compact format

**Option B**: Enhance `get_node_info`
- Add `examples` section with template-sourced configs
- Include source attribution (template name, views)

**Option C**: New tool `get_node_examples`
- Dedicated tool for retrieving configuration examples
- Query by node type, complexity, use case
- Returns ranked list of real configurations

### Phase 3: Deprecate get_node_for_task

- Mark as deprecated in tool documentation
- Redirect to enhanced tools
- Remove after 2-3 version cycles

## Performance Considerations

### Decompression Cost

- Average compressed size: 6-12 KB
- Decompression time: ~5-10ms per template
- Caching strategy needed for frequently accessed templates

### Query Strategy

```sql
-- Fast: Get templates for a node type (no decompression)
SELECT id, name, views, metadata_json
FROM templates
WHERE nodes_used LIKE '%n8n-nodes-base.httpRequest%'
ORDER BY views DESC
LIMIT 10;

-- Then decompress only the top matches
```

### Caching

- Cache decompressed workflows for popular templates (top 100)
- TTL: 1 hour
- Estimated memory: 100 * 50KB = 5MB
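A minimal sketch of the TTL cache described above; at this scale a plain Map with timestamps is enough. Names and the eviction policy are illustrative assumptions.

```typescript
// Tiny TTL cache for decompressed workflows, sized for ~100 popular
// templates as estimated above. Illustrative sketch only.
const TTL_MS = 60 * 60 * 1000; // 1 hour, per the plan above
const MAX_ENTRIES = 100;

const cache = new Map<number, { workflow: any; expiresAt: number }>();

function getCachedWorkflow(templateId: number, decompress: () => any): any {
  const hit = cache.get(templateId);
  if (hit && hit.expiresAt > Date.now()) return hit.workflow;

  const workflow = decompress();
  if (cache.size >= MAX_ENTRIES) {
    // Evict the oldest entry (Map preserves insertion order)
    const oldest = cache.keys().next().value;
    if (oldest !== undefined) cache.delete(oldest);
  }
  cache.set(templateId, { workflow, expiresAt: Date.now() + TTL_MS });
  return workflow;
}
```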
## Impact on P0-R3

**Original P0-R3 Plan**: Expand task library from 31 to 100+ tasks using fuzzy matching

**New Approach**: Mine 2,646 templates for real configurations

**Impact Assessment**:

| Metric | Original Plan | Template Mining |
|--------|--------------|-----------------|
| Configuration examples | 100 (estimated) | 2,646+ actual |
| Node coverage | ~20% | 103% |
| Maintenance | High (manual) | Low (auto-fetch) |
| Accuracy | Curated | Production-tested |
| Context richness | Limited | Rich metadata |
| Development time | 2-3 weeks | 1 week |

**Recommendation**: PIVOT to the template mining approach for P0-R3

## Implementation Estimate

### Week 1: Core Infrastructure
- Day 1-2: Create `TemplateConfigExtractor` service
- Day 3: Implement caching layer
- Day 4-5: Testing and optimization

### Week 2: Integration
- Day 1-2: Enhance `get_node_essentials` with examples
- Day 3: Update tool documentation
- Day 4-5: Integration testing

**Total**: 2 weeks vs 3 weeks for the original plan

## Validation Tests

```typescript
// Test: Extract HTTP Request configs
const configs = await extractor.extractConfigsForNode(
  'n8n-nodes-base.httpRequest',
  { complexity: 'simple', limit: 5 }
);

// Expected: 5 configs from top templates
// - Simple URL fetch
// - With authentication
// - With custom headers
// - With expressions
// - With error handling

// Test: Extract webhook configs
const webhookConfigs = await extractor.extractConfigsForNode(
  'n8n-nodes-base.webhook',
  { limit: 3 }
);

// Expected: 3 configs showing different patterns
// - Basic POST webhook
// - With response node
// - With binary data handling
```

## Risks and Mitigation

### Risk 1: Template Quality Varies
- **Mitigation**: Filter by views (popularity) and metadata complexity rating
- Only use templates with >1000 views for examples

### Risk 2: Decompression Performance
- **Mitigation**: Cache decompressed popular templates
- Implement lazy loading (decompress on demand)

### Risk 3: Template-Specific Context
- **Mitigation**: Extract only the node configuration, strip workflow-specific context
- Provide source attribution for context

### Risk 4: Breaking Changes in Template Structure
- **Mitigation**: Robust error handling in decompression
- Fall back to cached configs if the template fetch fails

## Success Metrics

**Before** (get_node_for_task):
- 392 calls, 72% success rate
- 28% failure rate
- 31 task templates
- 5.9% node coverage

**Target** (template-based):
- 90%+ success rate for configuration discovery
- 100%+ node coverage
- 2,646+ real-world examples
- Self-updating from n8n.io

## Next Steps

1. ✅ Complete template database analysis
2. ⏳ Create `TemplateConfigExtractor` service
3. ⏳ Implement caching layer
4. ⏳ Enhance `get_node_essentials` with examples
5. ⏳ Update P0 implementation plan
6. ⏳ Begin implementation

## Conclusion

The template database provides a vastly superior alternative to hardcoded task templates:

- **2,646 templates** vs 31 tasks (85x more examples)
- **103% node coverage** vs 5.9% coverage (17x improvement)
- **Real-world configurations** vs synthetic examples
- **Self-updating** vs manual maintenance
- **Rich metadata** for semantic matching

**Recommendation**: Pivot P0-R3 from "expand task library" to "mine template configurations"
1306  docs/local/integration-testing-plan.md  (new file; diff suppressed because it is too large)
260   docs/local/integration-tests-phase1-summary.md  (new file)
@@ -0,0 +1,260 @@
# Integration Tests Phase 1: Foundation - COMPLETED

## Overview

Phase 1 establishes the foundation for n8n API integration testing. All core utilities, fixtures, and infrastructure are now in place.

## Branch

`feat/integration-tests-foundation`

## Completed Tasks

### 1. Environment Configuration

- ✅ Updated `.env.example` with integration testing configuration
- ✅ Added environment variables for:
  - n8n API credentials (`N8N_API_URL`, `N8N_API_KEY`)
  - Webhook workflow IDs (4 workflows for GET/POST/PUT/DELETE)
  - Test configuration (cleanup, tags, naming)
- ✅ Included detailed setup instructions in comments

### 2. Directory Structure

```
tests/integration/n8n-api/
├── workflows/    (empty - for Phase 2+)
├── executions/   (empty - for Phase 2+)
├── system/       (empty - for Phase 2+)
├── scripts/
│   └── cleanup-orphans.ts
└── utils/
    ├── credentials.ts
    ├── n8n-client.ts
    ├── test-context.ts
    ├── cleanup-helpers.ts
    ├── fixtures.ts
    ├── factories.ts
    └── webhook-workflows.ts
```

### 3. Core Utilities

#### `credentials.ts` (200 lines)

- Environment-aware credential loading
- Detects CI vs local environment automatically
- Validation functions with helpful error messages
- Non-throwing credential check functions

**Key Functions:**
- `getN8nCredentials()` - Load credentials from .env or GitHub secrets
- `validateCredentials()` - Ensure required credentials are present
- `validateWebhookWorkflows()` - Check webhook workflow IDs with setup instructions
- `hasCredentials()` - Non-throwing credential check
- `hasWebhookWorkflows()` - Non-throwing webhook check

#### `n8n-client.ts` (45 lines)

- Singleton n8n API client wrapper
- Pre-configured with test credentials
- Health check functionality

**Key Functions:**
- `getTestN8nClient()` - Get/create configured API client
- `resetTestN8nClient()` - Reset client instance
- `isN8nApiAccessible()` - Check API connectivity

#### `test-context.ts` (120 lines)

- Resource tracking for automatic cleanup
- Test workflow naming utilities
- Tag management

**Key Functions:**
- `createTestContext()` - Create context for tracking resources
- `TestContext.trackWorkflow()` - Track workflow for cleanup
- `TestContext.trackExecution()` - Track execution for cleanup
- `TestContext.cleanup()` - Delete all tracked resources
- `createTestWorkflowName()` - Generate unique workflow names
- `getTestTag()` - Get configured test tag
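To make the intended usage concrete, here is a rough sketch of how these context utilities would be used inside a Vitest test; the import paths and call signatures are assumptions based on the function list above, not the exact API.

```typescript
import { describe, it, afterEach, expect } from 'vitest';
// Paths and signatures assumed from the utility descriptions above.
import { createTestContext, createTestWorkflowName } from '../utils/test-context';
import { getTestN8nClient } from '../utils/n8n-client';

describe('workflow creation', () => {
  const context = createTestContext();

  // Every tracked resource is deleted even when an assertion fails
  afterEach(() => context.cleanup());

  it('creates and tracks a workflow', async () => {
    const client = getTestN8nClient();
    const workflow = await client.createWorkflow({
      name: createTestWorkflowName('create-basic'),
      nodes: [],
      connections: {},
    });
    context.trackWorkflow(workflow.id); // ensures cleanup on teardown
    expect(workflow.id).toBeDefined();
  });
});
```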
#### `cleanup-helpers.ts` (275 lines)

- Multi-level cleanup strategies
- Orphaned resource detection
- Age-based execution cleanup
- Tag-based workflow cleanup

**Key Functions:**
- `cleanupOrphanedWorkflows()` - Find and delete test workflows
- `cleanupOldExecutions()` - Delete executions older than X hours
- `cleanupAllTestResources()` - Comprehensive cleanup
- `cleanupWorkflowsByTag()` - Delete workflows by tag
- `cleanupExecutionsByWorkflow()` - Delete a workflow's executions

#### `fixtures.ts` (310 lines)

- Pre-built workflow templates
- All using the FULL node type format (n8n-nodes-base.*)

**Available Fixtures:**
- `SIMPLE_WEBHOOK_WORKFLOW` - Single webhook node
- `SIMPLE_HTTP_WORKFLOW` - Webhook + HTTP Request
- `MULTI_NODE_WORKFLOW` - Complex branching workflow
- `ERROR_HANDLING_WORKFLOW` - Error output configuration
- `AI_AGENT_WORKFLOW` - Langchain agent node
- `EXPRESSION_WORKFLOW` - n8n expressions testing

**Helper Functions:**
- `getFixture()` - Get fixture by name (with deep clone)
- `createCustomWorkflow()` - Build custom workflow from nodes

#### `factories.ts` (315 lines)

- Dynamic test data generation
- Node builders with sensible defaults
- Workflow composition helpers

**Node Factories:**
- `createWebhookNode()` - Webhook node with customization
- `createHttpRequestNode()` - HTTP Request node
- `createSetNode()` - Set node with assignments
- `createManualTriggerNode()` - Manual trigger node

**Connection Factories:**
- `createConnection()` - Simple node connection
- `createSequentialWorkflow()` - Auto-connected sequential nodes
- `createParallelWorkflow()` - Trigger with parallel branches
- `createErrorHandlingWorkflow()` - Workflow with error handling

**Utilities:**
- `randomString()` - Generate random test data
- `uniqueId()` - Unique IDs for testing
- `createTestTags()` - Test workflow tags
- `createWorkflowSettings()` - Common settings
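A sketch of how the factories might compose into a test workflow; the argument shapes are assumptions inferred from the factory names above, not the actual signatures.

```typescript
// Illustrative composition using the factory names listed above; the
// parameter shapes are assumed, not the actual factory signatures.
import {
  createWebhookNode,
  createHttpRequestNode,
  createSequentialWorkflow,
} from '../utils/factories';

const webhook = createWebhookNode({ path: 'test-endpoint', httpMethod: 'POST' });
const http = createHttpRequestNode({ url: 'https://api.example.com/process' });

// Auto-connects the nodes in order: webhook -> http
const workflow = createSequentialWorkflow('Factory Demo', [webhook, http]);
```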
#### `webhook-workflows.ts` (215 lines)

- Webhook workflow configuration templates
- Setup instructions generator
- URL generation utilities

**Key Features:**
- `WEBHOOK_WORKFLOW_CONFIGS` - Configurations for all 4 HTTP methods
- `printSetupInstructions()` - Print detailed setup guide
- `generateWebhookWorkflowJson()` - Generate workflow JSON
- `exportAllWebhookWorkflows()` - Export all 4 configs
- `getWebhookUrl()` - Get webhook URL for testing
- `isValidWebhookWorkflow()` - Validate workflow structure

### 4. Scripts

#### `cleanup-orphans.ts` (40 lines)

- Standalone cleanup script
- Can be run manually or in CI
- Comprehensive output logging

**Usage:**
```bash
npm run test:cleanup:orphans
```

### 5. npm Scripts

Added to `package.json`:
```json
{
  "test:integration:n8n": "vitest run tests/integration/n8n-api",
  "test:cleanup:orphans": "tsx tests/integration/n8n-api/scripts/cleanup-orphans.ts"
}
```

## Code Quality

### TypeScript
- ✅ All code passes `npm run typecheck`
- ✅ All code compiles with `npm run build`
- ✅ No TypeScript errors
- ✅ Proper type annotations throughout

### Error Handling
- ✅ Comprehensive error messages
- ✅ Helpful setup instructions in error messages
- ✅ Non-throwing validation functions where appropriate
- ✅ Graceful handling of missing credentials

### Documentation
- ✅ All functions have JSDoc comments
- ✅ Usage examples in comments
- ✅ Clear parameter descriptions
- ✅ Return type documentation

## Files Created

### Documentation
1. `docs/local/integration-testing-plan.md` (550 lines)
2. `docs/local/integration-tests-phase1-summary.md` (this file)

### Code
1. `.env.example` - Updated with test configuration (32 new lines)
2. `package.json` - Added 2 npm scripts
3. `tests/integration/n8n-api/utils/credentials.ts` (200 lines)
4. `tests/integration/n8n-api/utils/n8n-client.ts` (45 lines)
5. `tests/integration/n8n-api/utils/test-context.ts` (120 lines)
6. `tests/integration/n8n-api/utils/cleanup-helpers.ts` (275 lines)
7. `tests/integration/n8n-api/utils/fixtures.ts` (310 lines)
8. `tests/integration/n8n-api/utils/factories.ts` (315 lines)
9. `tests/integration/n8n-api/utils/webhook-workflows.ts` (215 lines)
10. `tests/integration/n8n-api/scripts/cleanup-orphans.ts` (40 lines)

**Total New Code:** ~1,520 lines of production-ready TypeScript

## Next Steps (Phase 2)

Phase 2 will implement the first actual integration tests:
- Create workflow creation tests (10+ scenarios)
- Test P0 bug fix (SHORT vs FULL node types)
- Test workflow retrieval
- Test workflow deletion

**Branch:** `feat/integration-tests-workflow-creation`

## Prerequisites for Running Tests

Before running integration tests, you need to:

1. **Set up an n8n instance:**
   - Local: `npx n8n start`
   - Or use cloud/self-hosted n8n

2. **Configure credentials in `.env`:**
   ```bash
   N8N_API_URL=http://localhost:5678
   N8N_API_KEY=<your-api-key>
   ```

3. **Create 4 webhook workflows manually:**
   - One for each HTTP method (GET, POST, PUT, DELETE)
   - Activate each workflow in the n8n UI
   - Set workflow IDs in `.env`:
     ```bash
     N8N_TEST_WEBHOOK_GET_ID=<workflow-id>
     N8N_TEST_WEBHOOK_POST_ID=<workflow-id>
     N8N_TEST_WEBHOOK_PUT_ID=<workflow-id>
     N8N_TEST_WEBHOOK_DELETE_ID=<workflow-id>
     ```

See `docs/local/integration-testing-plan.md` for detailed setup instructions.

## Success Metrics

Phase 1 Success Criteria - ALL MET:
- ✅ All utilities implemented and tested
- ✅ TypeScript compiles without errors
- ✅ Code follows project conventions
- ✅ Comprehensive documentation
- ✅ Environment configuration complete
- ✅ Cleanup infrastructure in place
- ✅ Ready for Phase 2 test implementation

## Lessons Learned

1. **N8nApiClient Constructor:** Uses a config object, not separate parameters
2. **Cursor Handling:** The n8n API returns `null` when there are no more pages; it must be converted to `undefined`
3. **Workflow ID Validation:** Some workflows may have undefined IDs, so null checks are needed
4. **Connection Types:** Error connections need explicit typing to avoid TypeScript errors
5. **Webhook Activation:** Cannot be done via the API; it must be manual - hence the pre-activated workflow requirement

## Time Invested

Phase 1 actual time: ~2 hours (estimated 2-3 days in the plan)
- Faster than expected due to clear architecture and reusable patterns
@@ -1,712 +0,0 @@
# MCP Tools Documentation for LLMs

This document provides comprehensive documentation for the most commonly used MCP tools in the n8n-mcp server. Each tool includes parameters, return formats, examples, and best practices.

## Table of Contents
1. [search_nodes](#search_nodes)
2. [get_node_essentials](#get_node_essentials)
3. [list_nodes](#list_nodes)
4. [validate_node_minimal](#validate_node_minimal)
5. [validate_node_operation](#validate_node_operation)
6. [get_node_for_task](#get_node_for_task)
7. [n8n_create_workflow](#n8n_create_workflow)
8. [n8n_update_partial_workflow](#n8n_update_partial_workflow)

---

## search_nodes

**Brief Description**: Search for n8n nodes by keywords in names and descriptions.

### Parameters
- `query` (string, required): Search term - single word recommended for best results
- `limit` (number, optional): Maximum results to return (default: 20)

### Return Format
```json
{
  "nodes": [
    {
      "nodeType": "nodes-base.slack",
      "displayName": "Slack",
      "description": "Send messages to Slack channels"
    }
  ],
  "totalFound": 5
}
```

### Common Use Cases
1. **Finding integration nodes**: `search_nodes("slack")` to find Slack integration
2. **Finding HTTP nodes**: `search_nodes("http")` for HTTP/webhook nodes
3. **Finding database nodes**: `search_nodes("postgres")` for PostgreSQL nodes

### Examples
```json
// Search for Slack-related nodes
{
  "query": "slack",
  "limit": 10
}

// Search for webhook nodes
{
  "query": "webhook",
  "limit": 20
}
```

### Performance Notes
- Fast operation (cached results)
- Single-word queries are more precise
- Returns results with OR logic (any word matches)

### Best Practices
- Use single words for precise results: "slack" not "send slack message"
- Try shorter terms if no results: "sheet" instead of "spreadsheet"
- Search is case-insensitive
- Common searches: "http", "webhook", "email", "database", "slack"

### Common Pitfalls
- Multi-word searches return too many results (OR logic)
- Searching for exact phrases doesn't work
- Node types aren't searchable here (use the exact type with get_node_info)

### Related Tools
- `list_nodes` - Browse nodes by category
- `get_node_essentials` - Get node configuration after finding it
- `list_ai_tools` - Find AI-capable nodes specifically

---
## get_node_essentials

**Brief Description**: Get only the 10-20 most important properties for a node with working examples.

### Parameters
- `nodeType` (string, required): Full node type with prefix (e.g., "nodes-base.httpRequest")

### Return Format
```json
{
  "nodeType": "nodes-base.httpRequest",
  "displayName": "HTTP Request",
  "essentialProperties": [
    {
      "name": "method",
      "type": "options",
      "default": "GET",
      "options": ["GET", "POST", "PUT", "DELETE"],
      "required": true
    },
    {
      "name": "url",
      "type": "string",
      "required": true,
      "placeholder": "https://api.example.com/endpoint"
    }
  ],
  "examples": [
    {
      "name": "Simple GET Request",
      "configuration": {
        "method": "GET",
        "url": "https://api.example.com/users"
      }
    }
  ],
  "tips": [
    "Use expressions like {{$json.url}} to make URLs dynamic",
    "Enable 'Split Into Items' for array responses"
  ]
}
```

### Common Use Cases
1. **Quick node configuration**: Get just what you need without parsing 100KB+ of data
2. **Learning node basics**: Understand essential properties with examples
3. **Building workflows efficiently**: 95% smaller responses than get_node_info

### Examples
```json
// Get essentials for HTTP Request node
{
  "nodeType": "nodes-base.httpRequest"
}

// Get essentials for Slack node
{
  "nodeType": "nodes-base.slack"
}

// Get essentials for OpenAI node
{
  "nodeType": "nodes-langchain.openAi"
}
```

### Performance Notes
- Very fast (<5KB responses vs 100KB+ for full info)
- Curated for 20+ common nodes
- Automatic fallback for unconfigured nodes

### Best Practices
- Always use this before get_node_info
- Node type must include the prefix: "nodes-base.slack" not "slack"
- Check the examples section for working configurations
- Use the tips section for common patterns

### Common Pitfalls
- Forgetting the prefix in the node type
- Using the wrong package name (n8n-nodes-base vs @n8n/n8n-nodes-langchain)
- Case sensitivity in node types

### Related Tools
- `get_node_info` - Full schema when essentials aren't enough
- `search_node_properties` - Find specific properties
- `get_node_for_task` - Pre-configured for common tasks

---
## list_nodes

**Brief Description**: List available n8n nodes with optional filtering by package, category, or capabilities.

### Parameters
- `package` (string, optional): Filter by exact package name
- `category` (string, optional): Filter by category (trigger, transform, output, input)
- `developmentStyle` (string, optional): Filter by implementation style
- `isAITool` (boolean, optional): Filter for AI-capable nodes
- `limit` (number, optional): Maximum results (default: 50, max: 500)

### Return Format
```json
{
  "nodes": [
    {
      "nodeType": "nodes-base.webhook",
      "displayName": "Webhook",
      "description": "Receive HTTP requests",
      "categories": ["trigger"],
      "version": 2
    }
  ],
  "total": 104,
  "hasMore": false
}
```

### Common Use Cases
1. **Browse all triggers**: `list_nodes({category: "trigger", limit: 200})`
2. **List all nodes**: `list_nodes({limit: 500})`
3. **Find AI nodes**: `list_nodes({isAITool: true})`
4. **Browse core nodes**: `list_nodes({package: "n8n-nodes-base"})`

### Examples
```json
// List all trigger nodes
{
  "category": "trigger",
  "limit": 200
}

// List all AI-capable nodes
{
  "isAITool": true,
  "limit": 100
}

// List nodes from the core package
{
  "package": "n8n-nodes-base",
  "limit": 200
}
```

### Performance Notes
- Fast operation (cached results)
- Default limit of 50 may miss nodes - use 200+
- Returns metadata only, not full schemas

### Best Practices
- Always set the limit to 200+ for complete results
- Use exact package names: "n8n-nodes-base" not "@n8n/n8n-nodes-base"
- Categories are singular: "trigger" not "triggers"
- Common categories: trigger (104), transform, output, input

### Common Pitfalls
- Default limit (50) misses many nodes
- Using the wrong package name format
- Multiple filters may return empty results

### Related Tools
- `search_nodes` - Search by keywords
- `list_ai_tools` - Specifically for AI nodes
- `get_database_statistics` - Overview of all nodes

---
## validate_node_minimal

**Brief Description**: Quick validation checking only for missing required fields.

### Parameters
- `nodeType` (string, required): Node type to validate (e.g., "nodes-base.slack")
- `config` (object, required): Node configuration to check

### Return Format
```json
{
  "valid": false,
  "missingRequired": ["channel", "messageType"],
  "message": "Missing 2 required fields"
}
```

### Common Use Cases
1. **Quick validation**: Check if all required fields are present
2. **Pre-flight check**: Validate before creating a workflow
3. **Minimal overhead**: Fastest validation option

### Examples
```json
// Validate Slack message configuration
{
  "nodeType": "nodes-base.slack",
  "config": {
    "resource": "message",
    "operation": "send",
    "text": "Hello World"
    // Missing: channel
  }
}

// Validate HTTP Request
{
  "nodeType": "nodes-base.httpRequest",
  "config": {
    "method": "POST"
    // Missing: url
  }
}
```

### Performance Notes
- Fastest validation option
- No schema loading overhead
- Returns only missing fields

### Best Practices
- Use for quick checks during workflow building
- Follow up with validate_node_operation for complex nodes
- Check operation-specific requirements

### Common Pitfalls
- Doesn't validate field values or types
- Doesn't check operation-specific requirements
- Won't catch configuration errors beyond missing fields

### Related Tools
- `validate_node_operation` - Comprehensive validation
- `validate_workflow` - Full workflow validation

---
## validate_node_operation

**Brief Description**: Comprehensive node configuration validation with operation awareness and helpful error messages.

### Parameters
- `nodeType` (string, required): Node type to validate
- `config` (object, required): Complete node configuration including operation fields
- `profile` (string, optional): Validation profile (minimal, runtime, ai-friendly, strict)

### Return Format
```json
{
  "valid": false,
  "errors": [
    {
      "field": "channel",
      "message": "Channel is required to send Slack message",
      "suggestion": "Add channel: '#general' or '@username'"
    }
  ],
  "warnings": [
    {
      "field": "unfurl_links",
      "message": "Consider setting unfurl_links: false for better performance"
    }
  ],
  "examples": {
    "minimal": {
      "resource": "message",
      "operation": "send",
      "channel": "#general",
      "text": "Hello World"
    }
  }
}
```

### Common Use Cases
1. **Complex node validation**: Slack, Google Sheets, databases
2. **Operation-specific checks**: Different rules per operation
3. **Getting fix suggestions**: Helpful error messages with solutions

### Examples
```json
// Validate Slack configuration
{
  "nodeType": "nodes-base.slack",
  "config": {
    "resource": "message",
    "operation": "send",
    "text": "Hello team!"
  },
  "profile": "ai-friendly"
}

// Validate Google Sheets operation
{
  "nodeType": "nodes-base.googleSheets",
  "config": {
    "operation": "append",
    "sheetId": "1234567890",
    "range": "Sheet1!A:Z"
  },
  "profile": "runtime"
}
```

### Performance Notes
- Slower than minimal validation
- Loads the full node schema
- Operation-aware validation rules

### Best Practices
- Use the "ai-friendly" profile for balanced validation
- Check examples in the response for working configurations
- Follow suggestions to fix errors
- Essential for complex nodes (Slack, databases, APIs)

### Common Pitfalls
- Forgetting operation fields (resource, operation, action)
- Using the wrong profile (too strict or too lenient)
- Ignoring warnings that could cause runtime issues

### Related Tools
- `validate_node_minimal` - Quick required field check
- `get_property_dependencies` - Understand field relationships
- `validate_workflow` - Validate entire workflow

---
## get_node_for_task

**Brief Description**: Get pre-configured node settings for common automation tasks.

### Parameters
- `task` (string, required): Task identifier (e.g., "post_json_request", "receive_webhook")

### Return Format
```json
{
  "task": "post_json_request",
  "nodeType": "nodes-base.httpRequest",
  "displayName": "HTTP Request",
  "configuration": {
    "method": "POST",
    "url": "={{ $json.api_endpoint }}",
    "responseFormat": "json",
    "options": {
      "bodyContentType": "json"
    },
    "bodyParametersJson": "={{ JSON.stringify($json) }}"
  },
  "userMustProvide": [
    "url - The API endpoint URL",
    "bodyParametersJson - The JSON data to send"
  ],
  "tips": [
    "Use expressions to make values dynamic",
    "Enable 'Split Into Items' for batch processing"
  ]
}
```

### Common Use Cases
1. **Quick task setup**: Configure nodes for specific tasks instantly
2. **Learning patterns**: See how to configure nodes properly
3. **Common workflows**: Standard patterns like webhooks, API calls, database queries

### Examples
```json
// Get configuration for JSON POST request
{
  "task": "post_json_request"
}

// Get webhook receiver configuration
{
  "task": "receive_webhook"
}

// Get AI chat configuration
{
  "task": "chat_with_ai"
}
```

### Performance Notes
- Instant response (pre-configured templates)
- No database lookups required
- Includes working examples

### Best Practices
- Use list_tasks first to see available options
- Check the userMustProvide section
- Follow the tips for best results
- Common tasks: API calls, webhooks, database queries, AI chat

### Common Pitfalls
- Not all tasks are available (use list_tasks)
- Configuration needs customization
- Some fields still need user input

### Related Tools
- `list_tasks` - See all available tasks
- `get_node_essentials` - Alternative approach
- `search_templates` - Find complete workflow templates

---
## n8n_create_workflow

**Brief Description**: Create a new workflow in n8n with nodes and connections.

### Parameters
- `name` (string, required): Workflow name
- `nodes` (array, required): Array of node definitions
- `connections` (object, required): Node connections mapping
- `settings` (object, optional): Workflow settings

### Return Format
```json
{
  "id": "workflow-uuid",
  "name": "My Workflow",
  "active": false,
  "createdAt": "2024-01-15T10:30:00Z",
  "updatedAt": "2024-01-15T10:30:00Z",
  "nodes": [...],
  "connections": {...}
}
```

### Common Use Cases
1. **Automated workflow creation**: Build workflows programmatically
2. **Template deployment**: Deploy pre-built workflow patterns
3. **Multi-workflow systems**: Create interconnected workflows

### Examples
```json
// Create simple webhook → HTTP request workflow
{
  "name": "Webhook to API",
  "nodes": [
    {
      "id": "webhook-1",
      "name": "Webhook",
      "type": "n8n-nodes-base.webhook",
      "typeVersion": 2,
      "position": [250, 300],
      "parameters": {
        "path": "/my-webhook",
        "httpMethod": "POST"
      }
    },
    {
      "id": "http-1",
      "name": "HTTP Request",
      "type": "n8n-nodes-base.httpRequest",
      "typeVersion": 4.2,
      "position": [450, 300],
      "parameters": {
        "method": "POST",
        "url": "https://api.example.com/process",
        "responseFormat": "json"
      }
    }
  ],
  "connections": {
    "Webhook": {
      "main": [[{"node": "HTTP Request", "type": "main", "index": 0}]]
    }
  }
}
```

### Performance Notes
- API call to n8n instance required
- Workflow is created in an inactive state
- Must be manually activated in the UI

### Best Practices
- Always include typeVersion for nodes
- Use node names (not IDs) in connections
- Position nodes logically ([x, y] coordinates)
- Test with validate_workflow first
- Start simple, add complexity gradually

### Common Pitfalls
- Missing typeVersion causes errors
- Using node IDs instead of names in connections
- Forgetting required node properties
- Creating cycles in connections
- Workflow can't be activated via the API

### Related Tools
- `validate_workflow` - Validate before creating
- `n8n_update_partial_workflow` - Modify existing workflows
- `n8n_trigger_webhook_workflow` - Execute workflows

---
## n8n_update_partial_workflow

**Brief Description**: Update workflows using diff operations for precise, incremental changes without sending the entire workflow.

### Parameters
- `id` (string, required): Workflow ID to update
- `operations` (array, required): Array of diff operations (max 5)
- `validateOnly` (boolean, optional): Test without applying changes

### Return Format
```json
{
  "success": true,
  "workflow": {
    "id": "workflow-uuid",
    "name": "Updated Workflow",
    "nodes": [...],
    "connections": {...}
  },
  "appliedOperations": 3
}
```

### Common Use Cases
1. **Add nodes to existing workflows**: Insert new functionality
2. **Update node configurations**: Change parameters without full replacement
3. **Manage connections**: Add/remove node connections
4. **Quick edits**: Rename, enable/disable nodes, update settings

### Examples
```json
// Add a new node and connect it
{
  "id": "workflow-123",
  "operations": [
    {
      "type": "addNode",
      "node": {
        "id": "set-1",
        "name": "Set Data",
        "type": "n8n-nodes-base.set",
        "typeVersion": 3,
        "position": [600, 300],
        "parameters": {
          "values": {
            "string": [{
              "name": "status",
              "value": "processed"
            }]
          }
        }
      }
    },
    {
      "type": "addConnection",
      "source": "HTTP Request",
      "target": "Set Data"
    }
  ]
}

// Update multiple properties
{
  "id": "workflow-123",
  "operations": [
    {
      "type": "updateName",
      "name": "Production Workflow v2"
    },
    {
      "type": "updateNode",
      "nodeName": "Webhook",
      "changes": {
        "parameters.path": "/v2/webhook"
      }
    },
    {
      "type": "addTag",
      "tag": "production"
    }
  ]
}
```

### Performance Notes
- 80-90% token savings vs full updates
- Maximum 5 operations per request
- Two-pass processing handles dependencies
- Transactional: all or nothing

### Best Practices
- Use validateOnly: true to test first
- Keep operations under 5 for reliability
- Operations can be in any order (v2.7.0+)
- Use node names, not IDs, in operations
- For updateNode, use dot notation for nested paths

### Common Pitfalls
- Exceeding the 5-operation limit
- Using node IDs instead of names
- Forgetting required node properties in addNode
- Not testing with validateOnly first

### Related Tools
- `n8n_update_full_workflow` - Complete workflow replacement
- `n8n_get_workflow` - Fetch current workflow state
- `validate_workflow` - Validate changes before applying

---
## Quick Reference

### Workflow Building Process
1. **Discovery**: `search_nodes` → `list_nodes`
2. **Configuration**: `get_node_essentials` → `get_node_for_task`
3. **Validation**: `validate_node_minimal` → `validate_node_operation`
4. **Creation**: `validate_workflow` → `n8n_create_workflow`
5. **Updates**: `n8n_update_partial_workflow` (see the client sketch after this list)
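For orientation, the whole process can be driven programmatically through an MCP client; the sketch below uses the `@modelcontextprotocol/sdk` client API, with the transport command and the argument payloads simplified and assumed.

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Sketch of the discovery → configuration → validation → creation sequence.
// Argument payloads are illustrative; see each tool's section above.
async function buildWorkflow() {
  const client = new Client({ name: 'example-client', version: '1.0.0' });
  await client.connect(new StdioClientTransport({ command: 'npx', args: ['n8n-mcp'] }));

  // 1. Discovery
  await client.callTool({ name: 'search_nodes', arguments: { query: 'webhook' } });
  // 2. Configuration
  await client.callTool({ name: 'get_node_essentials', arguments: { nodeType: 'nodes-base.webhook' } });
  // 3. Validation
  await client.callTool({
    name: 'validate_node_minimal',
    arguments: { nodeType: 'nodes-base.webhook', config: { path: '/demo', httpMethod: 'POST' } },
  });
  // 4. Creation (nodes/connections omitted here for brevity)
  await client.callTool({ name: 'n8n_create_workflow', arguments: { name: 'Demo', nodes: [], connections: {} } });
}
```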
### Performance Tips
- Use `get_node_essentials` instead of `get_node_info` (95% smaller)
- Set high limits on `list_nodes` (200+)
- Use single words in `search_nodes`
- Validate incrementally while building

### Common Node Types
- **Triggers**: webhook, schedule, emailReadImap, slackTrigger
- **Core**: httpRequest, code, set, if, merge, splitInBatches
- **Integrations**: slack, gmail, googleSheets, postgres, mongodb
- **AI**: agent, openAi, chainLlm, documentLoader

### Error Prevention
- Always include node type prefixes: "nodes-base.slack"
- Use node names (not IDs) in connections
- Include typeVersion in all nodes
- Test with validateOnly before applying changes
- Check userMustProvide sections in templates
@@ -1,514 +0,0 @@
# n8n MCP Client Tool Integration - Implementation Plan (Simplified)

## Overview

This document provides a **simplified** implementation plan for making n8n-mcp compatible with n8n's MCP Client Tool (v1.1). Based on expert review, we're taking a minimal approach that extends the existing single-session server rather than creating new architecture.

## Key Design Principles

1. **Minimal Changes**: Extend the existing single-session server with an n8n compatibility mode
2. **No Overengineering**: No complex session management or multi-session architecture
3. **Docker-Native**: Separate Docker image for n8n deployment
4. **Remote Deployment**: Designed to run alongside n8n in production
5. **Backward Compatible**: Existing functionality remains unchanged

## Prerequisites

- Docker and Docker Compose
- n8n version 1.104.2 or higher (with MCP Client Tool v1.1)
- Basic understanding of Docker networking

## Implementation Approach

Instead of creating a new multi-session architecture, we'll extend the existing single-session server with an n8n compatibility mode. This approach was recommended by all three expert reviewers as simpler and more maintainable.

## Architecture Changes

```
src/
├── http-server-single-session.ts  # MODIFY: Add n8n mode flag
└── mcp/
    └── server.ts                  # NO CHANGES NEEDED

Docker/
├── Dockerfile.n8n                 # NEW: n8n-specific image
├── docker-compose.n8n.yml         # NEW: Simplified stack
└── .github/workflows/
    └── docker-build-n8n.yml       # NEW: Build workflow
```

## Implementation Steps

### Step 1: Modify the Existing Single-Session Server

#### 1.1 Update `src/http-server-single-session.ts`

Add n8n compatibility mode to the existing server with minimal changes:

```typescript
// Add these constants at the top (after imports)
const PROTOCOL_VERSION = "2024-11-05";
const N8N_MODE = process.env.N8N_MODE === 'true';

// In the constructor or start method, add logging
if (N8N_MODE) {
  logger.info('Running in n8n compatibility mode');
}

// In the setupRoutes method, add the protocol version endpoint
if (N8N_MODE) {
  app.get('/mcp', (req, res) => {
    res.json({
      protocolVersion: PROTOCOL_VERSION,
      serverInfo: {
        name: "n8n-mcp",
        version: PROJECT_VERSION,
        capabilities: {
          tools: true,
          resources: false,
          prompts: false,
        },
      },
    });
  });
}

// In the handleMCPRequest method, add the session header
if (N8N_MODE && this.session) {
  res.setHeader('Mcp-Session-Id', this.session.sessionId);
}

// Update error handling to use JSON-RPC format
catch (error) {
  logger.error('MCP request error:', error);

  if (N8N_MODE) {
    res.status(500).json({
      jsonrpc: '2.0',
      error: {
        code: -32603,
        message: 'Internal error',
        data: error instanceof Error ? error.message : 'Unknown error',
      },
      id: null,
    });
  } else {
    // Keep existing error handling for backward compatibility
    res.status(500).json({
      error: 'Internal server error',
      details: error instanceof Error ? error.message : 'Unknown error'
    });
  }
}
```

That's it: no new files, no complex session management, just a few lines of code.

### Step 2: Update Package Scripts

#### 2.1 Update `package.json`

Add a simple script for n8n mode:

```json
{
  "scripts": {
    "start:n8n": "N8N_MODE=true MCP_MODE=http node dist/mcp/index.js"
  }
}
```
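After a build, the mode can be smoke-tested locally. An illustrative check, assuming the server honors `HTTP_PORT` and listens on 3001 as configured in the compose file below:

```bash
npm run build
HTTP_PORT=3001 npm run start:n8n &
curl http://localhost:3001/mcp   # expect {"protocolVersion":"2024-11-05",...}
```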
### Step 3: Create Docker Infrastructure for n8n

#### 3.1 Create `Dockerfile.n8n`

```dockerfile
# Dockerfile.n8n - Optimized for n8n integration
FROM node:22-alpine AS builder

WORKDIR /app

# Install build dependencies
RUN apk add --no-cache python3 make g++

# Copy package files
COPY package*.json tsconfig*.json ./

# Install ALL dependencies
RUN npm ci --no-audit --no-fund

# Copy source and build
COPY src ./src
RUN npm run build && npm run rebuild

# Runtime stage
FROM node:22-alpine

WORKDIR /app

# Install runtime dependencies
RUN apk add --no-cache curl dumb-init

# Create non-root user
RUN addgroup -g 1001 -S nodejs && adduser -S nodejs -u 1001

# Copy application from builder
COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist
COPY --from=builder --chown=nodejs:nodejs /app/data ./data
COPY --from=builder --chown=nodejs:nodejs /app/node_modules ./node_modules
COPY --chown=nodejs:nodejs package.json ./

USER nodejs

EXPOSE 3001

HEALTHCHECK CMD curl -f http://localhost:3001/health || exit 1

ENTRYPOINT ["dumb-init", "--"]
CMD ["node", "dist/mcp/index.js"]
```

#### 3.2 Create `docker-compose.n8n.yml`

```yaml
# docker-compose.n8n.yml - Simple stack for n8n + n8n-mcp
version: '3.8'

services:
  n8n:
    image: n8nio/n8n:latest
    container_name: n8n
    restart: unless-stopped
    ports:
      - "5678:5678"
    environment:
      - N8N_BASIC_AUTH_ACTIVE=${N8N_BASIC_AUTH_ACTIVE:-true}
      - N8N_BASIC_AUTH_USER=${N8N_USER:-admin}
      - N8N_BASIC_AUTH_PASSWORD=${N8N_PASSWORD:-changeme}
      - N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
    volumes:
      - n8n_data:/home/node/.n8n
    networks:
      - n8n-net
    depends_on:
      n8n-mcp:
        condition: service_healthy

  n8n-mcp:
    image: ghcr.io/${GITHUB_USER:-czlonkowski}/n8n-mcp-n8n:latest
    build:
      context: .
      dockerfile: Dockerfile.n8n
    container_name: n8n-mcp
    restart: unless-stopped
    environment:
      - MCP_MODE=http
      - N8N_MODE=true
      - AUTH_TOKEN=${MCP_AUTH_TOKEN}
      - NODE_ENV=production
      - HTTP_PORT=3001
    networks:
      - n8n-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3001/health"]
      interval: 30s
      timeout: 10s
      retries: 3

networks:
  n8n-net:
    driver: bridge

volumes:
  n8n_data:
```

#### 3.3 Create `.env.n8n.example`

```bash
# .env.n8n.example - Copy to .env and configure

# n8n Configuration
N8N_USER=admin
N8N_PASSWORD=changeme
N8N_BASIC_AUTH_ACTIVE=true

# MCP Configuration
# Generate with: openssl rand -base64 32
MCP_AUTH_TOKEN=your-secure-token-minimum-32-characters

# GitHub username for the image registry
GITHUB_USER=czlonkowski
```

### Step 4: Create GitHub Actions Workflow

#### 4.1 Create `.github/workflows/docker-build-n8n.yml`

```yaml
name: Build n8n Docker Image

on:
  push:
    branches: [main]
    tags: ['v*']
    paths:
      - 'src/**'
      - 'package*.json'
      - 'Dockerfile.n8n'
  workflow_dispatch:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}-n8n

jobs:
  build:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - uses: actions/checkout@v4

      - uses: docker/setup-buildx-action@v3

      - uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - uses: docker/metadata-action@v5
        id: meta
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=raw,value=latest,enable={{is_default_branch}}

      - uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile.n8n
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
```
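Because the workflow also declares `workflow_dispatch`, a build can be started manually with the GitHub CLI:

```bash
gh workflow run "Build n8n Docker Image"
```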
### Step 5: Testing

#### 5.1 Unit Tests for n8n Mode

Create `tests/unit/http-server-n8n-mode.test.ts`:

```typescript
import { describe, it, expect, vi } from 'vitest';
import request from 'supertest';

describe('n8n Mode', () => {
  it('should return protocol version on GET /mcp', async () => {
    process.env.N8N_MODE = 'true';
    const app = await createTestApp();

    const response = await request(app)
      .get('/mcp')
      .expect(200);

    expect(response.body.protocolVersion).toBe('2024-11-05');
    expect(response.body.serverInfo.capabilities.tools).toBe(true);
  });

  it('should include session ID in response headers', async () => {
    process.env.N8N_MODE = 'true';
    const app = await createTestApp();

    const response = await request(app)
      .post('/mcp')
      .set('Authorization', 'Bearer test-token')
      .send({ jsonrpc: '2.0', method: 'initialize', id: 1 });

    expect(response.headers['mcp-session-id']).toBeDefined();
  });

  it('should format errors as JSON-RPC', async () => {
    process.env.N8N_MODE = 'true';
    const app = await createTestApp();

    const response = await request(app)
      .post('/mcp')
      .send({ invalid: 'request' })
      .expect(500);

    expect(response.body.jsonrpc).toBe('2.0');
    expect(response.body.error.code).toBe(-32603);
  });
});
```
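These tests assume a `createTestApp()` helper that builds the Express app without binding to a port. No such helper exists yet; a minimal sketch, assuming the route wiring can be factored out of `http-server-single-session.ts` (the GET /mcp handler below is stubbed purely for illustration):

```typescript
// tests/utils/create-test-app.ts - hypothetical helper assumed by the tests above
import express, { type Express } from 'express';

export async function createTestApp(): Promise<Express> {
  const app = express();
  app.use(express.json());

  // In a real implementation, call the server's route setup here.
  // For illustration we stub only the n8n-mode protocol endpoint.
  if (process.env.N8N_MODE === 'true') {
    app.get('/mcp', (_req, res) => {
      res.json({
        protocolVersion: '2024-11-05',
        serverInfo: {
          name: 'n8n-mcp',
          version: 'test',
          capabilities: { tools: true, resources: false, prompts: false },
        },
      });
    });
  }

  return app;
}
```

In practice the helper should invoke the server's real route setup so the tests exercise production code rather than the stub above.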
#### 5.2 Quick Deployment Script

Create `deploy/quick-deploy-n8n.sh`:

```bash
#!/bin/bash
set -e

echo "🚀 Quick Deploy n8n + n8n-mcp"

# Check prerequisites
command -v docker >/dev/null 2>&1 || { echo "Docker required"; exit 1; }
command -v docker-compose >/dev/null 2>&1 || { echo "Docker Compose required"; exit 1; }

# Generate an auth token if .env does not exist yet
if [ ! -f .env ]; then
    cp .env.n8n.example .env
    # Strip characters that would break the sed replacement below
    TOKEN=$(openssl rand -base64 32 | tr -d '/+=')
    sed -i "s/your-secure-token-minimum-32-characters/$TOKEN/" .env
    echo "Generated MCP_AUTH_TOKEN: $TOKEN"
fi

# Deploy
docker-compose -f docker-compose.n8n.yml up -d

echo ""
echo "✅ Deployment complete!"
echo ""
echo "📋 Next steps:"
echo "1. Access n8n at http://localhost:5678"
echo "   Username: admin (or check .env)"
echo "   Password: changeme (or check .env)"
echo ""
echo "2. Create a workflow with MCP Client Tool:"
echo "   - Server URL: http://n8n-mcp:3001/mcp"
echo "   - Authentication: Bearer Token"
echo "   - Token: Check .env file for MCP_AUTH_TOKEN"
echo ""
echo "📊 View logs: docker-compose -f docker-compose.n8n.yml logs -f"
echo "🛑 Stop: docker-compose -f docker-compose.n8n.yml down"
```

## Implementation Checklist (Simplified)

### Code Changes
- [ ] Add N8N_MODE flag to `http-server-single-session.ts`
- [ ] Add protocol version endpoint (GET /mcp) when N8N_MODE=true
- [ ] Add Mcp-Session-Id header to responses
- [ ] Update error responses to JSON-RPC format when N8N_MODE=true
- [ ] Add npm script `start:n8n` to package.json

### Docker Infrastructure
- [ ] Create `Dockerfile.n8n` for the n8n-specific image
- [ ] Create `docker-compose.n8n.yml` for simple deployment
- [ ] Create `.env.n8n.example` template
- [ ] Create GitHub Actions workflow `docker-build-n8n.yml`
- [ ] Create `deploy/quick-deploy-n8n.sh` script

### Testing
- [ ] Write unit tests for n8n mode functionality
- [ ] Test with the actual n8n MCP Client Tool
- [ ] Verify the protocol version endpoint
- [ ] Test the authentication flow
- [ ] Validate error formatting

### Documentation
- [ ] Update README with an n8n deployment section
- [ ] Document the N8N_MODE environment variable
- [ ] Add a troubleshooting guide for common issues

## Quick Start Guide

### 1. One-Command Deployment

```bash
# Clone and deploy
git clone https://github.com/czlonkowski/n8n-mcp.git
cd n8n-mcp
./deploy/quick-deploy-n8n.sh
```

### 2. Manual Configuration in n8n

After deployment, configure the MCP Client Tool in n8n:

1. Open n8n at `http://localhost:5678`
2. Create a new workflow
3. Add the "MCP Client Tool" node (under the AI category)
4. Configure:
   - **Server URL**: `http://n8n-mcp:3001/mcp`
   - **Authentication**: Bearer Token
   - **Token**: Check your `.env` file for MCP_AUTH_TOKEN
5. Select a tool (e.g., `list_nodes`)
6. Execute the workflow

### 3. Production Deployment

For production with SSL, use a reverse proxy:

```nginx
# nginx configuration
server {
    listen 443 ssl;
    server_name n8n.yourdomain.com;

    location / {
        proxy_pass http://localhost:5678;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
```

The MCP server should remain internal only; n8n connects to it over the Docker network.

## Success Criteria

The implementation is successful when:

1. **Minimal Code Changes**: Only ~20 lines added to the existing server
2. **Protocol Compliance**: GET /mcp returns the correct protocol version
3. **n8n Connection**: The MCP Client Tool connects successfully
4. **Tool Execution**: Tools work without modification
5. **Backward Compatible**: Existing Claude Desktop usage is unaffected

## Troubleshooting

### Common Issues

1. **"Protocol version mismatch"**
   - Ensure N8N_MODE=true is set
   - Check that GET /mcp returns "2024-11-05"

2. **"Authentication failed"**
   - Verify AUTH_TOKEN matches in .env and n8n
   - The token must be 32+ characters
   - Use the "Bearer Token" auth type in n8n

3. **"Connection refused"**
   - Check that the containers are on the same network
   - Use the internal hostname: `http://n8n-mcp:3001/mcp`
   - Verify that the health check passes

4. **Testing the Setup**
   ```bash
   # Check protocol version
   docker exec n8n-mcp curl http://localhost:3001/mcp

   # View logs
   docker-compose -f docker-compose.n8n.yml logs -f n8n-mcp
   ```

## Summary

This simplified approach:
- **Extends existing code** rather than creating new architecture
- **Adds n8n compatibility** with minimal changes
- **Uses a separate Docker image** for clean deployment
- **Maintains backward compatibility** for existing users
- **Avoids overengineering** with simple, practical solutions

Total implementation effort: ~2-3 hours (vs. 2-3 days for the multi-session approach)

@@ -1,146 +0,0 @@
# Test Artifacts Documentation

This document describes the comprehensive test result artifact storage system implemented in the n8n-mcp project.

## Overview

The test artifact system captures, stores, and presents test results in multiple formats to facilitate debugging, analysis, and historical tracking of test performance.

## Artifact Types

### 1. Test Results
- **JUnit XML** (`test-results/junit.xml`): Standard format for CI integration
- **JSON Results** (`test-results/results.json`): Detailed test data for analysis
- **HTML Report** (`test-results/html/index.html`): Interactive test report
- **Test Summary** (`test-summary.md`): Markdown summary for PR comments

### 2. Coverage Reports
- **LCOV** (`coverage/lcov.info`): Standard coverage format
- **HTML Coverage** (`coverage/html/index.html`): Interactive coverage browser
- **Coverage Summary** (`coverage/coverage-summary.json`): JSON coverage data

### 3. Benchmark Results
- **Benchmark JSON** (`benchmark-results.json`): Raw benchmark data
- **Comparison Reports** (`benchmark-comparison.md`): PR benchmark comparisons

### 4. Detailed Reports
- **HTML Report** (`test-reports/report.html`): Comprehensive styled report
- **Markdown Report** (`test-reports/report.md`): Full markdown report
- **JSON Report** (`test-reports/report.json`): Complete test data

## GitHub Actions Integration

### Test Workflow (`test.yml`)

The main test workflow:
1. Runs tests with coverage using multiple reporters
2. Generates test summaries and detailed reports
3. Uploads artifacts with metadata
4. Posts summaries to PRs
5. Creates a combined artifact index

### Benchmark PR Workflow (`benchmark-pr.yml`)

For pull requests:
1. Runs benchmarks on the PR branch
2. Runs benchmarks on the base branch
3. Compares the results
4. Posts the comparison to the PR
5. Sets status checks for regressions

## Artifact Retention

Retention periods are set per artifact type (see the upload step sketched below):

- **Test Results**: 30 days
- **Coverage Reports**: 30 days
- **Benchmark Results**: 30 days
- **Combined Results**: 90 days
- **Test Metadata**: 30 days
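An illustrative upload step applying one of these windows via `retention-days` (the real workflow may name its steps differently):

```yaml
- name: Upload test results
  uses: actions/upload-artifact@v4
  with:
    name: test-results
    path: test-results/
    retention-days: 30
```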
## PR Comment Integration

The system automatically:
- Posts test summaries to PR comments
- Updates existing comments instead of creating duplicates
- Includes links to full artifacts
- Shows coverage and benchmark changes

## Job Summary

Each workflow run includes a job summary with:
- Test results overview
- Coverage summary
- Benchmark results
- Direct links to download artifacts

## Local Development

### Running Tests with Reports

```bash
# Run tests with all reporters
CI=true npm run test:coverage

# Generate detailed reports
node scripts/generate-detailed-reports.js

# Generate test summary
node scripts/generate-test-summary.js

# Compare benchmarks
node scripts/compare-benchmarks.js benchmark-results.json benchmark-baseline.json
```

### Report Locations

When running locally, reports are generated in:
- `test-results/` - Vitest outputs
- `test-reports/` - Detailed reports
- `coverage/` - Coverage reports
- Root directory - Summary files

## Report Formats

### HTML Report Features
- Responsive design
- Test suite breakdown
- Failed test details with error messages
- Coverage visualization with progress bars
- Benchmark performance metrics
- Sortable tables

### Markdown Report Features
- GitHub-compatible formatting
- Summary statistics
- Failed test listings
- Coverage breakdown
- Benchmark comparisons

### JSON Report Features
- Complete test data
- Programmatic access
- Historical comparison
- CI/CD integration

## Best Practices

1. **Always Check Artifacts**: When tests fail in CI, download and review the HTML report
2. **Monitor Coverage**: Use the coverage reports to identify untested code
3. **Track Benchmarks**: Review benchmark comparisons on performance-critical PRs
4. **Archive Important Runs**: Download artifacts from significant releases

## Troubleshooting

### Missing Artifacts
- Check if tests ran to completion
- Verify artifact upload steps executed
- Check retention period hasn't expired

### Report Generation Failures
- Ensure all dependencies are installed
- Check for valid test/coverage output files
- Review workflow logs for errors

### PR Comment Issues
- Verify GitHub Actions permissions
- Check bot authentication
- Review comment posting logs

@@ -1,802 +0,0 @@
# n8n-MCP Testing Architecture

## Overview

This document describes the comprehensive testing infrastructure implemented for the n8n-MCP project. The testing suite includes over 1,100 tests split between unit and integration tests, benchmarks, and a complete CI/CD pipeline ensuring code quality and reliability.

### Test Suite Statistics (from CI Run #41)

- **Total Tests**: 1,182 tests
- **Unit Tests**: 933 tests (932 passed, 1 skipped)
- **Integration Tests**: 249 tests (245 passed, 4 skipped)
- **Test Files**:
  - 30 unit test files
  - 14 integration test files
- **Test Execution Time**:
  - Unit tests: ~2 minutes with coverage
  - Integration tests: ~23 seconds
  - Total CI time: ~2.5 minutes
- **Success Rate**: 99.5% (only 5 tests skipped, 0 failures)
- **CI/CD Pipeline**: Fully automated with GitHub Actions
- **Test Artifacts**: JUnit XML, coverage reports, benchmark results
- **Parallel Execution**: Configurable with a thread pool

## Testing Framework: Vitest

We use **Vitest** as our primary testing framework, chosen for its:
- **Speed**: Native ESM support and fast execution
- **TypeScript Integration**: First-class TypeScript support
- **Watch Mode**: Instant feedback during development
- **Jest Compatibility**: Easy migration from Jest
- **Built-in Mocking**: Powerful mocking capabilities
- **Coverage**: Integrated code coverage with v8

### Configuration

```typescript
// vitest.config.ts
export default defineConfig({
  test: {
    globals: true,
    environment: 'node',
    setupFiles: ['./tests/setup/global-setup.ts'],
    pool: 'threads',
    poolOptions: {
      threads: {
        singleThread: process.env.TEST_PARALLEL !== 'true',
        maxThreads: parseInt(process.env.TEST_MAX_WORKERS || '4', 10)
      }
    },
    coverage: {
      provider: 'v8',
      reporter: ['lcov', 'html', 'text-summary'],
      exclude: ['node_modules/', 'tests/', '**/*.test.ts', 'scripts/']
    }
  },
  resolve: {
    alias: {
      '@': path.resolve(__dirname, './src'),
      '@tests': path.resolve(__dirname, './tests')
    }
  }
});
```

## Directory Structure

```
tests/
├── unit/                    # Unit tests with mocks (933 tests, 30 files)
│   ├── __mocks__/           # Mock implementations
│   │   └── n8n-nodes-base.test.ts
│   ├── database/            # Database layer tests
│   │   ├── database-adapter-unit.test.ts
│   │   ├── node-repository-core.test.ts
│   │   └── template-repository-core.test.ts
│   ├── loaders/             # Node loader tests
│   │   └── node-loader.test.ts
│   ├── mappers/             # Data mapper tests
│   │   └── docs-mapper.test.ts
│   ├── mcp/                 # MCP server and tools tests
│   │   ├── handlers-n8n-manager.test.ts
│   │   ├── handlers-workflow-diff.test.ts
│   │   ├── tools-documentation.test.ts
│   │   └── tools.test.ts
│   ├── parsers/             # Parser tests
│   │   ├── node-parser.test.ts
│   │   ├── property-extractor.test.ts
│   │   └── simple-parser.test.ts
│   ├── services/            # Service layer tests (largest test suite)
│   │   ├── config-validator.test.ts
│   │   ├── enhanced-config-validator.test.ts
│   │   ├── example-generator.test.ts
│   │   ├── expression-validator.test.ts
│   │   ├── n8n-api-client.test.ts
│   │   ├── n8n-validation.test.ts
│   │   ├── node-specific-validators.test.ts
│   │   ├── property-dependencies.test.ts
│   │   ├── property-filter.test.ts
│   │   ├── task-templates.test.ts
│   │   ├── workflow-diff-engine.test.ts
│   │   ├── workflow-validator-comprehensive.test.ts
│   │   └── workflow-validator.test.ts
│   └── utils/               # Utility function tests
│       └── database-utils.test.ts
├── integration/             # Integration tests (249 tests, 14 files)
│   ├── database/            # Database integration tests
│   │   ├── connection-management.test.ts
│   │   ├── fts5-search.test.ts
│   │   ├── node-repository.test.ts
│   │   ├── performance.test.ts
│   │   └── transactions.test.ts
│   ├── mcp-protocol/        # MCP protocol tests
│   │   ├── basic-connection.test.ts
│   │   ├── error-handling.test.ts
│   │   ├── performance.test.ts
│   │   ├── protocol-compliance.test.ts
│   │   ├── session-management.test.ts
│   │   └── tool-invocation.test.ts
│   └── setup/               # Integration test setup
│       ├── integration-setup.ts
│       └── msw-test-server.ts
├── benchmarks/              # Performance benchmarks
│   ├── database-queries.bench.ts
│   └── sample.bench.ts
├── setup/                   # Global test configuration
│   ├── global-setup.ts      # Global test setup
│   ├── msw-setup.ts         # Mock Service Worker setup
│   └── test-env.ts          # Test environment configuration
├── utils/                   # Test utilities
│   ├── assertions.ts        # Custom assertions
│   ├── builders/            # Test data builders
│   │   └── workflow.builder.ts
│   ├── data-generators.ts   # Test data generators
│   ├── database-utils.ts    # Database test utilities
│   └── test-helpers.ts      # General test helpers
├── mocks/                   # Mock implementations
│   └── n8n-api/             # n8n API mocks
│       ├── handlers.ts      # MSW request handlers
│       └── data/            # Mock data
└── fixtures/                # Test fixtures
    ├── database/            # Database fixtures
    ├── factories/           # Data factories
    └── workflows/           # Workflow fixtures
```

## Mock Strategy

### 1. Mock Service Worker (MSW) for API Mocking

We use MSW for intercepting and mocking HTTP requests:

```typescript
// tests/mocks/n8n-api/handlers.ts
import { http, HttpResponse } from 'msw';

export const handlers = [
  // Workflow endpoints
  http.get('*/workflows/:id', ({ params }) => {
    const workflow = mockWorkflows.find(w => w.id === params.id);
    if (!workflow) {
      return new HttpResponse(null, { status: 404 });
    }
    return HttpResponse.json(workflow);
  }),

  // Execution endpoints
  http.post('*/workflows/:id/run', async ({ params, request }) => {
    const body = await request.json();
    return HttpResponse.json({
      executionId: generateExecutionId(),
      status: 'running'
    });
  })
];
```

### 2. Database Mocking

For unit tests, we mock the database layer:

```typescript
// tests/unit/__mocks__/better-sqlite3.ts
import { vi } from 'vitest';

export default vi.fn(() => ({
  prepare: vi.fn(() => ({
    all: vi.fn().mockReturnValue([]),
    get: vi.fn().mockReturnValue(undefined),
    run: vi.fn().mockReturnValue({ changes: 1 }),
    finalize: vi.fn()
  })),
  exec: vi.fn(),
  close: vi.fn(),
  pragma: vi.fn()
}));
```

### 3. MCP SDK Mocking

For testing MCP protocol interactions:

```typescript
// tests/integration/mcp-protocol/test-helpers.ts
export class TestableN8NMCPServer extends N8NMCPServer {
  private transports = new Set<Transport>();

  async connectToTransport(transport: Transport): Promise<void> {
    this.transports.add(transport);
    await this.connect(transport);
  }

  async close(): Promise<void> {
    for (const transport of this.transports) {
      await transport.close();
    }
    this.transports.clear();
  }
}
```

## Test Patterns and Utilities

### 1. Database Test Utilities

```typescript
// tests/utils/database-utils.ts
export class TestDatabase {
  constructor(options: TestDatabaseOptions = {}) {
    this.options = {
      mode: 'memory',
      enableFTS5: true,
      ...options
    };
  }

  async initialize(): Promise<Database.Database> {
    const db = this.options.mode === 'memory'
      ? new Database(':memory:')
      : new Database(this.dbPath);

    if (this.options.enableFTS5) {
      await this.enableFTS5(db);
    }

    return db;
  }
}
```

### 2. Data Generators

```typescript
// tests/utils/data-generators.ts
export class TestDataGenerator {
  static generateNode(overrides: Partial<ParsedNode> = {}): ParsedNode {
    return {
      nodeType: `test.node${faker.number.int()}`,
      displayName: faker.commerce.productName(),
      description: faker.lorem.sentence(),
      properties: this.generateProperties(5),
      ...overrides
    };
  }

  static generateWorkflow(nodeCount = 3): any {
    const nodes = Array.from({ length: nodeCount }, (_, i) => ({
      id: `node_${i}`,
      type: 'test.node',
      position: [i * 100, 0],
      parameters: {}
    }));

    return { nodes, connections: {} };
  }
}
```
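A test can then build fixtures in a line or two (illustrative usage):

```typescript
// Five linearly positioned nodes with an empty connections map
const workflow = TestDataGenerator.generateWorkflow(5);

// Override only the fields the test cares about
const node = TestDataGenerator.generateNode({ displayName: 'HTTP Request' });
```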
### 3. Custom Assertions

```typescript
// tests/utils/assertions.ts
export function expectValidMCPResponse(response: any): void {
  expect(response).toBeDefined();
  expect(response.content).toBeDefined();
  expect(Array.isArray(response.content)).toBe(true);
  expect(response.content[0]).toHaveProperty('type', 'text');
  expect(response.content[0]).toHaveProperty('text');
}

export function expectNodeStructure(node: any): void {
  expect(node).toHaveProperty('nodeType');
  expect(node).toHaveProperty('displayName');
  expect(node).toHaveProperty('properties');
  expect(Array.isArray(node.properties)).toBe(true);
}
```

## Unit Testing

Our unit tests focus on testing individual components in isolation with mocked dependencies:

### Service Layer Tests

The bulk of our unit tests (400+ tests) are in the services layer:

```typescript
// tests/unit/services/workflow-validator-comprehensive.test.ts
describe('WorkflowValidator Comprehensive Tests', () => {
  it('should validate complex workflow with AI nodes', () => {
    const workflow = {
      nodes: [
        {
          id: 'ai_agent',
          type: '@n8n/n8n-nodes-langchain.agent',
          parameters: { prompt: 'Analyze data' }
        }
      ],
      connections: {}
    };

    const result = validator.validateWorkflow(workflow);
    expect(result.valid).toBe(true);
  });
});
```

### Parser Tests

Testing the node parsing logic:

```typescript
// tests/unit/parsers/property-extractor.test.ts
describe('PropertyExtractor', () => {
  it('should extract nested properties correctly', () => {
    const node = {
      properties: [
        {
          displayName: 'Options',
          name: 'options',
          type: 'collection',
          options: [
            { name: 'timeout', type: 'number' }
          ]
        }
      ]
    };

    const extracted = extractor.extractProperties(node);
    expect(extracted).toHaveProperty('options.timeout');
  });
});
```

### Mock Testing

Testing our mock implementations:

```typescript
// tests/unit/__mocks__/n8n-nodes-base.test.ts
describe('n8n-nodes-base mock', () => {
  it('should provide mocked node definitions', () => {
    const httpNode = mockNodes['n8n-nodes-base.httpRequest'];
    expect(httpNode).toBeDefined();
    expect(httpNode.description.displayName).toBe('HTTP Request');
  });
});
```

## Integration Testing

Our integration tests verify the complete system behavior:

### MCP Protocol Testing

```typescript
// tests/integration/mcp-protocol/tool-invocation.test.ts
describe('MCP Tool Invocation', () => {
  let mcpServer: TestableN8NMCPServer;
  let client: Client;

  beforeEach(async () => {
    mcpServer = new TestableN8NMCPServer();
    await mcpServer.initialize();

    const [serverTransport, clientTransport] = InMemoryTransport.createLinkedPair();
    await mcpServer.connectToTransport(serverTransport);

    client = new Client({ name: 'test-client', version: '1.0.0' }, {});
    await client.connect(clientTransport);
  });

  it('should list nodes with filtering', async () => {
    const response = await client.callTool({
      name: 'list_nodes',
      arguments: { category: 'trigger', limit: 10 }
    });

    expectValidMCPResponse(response);
    const result = JSON.parse(response.content[0].text);
    expect(result.nodes).toHaveLength(10);
    expect(result.nodes.every(n => n.category === 'trigger')).toBe(true);
  });
});
```

### Database Integration Testing

```typescript
// tests/integration/database/fts5-search.test.ts
describe('FTS5 Search Integration', () => {
  it('should perform fuzzy search', async () => {
    const results = await nodeRepo.searchNodes('HTT', 'FUZZY');

    expect(results.some(n => n.nodeType.includes('httpRequest'))).toBe(true);
    expect(results.some(n => n.displayName.includes('HTTP'))).toBe(true);
  });

  it('should handle complex boolean queries', async () => {
    const results = await nodeRepo.searchNodes('webhook OR http', 'OR');

    expect(results.length).toBeGreaterThan(0);
    expect(results.some(n =>
      n.description?.includes('webhook') ||
      n.description?.includes('http')
    )).toBe(true);
  });
});
```

## Test Distribution and Coverage

### Test Distribution by Component

Based on our 1,182 tests:

1. **Services Layer** (~450 tests)
   - `workflow-validator-comprehensive.test.ts`: 150+ tests
   - `node-specific-validators.test.ts`: 120+ tests
   - `n8n-validation.test.ts`: 80+ tests
   - `n8n-api-client.test.ts`: 60+ tests

2. **Parsers** (~200 tests)
   - `simple-parser.test.ts`: 80+ tests
   - `property-extractor.test.ts`: 70+ tests
   - `node-parser.test.ts`: 50+ tests

3. **MCP Integration** (~150 tests)
   - `tool-invocation.test.ts`: 50+ tests
   - `error-handling.test.ts`: 40+ tests
   - `session-management.test.ts`: 30+ tests

4. **Database** (~300 tests)
   - Unit tests for repositories: 100+ tests
   - Integration tests for FTS5 search: 80+ tests
   - Transaction tests: 60+ tests
   - Performance tests: 60+ tests

### Test Execution Performance

From our CI runs:
- **Fastest tests**: Unit tests with mocks (<1ms each)
- **Slowest tests**: Integration tests with a real database (100-5000ms)
- **Average test time**: ~20ms per test
- **Total suite execution**: Under 3 minutes in CI

## CI/CD Pipeline

Our GitHub Actions workflow runs all tests automatically:

```yaml
# .github/workflows/test.yml
name: Test Suite

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Install dependencies
        run: npm ci

      - name: Run unit tests with coverage
        run: npm run test:unit -- --coverage

      - name: Run integration tests
        run: npm run test:integration

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
```

### Test Execution Scripts

```json
// package.json
{
  "scripts": {
    "test": "vitest",
    "test:unit": "vitest run tests/unit",
    "test:integration": "vitest run tests/integration --config vitest.config.integration.ts",
    "test:coverage": "vitest run --coverage",
    "test:watch": "vitest watch",
    "test:bench": "vitest bench --config vitest.config.benchmark.ts",
    "benchmark:ci": "CI=true node scripts/run-benchmarks-ci.js"
  }
}
```

### CI Test Results Summary

From our latest CI run (#41):

```
UNIT TESTS:
 Test Files  30 passed (30)
      Tests  932 passed | 1 skipped (933)

INTEGRATION TESTS:
 Test Files  14 passed (14)
      Tests  245 passed | 4 skipped (249)

TOTAL: 1,177 passed | 5 skipped | 0 failed
```

## Performance Testing

We use Vitest's built-in benchmark functionality:

```typescript
// tests/benchmarks/database-queries.bench.ts
import { bench, describe } from 'vitest';

describe('Database Query Performance', () => {
  bench('search nodes by category', async () => {
    await nodeRepo.getNodesByCategory('trigger');
  });

  bench('FTS5 search performance', async () => {
    await nodeRepo.searchNodes('webhook http request', 'AND');
  });
});
```

## Environment Configuration

The test environment is configured via `.env.test`, which `tests/setup/test-env.ts` loads (see the sketch after this block):

```bash
# Test Environment Configuration
NODE_ENV=test
TEST_DB_PATH=:memory:
TEST_PARALLEL=false
TEST_MAX_WORKERS=4
FEATURE_TEST_COVERAGE=true
MSW_ENABLED=true
```
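One way the loader can consume this file is via `dotenv`; a minimal sketch, assuming the real `test-env.ts` may export more values:

```typescript
// tests/setup/test-env.ts - illustrative loader for .env.test
import { config } from 'dotenv';

config({ path: '.env.test' });

// Fall back to safe defaults when the file is absent (e.g. in CI)
export const TEST_DB_PATH = process.env.TEST_DB_PATH ?? ':memory:';
export const TEST_MAX_WORKERS = parseInt(process.env.TEST_MAX_WORKERS ?? '4', 10);
```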
## Key Patterns and Lessons Learned

### 1. Response Structure Consistency

All MCP responses follow a specific structure that must be handled correctly:

```typescript
// Common pattern for handling MCP responses
const response = await client.callTool({ name: 'list_nodes', arguments: {} });

// MCP responses have a content array of text objects
expect(response.content).toBeDefined();
expect(response.content[0].type).toBe('text');

// Parse the actual data
const data = JSON.parse(response.content[0].text);
```

### 2. MSW Integration Setup

Proper MSW setup is crucial for integration tests:

```typescript
// tests/integration/setup/integration-setup.ts
import { setupServer } from 'msw/node';
import { handlers } from '@tests/mocks/n8n-api/handlers';

// Create the server but don't start it globally
const server = setupServer(...handlers);

beforeAll(async () => {
  // Only start MSW for integration tests
  if (process.env.MSW_ENABLED === 'true') {
    server.listen({ onUnhandledRequest: 'bypass' });
  }
});

afterAll(async () => {
  server.close();
});
```

### 3. Database Isolation for Parallel Tests

Each test gets its own database to enable parallel execution:

```typescript
// tests/utils/database-utils.ts
export function createTestDatabaseAdapter(
  db?: Database.Database,
  options: TestDatabaseOptions = {}
): DatabaseAdapter {
  const database = db || new Database(':memory:');

  // Verify FTS5 support is compiled in, if requested
  if (options.enableFTS5) {
    database.exec('PRAGMA main.compile_options;');
  }

  return new DatabaseAdapter(database);
}
```

### 4. Environment-Aware Performance Thresholds

CI environments are slower, so we adjust expectations:

```typescript
// Environment-aware thresholds
const getThreshold = (local: number, ci: number) =>
  process.env.CI ? ci : local;

it('should respond quickly', async () => {
  const start = performance.now();
  await someOperation();
  const duration = performance.now() - start;

  expect(duration).toBeLessThan(getThreshold(50, 200));
});
```

## Best Practices

### 1. Test Isolation
- Each test creates its own database instance
- Tests clean up after themselves
- No shared state between tests

### 2. Proper Cleanup Order
```typescript
afterEach(async () => {
  // Close the client first to ensure no pending requests
  await client.close();

  // Give the client time to fully close
  await new Promise(resolve => setTimeout(resolve, 50));

  // Then close the server
  await mcpServer.close();

  // Finally clean up the database
  await testDb.cleanup();
});
```

### 3. Handle Async Operations Carefully
```typescript
// Avoid race conditions in cleanup
it('should handle disconnection', async () => {
  // ... test code ...

  // Ensure operations complete before cleanup
  await transport.close();
  await new Promise(resolve => setTimeout(resolve, 100));
});
```

### 4. Meaningful Test Organization
- Group related tests using `describe` blocks
- Use descriptive test names that explain the behavior
- Follow the AAA pattern: Arrange, Act, Assert
- Keep tests focused on single behaviors

## Debugging Tests

### Running Specific Tests
```bash
# Run a single test file
npm test tests/integration/mcp-protocol/tool-invocation.test.ts

# Run tests matching a name pattern
npm test -- -t "should list nodes"

# Run with debugging output
DEBUG=* npm test
```

### VSCode Integration
```json
// .vscode/launch.json
{
  "configurations": [
    {
      "type": "node",
      "request": "launch",
      "name": "Debug Tests",
      "program": "${workspaceFolder}/node_modules/vitest/vitest.mjs",
      "args": ["run", "${file}"],
      "console": "integratedTerminal"
    }
  ]
}
```

## Test Coverage

While we don't enforce strict coverage thresholds yet, the infrastructure is in place (see the sketch below):
- Coverage reports generated in `lcov`, `html`, and `text` formats
- Integration with Codecov for tracking coverage over time
- Per-file coverage visible in VSCode with extensions
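When thresholds are eventually enforced, recent Vitest versions accept them directly in the coverage config; the numbers below are illustrative, not current project policy:

```typescript
// vitest.config.ts (sketch)
coverage: {
  provider: 'v8',
  reporter: ['lcov', 'html', 'text-summary'],
  thresholds: { lines: 80, functions: 80, branches: 75, statements: 80 }
}
```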
## Future Improvements

1. **E2E Testing**: Add Playwright for testing the full MCP server interaction
2. **Load Testing**: Implement k6 or Artillery for stress testing
3. **Contract Testing**: Add Pact for ensuring API compatibility
4. **Visual Regression**: For any UI components that may be added
5. **Mutation Testing**: Use Stryker to ensure test quality

## Common Issues and Solutions

### 1. Tests Hanging in CI

**Problem**: Tests would hang indefinitely in CI due to `process.exit()` calls.

**Solution**: Remove all `process.exit()` calls from test code and use proper cleanup:
```typescript
// Bad
afterAll(() => {
  process.exit(0); // This causes Vitest to hang
});

// Good
afterAll(async () => {
  await cleanup();
  // Let Vitest handle process termination
});
```

### 2. MCP Response Structure

**Problem**: Tests expecting the wrong response format from MCP tools.

**Solution**: Always access responses through `content[0].text`:
```typescript
// Wrong
const data = response[0].text;

// Correct
const data = JSON.parse(response.content[0].text);
```

### 3. Database Not Found Errors

**Problem**: Tests failing with "node not found" when the database is empty.

**Solution**: Check for empty databases before assertions:
```typescript
const stats = await server.executeTool('get_database_statistics', {});
if (stats.totalNodes > 0) {
  expect(result.nodes.length).toBeGreaterThan(0);
} else {
  expect(result.nodes).toHaveLength(0);
}
```

### 4. MSW Loading Globally

**Problem**: MSW interfering with unit tests when loaded globally.

**Solution**: Only load MSW in the integration test setup:
```typescript
// vitest.config.integration.ts
setupFiles: [
  './tests/setup/global-setup.ts',
  './tests/integration/setup/integration-setup.ts' // MSW only here
]
```

## Resources

- [Vitest Documentation](https://vitest.dev/)
- [MSW Documentation](https://mswjs.io/)
- [Testing Best Practices](https://github.com/goldbergyoni/javascript-testing-best-practices)
- [MCP SDK Documentation](https://modelcontextprotocol.io/)

@@ -1,276 +0,0 @@
# n8n-MCP Testing Implementation Checklist

## Test Suite Development Status

### Context
- **Situation**: Building a comprehensive test suite from scratch
- **Branch**: feat/comprehensive-testing-suite (separate from main)
- **Main Branch Status**: Working in production without tests
- **Goal**: Add test coverage without disrupting development

## Immediate Actions (Day 1)

- [x] ~~Fix failing tests (Phase 0)~~ ✅ COMPLETED
- [x] ~~Create GitHub Actions workflow file~~ ✅ COMPLETED
- [x] ~~Install Vitest and remove Jest~~ ✅ COMPLETED
- [x] ~~Create vitest.config.ts~~ ✅ COMPLETED
- [x] ~~Setup global test configuration~~ ✅ COMPLETED
- [x] ~~Migrate existing tests to Vitest syntax~~ ✅ COMPLETED
- [x] ~~Setup coverage reporting with Codecov~~ ✅ COMPLETED

## Phase 1: Vitest Migration ✅ COMPLETED

All tests have been successfully migrated from Jest to Vitest:
- ✅ Removed Jest and installed Vitest
- ✅ Created vitest.config.ts with path aliases
- ✅ Set up global test configuration
- ✅ Migrated all 6 test files (68 tests passing)
- ✅ Updated TypeScript configuration
- ✅ Cleaned up Jest configuration files

## Week 1: Foundation

### Testing Infrastructure ✅ COMPLETED (Phase 2)
- [x] ~~Create test directory structure~~ ✅ COMPLETED
- [x] ~~Setup mock infrastructure for better-sqlite3~~ ✅ COMPLETED
- [x] ~~Create mock for n8n-nodes-base package~~ ✅ COMPLETED
- [x] ~~Setup test database utilities~~ ✅ COMPLETED
- [x] ~~Create factory pattern for nodes~~ ✅ COMPLETED
- [x] ~~Create builder pattern for workflows~~ ✅ COMPLETED
- [x] ~~Setup global test utilities~~ ✅ COMPLETED
- [x] ~~Configure test environment variables~~ ✅ COMPLETED

### CI/CD Pipeline ✅ COMPLETED (Phase 3.8)
- [x] ~~GitHub Actions for test execution~~ ✅ COMPLETED & VERIFIED
  - Successfully running with Vitest
  - 1021 tests passing in CI
  - Build time: ~2 minutes
- [x] ~~Coverage reporting integration~~ ✅ COMPLETED (Codecov setup)
- [x] ~~Performance benchmark tracking~~ ✅ COMPLETED
- [x] ~~Test result artifacts~~ ✅ COMPLETED
- [ ] Branch protection rules
- [ ] Required status checks

## Week 2: Mock Infrastructure

### Database Mocking
- [ ] Complete better-sqlite3 mock implementation
- [ ] Mock prepared statements
- [ ] Mock transactions
- [ ] Mock FTS5 search functionality
- [ ] Test data seeding utilities

### External Dependencies
- [ ] Mock axios for API calls
- [ ] Mock file system operations
- [ ] Mock MCP SDK
- [ ] Mock Express server
- [ ] Mock WebSocket connections

## Week 3-4: Unit Tests ✅ COMPLETED (Phase 3)

### Core Services (Priority 1) ✅ COMPLETED
- [x] ~~`config-validator.ts` - 95% coverage~~ ✅ 96.9%
- [x] ~~`enhanced-config-validator.ts` - 95% coverage~~ ✅ 94.55%
- [x] ~~`workflow-validator.ts` - 90% coverage~~ ✅ 97.59%
- [x] ~~`expression-validator.ts` - 90% coverage~~ ✅ 97.22%
- [x] ~~`property-filter.ts` - 90% coverage~~ ✅ 95.25%
- [x] ~~`example-generator.ts` - 85% coverage~~ ✅ 94.34%

### Parsers (Priority 2) ✅ COMPLETED
- [x] ~~`node-parser.ts` - 90% coverage~~ ✅ 97.42%
- [x] ~~`property-extractor.ts` - 90% coverage~~ ✅ 95.49%

### MCP Layer (Priority 3) ✅ COMPLETED
- [x] ~~`tools.ts` - 90% coverage~~ ✅ 94.11%
- [x] ~~`handlers-n8n-manager.ts` - 85% coverage~~ ✅ 92.71%
- [x] ~~`handlers-workflow-diff.ts` - 85% coverage~~ ✅ 96.34%
- [x] ~~`tools-documentation.ts` - 80% coverage~~ ✅ 94.12%

### Database Layer (Priority 4) ✅ COMPLETED
- [x] ~~`node-repository.ts` - 85% coverage~~ ✅ 91.48%
- [x] ~~`database-adapter.ts` - 85% coverage~~ ✅ 89.29%
- [x] ~~`template-repository.ts` - 80% coverage~~ ✅ 86.78%

### Loaders and Mappers (Priority 5) ✅ COMPLETED
- [x] ~~`node-loader.ts` - 85% coverage~~ ✅ 91.89%
- [x] ~~`docs-mapper.ts` - 80% coverage~~ ✅ 95.45%

### Additional Critical Services Tested ✅ COMPLETED (Phase 3.5)
- [x] ~~`n8n-api-client.ts`~~ ✅ 83.87%
- [x] ~~`workflow-diff-engine.ts`~~ ✅ 90.06%
- [x] ~~`n8n-validation.ts`~~ ✅ 97.14%
- [x] ~~`node-specific-validators.ts`~~ ✅ 98.7%

## Week 5-6: Integration Tests 🚧 IN PROGRESS

### Real Status (July 29, 2025)
**Context**: Building the test suite from scratch on the testing branch. The main branch has no tests.

**Overall Status**: 187/246 tests passing (76% pass rate)
**Critical Issue**: CI shows green despite 58 failing tests because of `|| true` in the workflow (see the snippet below)
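The suppression pattern and its fix look like this (illustrative step; names in the real workflow may differ):

```yaml
# Before: failures are swallowed, the job always succeeds
- name: Run integration tests
  run: npm run test:integration || true

# After: a non-zero exit code fails the job
- name: Run integration tests
  run: npm run test:integration
```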
### MCP Protocol Tests 🔄 MIXED STATUS
- [x] ~~Full MCP server initialization~~ ✅ COMPLETED
- [x] ~~Tool invocation flow~~ ✅ FIXED (30 tests in tool-invocation.test.ts)
- [ ] Error handling and recovery ⚠️ 16 FAILING (error-handling.test.ts)
- [x] ~~Concurrent request handling~~ ✅ COMPLETED
- [ ] Session management ⚠️ 5 FAILING (timeout issues)

### n8n API Integration 🔄 PENDING
- [ ] Workflow CRUD operations (MSW mocks ready)
- [ ] Webhook triggering
- [ ] Execution monitoring
- [ ] Authentication handling
- [ ] Error scenarios

### Database Integration ⚠️ ISSUES FOUND
- [x] ~~SQLite operations with real DB~~ ✅ BASIC TESTS PASS
- [ ] FTS5 search functionality ⚠️ 7 FAILING (syntax errors)
- [ ] Transaction handling ⚠️ 1 FAILING (isolation issues)
- [ ] Migration testing 🔄 NOT STARTED
- [ ] Performance under load ⚠️ 4 FAILING (slower than thresholds)

## Week 7-8: E2E & Performance

### End-to-End Scenarios
- [ ] Complete workflow creation flow
- [ ] AI agent workflow setup
- [ ] Template import and validation
- [ ] Workflow execution monitoring
- [ ] Error recovery scenarios

### Performance Benchmarks
- [ ] Node loading speed (< 50ms per node)
- [ ] Search performance (< 100ms for 1000 nodes)
- [ ] Validation speed (< 10ms simple, < 100ms complex)
- [ ] Database query performance
- [ ] Memory usage profiling
- [ ] Concurrent request handling

### Load Testing
- [ ] 100 concurrent MCP requests
- [ ] 10,000 nodes in database
- [ ] 1,000 workflow validations/minute
- [ ] Memory leak detection
- [ ] Resource cleanup verification

## Testing Quality Gates

### Coverage Requirements
- [ ] Overall: 80%+ (Currently: 62.67%)
- [x] ~~Core services: 90%+~~ ✅ COMPLETED
- [x] ~~MCP tools: 90%+~~ ✅ COMPLETED
- [x] ~~Critical paths: 95%+~~ ✅ COMPLETED
- [x] ~~New code: 90%+~~ ✅ COMPLETED

### Performance Requirements
- [x] ~~All unit tests < 10ms~~ ✅ COMPLETED
- [ ] Integration tests < 1s
- [ ] E2E tests < 10s
- [x] ~~Full suite < 5 minutes~~ ✅ COMPLETED (~2 minutes)
- [x] ~~No memory leaks~~ ✅ COMPLETED

### Code Quality
- [x] ~~No ESLint errors~~ ✅ COMPLETED
- [x] ~~No TypeScript errors~~ ✅ COMPLETED
- [x] ~~No console.log in tests~~ ✅ COMPLETED
- [x] ~~All tests have descriptions~~ ✅ COMPLETED
- [x] ~~No hardcoded values~~ ✅ COMPLETED

## Monitoring & Maintenance

### Daily
- [ ] Check CI pipeline status
- [ ] Review failed tests
- [ ] Monitor flaky tests

### Weekly
- [ ] Review coverage reports
- [ ] Update test documentation
- [ ] Performance benchmark review
- [ ] Team sync on testing progress

### Monthly
- [ ] Update baseline benchmarks
- [ ] Review and refactor tests
- [ ] Update testing strategy
- [ ] Training/knowledge sharing

## Risk Mitigation

### Technical Risks
- [ ] Mock complexity - Use simple, maintainable mocks
- [ ] Test brittleness - Focus on behavior, not implementation
- [ ] Performance impact - Run heavy tests in parallel
- [ ] Flaky tests - Proper async handling and isolation

### Process Risks
- [ ] Slow adoption - Provide training and examples
- [ ] Coverage gaming - Review test quality, not just numbers
- [ ] Maintenance burden - Automate what's possible
- [ ] Integration complexity - Use test containers

## Success Criteria

### Current Reality Check
- **Unit Tests**: ✅ SOLID (932 passing, 87.8% coverage)
- **Integration Tests**: ⚠️ NEEDS WORK (58 failing, 76% pass rate)
- **E2E Tests**: 🔄 NOT STARTED
- **CI/CD**: ⚠️ BROKEN (hiding failures with `|| true`)

### Revised Technical Metrics
- Coverage: Currently 87.8% for unit tests ✅
- Integration test pass rate: Target 100% (currently 76%)
- Performance: Adjust thresholds based on reality
- Reliability: Fix flaky tests during repair
- Speed: CI pipeline < 5 minutes ✅ (~2 minutes)

### Team Metrics
- All developers writing tests ✅
- Tests reviewed in PRs ✅
- No production bugs from tested code
- Improved development velocity ✅

## Phases Completed

- **Phase 0**: Immediate Fixes ✅ COMPLETED
- **Phase 1**: Vitest Migration ✅ COMPLETED
- **Phase 2**: Test Infrastructure ✅ COMPLETED
- **Phase 3**: Unit Tests (All 943 tests) ✅ COMPLETED
- **Phase 3.5**: Critical Service Testing ✅ COMPLETED
- **Phase 3.8**: CI/CD & Infrastructure ✅ COMPLETED
- **Phase 4**: Integration Tests 🚧 IN PROGRESS
  - **Status**: 58 out of 246 tests failing (23.6% failure rate)
  - **CI Issue**: Tests appear green due to `|| true` error suppression
  - **Categories of Failures**:
    - Database: 9 tests (state isolation, FTS5 syntax)
    - MCP Protocol: 16 tests (response structure in error-handling.test.ts)
    - MSW: 6 tests (not initialized properly)
    - FTS5 Search: 7 tests (query syntax issues)
    - Session Management: 5 tests (async cleanup)
    - Performance: 15 tests (threshold mismatches)
  - **Next Steps**:
    1. Get team buy-in for a "red" CI
    2. Remove `|| true` from the workflow
    3. Fix tests systematically by category
- **Phase 5**: E2E Tests 🔄 PENDING

## Resources & Tools

### Documentation
- Vitest: https://vitest.dev/
- Testing Library: https://testing-library.com/
- MSW: https://mswjs.io/
- Testcontainers: https://www.testcontainers.com/

### Monitoring
- Codecov: https://codecov.io/
- GitHub Actions: https://github.com/features/actions
- Benchmark Action: https://github.com/benchmark-action/github-action-benchmark

### Team Resources
- Testing best practices guide
- Example test implementations
- Mock usage patterns
- Performance optimization tips

@@ -1,472 +0,0 @@
# n8n-MCP Testing Implementation Guide

## Phase 1: Foundation Setup (Week 1-2)

### 1.1 Install Vitest and Dependencies

```bash
# Remove Jest
npm uninstall jest ts-jest @types/jest

# Install Vitest and related packages
npm install -D vitest @vitest/ui @vitest/coverage-v8
npm install -D @testing-library/jest-dom
npm install -D msw # For API mocking
npm install -D @faker-js/faker # For test data
npm install -D fishery # For factories
```

### 1.2 Update package.json Scripts

```json
{
  "scripts": {
    // Testing
    "test": "vitest",
    "test:ui": "vitest --ui",
    "test:unit": "vitest run tests/unit",
    "test:integration": "vitest run tests/integration",
    "test:e2e": "vitest run tests/e2e",
    "test:watch": "vitest watch",
    "test:coverage": "vitest run --coverage",
    "test:coverage:check": "vitest run --coverage --coverage.thresholdAutoUpdate=false",

    // Benchmarks
    "bench": "vitest bench",
    "bench:compare": "vitest bench --compare",

    // CI specific
    "test:ci": "vitest run --reporter=junit --reporter=default",
    "test:ci:coverage": "vitest run --coverage --reporter=junit --reporter=default"
  }
}
```

### 1.3 Migrate Existing Tests

```typescript
// Before (Jest)
import { describe, test, expect } from '@jest/globals';

// After (Vitest)
import { describe, it, expect, vi } from 'vitest';

// Update mock syntax
// Jest: jest.mock('module')
// Vitest: vi.mock('module')

// Update timer mocks
// Jest: jest.useFakeTimers()
// Vitest: vi.useFakeTimers()
```

### 1.4 Create Test Database Setup

```typescript
// tests/setup/test-database.ts
import Database from 'better-sqlite3';
import { readFileSync } from 'fs';
import { join } from 'path';

export class TestDatabase {
  private db: Database.Database;

  constructor() {
    this.db = new Database(':memory:');
    this.initialize();
  }

  private initialize() {
    const schema = readFileSync(
      join(__dirname, '../../src/database/schema.sql'),
      'utf8'
    );
    this.db.exec(schema);
  }

  seedNodes(nodes: any[]) {
    // "group" must be quoted: GROUP is a reserved word in SQLite
    const stmt = this.db.prepare(`
      INSERT INTO nodes (type, displayName, name, "group", version, description, properties)
      VALUES (?, ?, ?, ?, ?, ?, ?)
    `);

    const insertMany = this.db.transaction((nodes) => {
      for (const node of nodes) {
        stmt.run(
          node.type,
          node.displayName,
          node.name,
          node.group,
          node.version,
          node.description,
          JSON.stringify(node.properties)
        );
      }
    });

    insertMany(nodes);
  }

  close() {
    this.db.close();
  }

  getDb() {
    return this.db;
  }
}
```
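
A short usage sketch for `TestDatabase`; the seeded node shape is illustrative and must match the real schema:

```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestDatabase } from '../setup/test-database';

describe('node queries', () => {
  let testDb: TestDatabase;

  beforeEach(() => {
    testDb = new TestDatabase();
    testDb.seedNodes([{
      type: 'nodes-base.webhook',
      displayName: 'Webhook',
      name: 'webhook',
      group: 'trigger',
      version: 1,
      description: 'Starts a workflow on an HTTP request',
      properties: [],
    }]);
  });

  afterEach(() => testDb.close());

  it('should find the seeded node by type', () => {
    const row = testDb.getDb()
      .prepare('SELECT * FROM nodes WHERE type = ?')
      .get('nodes-base.webhook');
    expect(row).toBeDefined();
  });
});
```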

## Phase 2: Core Unit Tests (Week 3-4)

### 2.1 Test Organization Template

```typescript
// tests/unit/services/[service-name].test.ts
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { ServiceName } from '@/services/service-name';

describe('ServiceName', () => {
  let service: ServiceName;
  let mockDependency: any;

  beforeEach(() => {
    // Setup mocks
    mockDependency = {
      method: vi.fn()
    };

    // Create service instance
    service = new ServiceName(mockDependency);
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

  describe('methodName', () => {
    it('should handle happy path', async () => {
      // Arrange
      const input = { /* test data */ };
      mockDependency.method.mockResolvedValue({ /* mock response */ });

      // Act
      const result = await service.methodName(input);

      // Assert
      expect(result).toEqual(/* expected output */);
      expect(mockDependency.method).toHaveBeenCalledWith(/* expected args */);
    });

    it('should handle errors gracefully', async () => {
      // Arrange
      mockDependency.method.mockRejectedValue(new Error('Test error'));

      // Act & Assert
      await expect(service.methodName({})).rejects.toThrow('Expected error message');
    });
  });
});
```

### 2.2 Mock Strategies by Layer

#### Database Layer
```typescript
// tests/unit/database/node-repository.test.ts
import { vi } from 'vitest';

// vi.mock calls are hoisted, so data used inside the factory must be hoisted too
const mockData = vi.hoisted(() => [{ id: 1, type: 'nodes-base.webhook' }]);

vi.mock('better-sqlite3', () => ({
  default: vi.fn(() => ({
    prepare: vi.fn(() => ({
      all: vi.fn(() => mockData),
      get: vi.fn((id) => mockData.find(d => d.id === id)),
      run: vi.fn(() => ({ changes: 1 }))
    })),
    exec: vi.fn(),
    close: vi.fn()
  }))
}));
```

#### External APIs
```typescript
// tests/unit/services/__mocks__/axios.ts
import { vi } from 'vitest';

export default {
  create: vi.fn(() => ({
    get: vi.fn(() => Promise.resolve({ data: {} })),
    post: vi.fn(() => Promise.resolve({ data: { id: '123' } })),
    put: vi.fn(() => Promise.resolve({ data: {} })),
    delete: vi.fn(() => Promise.resolve({ data: {} }))
  }))
};
```
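
Since MSW is already listed as a dev dependency, HTTP traffic can also be mocked at the network level instead of per-module. A minimal sketch using the MSW v2 API; the workflow route shape is an assumption about the n8n REST API, not a confirmed contract:

```typescript
// tests/setup/msw-server.ts
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';

// Intercepts GET /api/v1/workflows/:id on any host and returns a stub workflow
export const server = setupServer(
  http.get('*/api/v1/workflows/:id', ({ params }) =>
    HttpResponse.json({ id: params.id, name: 'Mock Workflow', nodes: [] })
  )
);

// Wire it up in a global setup file:
// beforeAll(() => server.listen({ onUnhandledRequest: 'error' }));
// afterEach(() => server.resetHandlers());
// afterAll(() => server.close());
```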
|
||||
|
||||
#### File System
|
||||
```typescript
|
||||
// Use memfs for file system mocking
|
||||
import { vol } from 'memfs';
|
||||
|
||||
vi.mock('fs', () => vol);
|
||||
|
||||
beforeEach(() => {
|
||||
vol.reset();
|
||||
vol.fromJSON({
|
||||
'/test/file.json': JSON.stringify({ test: 'data' })
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 2.3 Critical Path Tests
|
||||
|
||||
```typescript
|
||||
// Priority 1: Node Loading and Parsing
|
||||
// tests/unit/loaders/node-loader.test.ts
|
||||
|
||||
// Priority 2: Configuration Validation
|
||||
// tests/unit/services/config-validator.test.ts
|
||||
|
||||
// Priority 3: MCP Tools
|
||||
// tests/unit/mcp/tools.test.ts
|
||||
|
||||
// Priority 4: Database Operations
|
||||
// tests/unit/database/node-repository.test.ts
|
||||
|
||||
// Priority 5: Workflow Validation
|
||||
// tests/unit/services/workflow-validator.test.ts
|
||||
```
|
||||
|
||||
## Phase 3: Integration Tests (Week 5-6)
|
||||
|
||||
### 3.1 Test Container Setup
|
||||
|
||||
```typescript
|
||||
// tests/setup/test-containers.ts
|
||||
import { GenericContainer, StartedTestContainer } from 'testcontainers';
|
||||
|
||||
export class N8nTestContainer {
|
||||
private container: StartedTestContainer;
|
||||
|
||||
async start() {
|
||||
this.container = await new GenericContainer('n8nio/n8n:latest')
|
||||
.withExposedPorts(5678)
|
||||
.withEnv('N8N_BASIC_AUTH_ACTIVE', 'false')
|
||||
.withEnv('N8N_ENCRYPTION_KEY', 'test-key')
|
||||
.start();
|
||||
|
||||
return {
|
||||
url: `http://localhost:${this.container.getMappedPort(5678)}`,
|
||||
stop: () => this.container.stop()
|
||||
};
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3.2 Integration Test Pattern
|
||||
|
||||
```typescript
|
||||
// tests/integration/n8n-api/workflow-crud.test.ts
|
||||
import { N8nTestContainer } from '@tests/setup/test-containers';
|
||||
import { N8nAPIClient } from '@/services/n8n-api-client';
|
||||
|
||||
describe('n8n API Integration', () => {
|
||||
let container: any;
|
||||
let apiClient: N8nAPIClient;
|
||||
|
||||
beforeAll(async () => {
|
||||
container = await new N8nTestContainer().start();
|
||||
apiClient = new N8nAPIClient(container.url);
|
||||
}, 30000);
|
||||
|
||||
afterAll(async () => {
|
||||
await container.stop();
|
||||
});
|
||||
|
||||
it('should create and retrieve workflow', async () => {
|
||||
// Create workflow
|
||||
const workflow = createTestWorkflow();
|
||||
const created = await apiClient.createWorkflow(workflow);
|
||||
|
||||
expect(created.id).toBeDefined();
|
||||
|
||||
// Retrieve workflow
|
||||
const retrieved = await apiClient.getWorkflow(created.id);
|
||||
expect(retrieved.name).toBe(workflow.name);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Phase 4: E2E & Performance (Week 7-8)
|
||||
|
||||
### 4.1 E2E Test Setup
|
||||
|
||||
```typescript
|
||||
// tests/e2e/workflows/complete-workflow.test.ts
|
||||
import { MCPClient } from '@tests/utils/mcp-client';
|
||||
import { N8nTestContainer } from '@tests/setup/test-containers';
|
||||
|
||||
describe('Complete Workflow E2E', () => {
|
||||
let mcpServer: any;
|
||||
let n8nContainer: any;
|
||||
let mcpClient: MCPClient;
|
||||
|
||||
beforeAll(async () => {
|
||||
// Start n8n
|
||||
n8nContainer = await new N8nTestContainer().start();
|
||||
|
||||
// Start MCP server
|
||||
mcpServer = await startMCPServer({
|
||||
n8nUrl: n8nContainer.url
|
||||
});
|
||||
|
||||
// Create MCP client
|
||||
mcpClient = new MCPClient(mcpServer.url);
|
||||
}, 60000);
|
||||
|
||||
it('should execute complete workflow creation flow', async () => {
|
||||
// 1. Search for nodes
|
||||
const searchResult = await mcpClient.call('search_nodes', {
|
||||
query: 'webhook http slack'
|
||||
});
|
||||
|
||||
// 2. Get node details
|
||||
const webhookInfo = await mcpClient.call('get_node_info', {
|
||||
nodeType: 'nodes-base.webhook'
|
||||
});
|
||||
|
||||
// 3. Create workflow
|
||||
const workflow = new WorkflowBuilder('E2E Test')
|
||||
.addWebhookNode()
|
||||
.addHttpRequestNode()
|
||||
.addSlackNode()
|
||||
.connectSequentially()
|
||||
.build();
|
||||
|
||||
// 4. Validate workflow
|
||||
const validation = await mcpClient.call('validate_workflow', {
|
||||
workflow
|
||||
});
|
||||
|
||||
expect(validation.isValid).toBe(true);
|
||||
|
||||
// 5. Deploy to n8n
|
||||
const deployed = await mcpClient.call('n8n_create_workflow', {
|
||||
...workflow
|
||||
});
|
||||
|
||||
expect(deployed.id).toBeDefined();
|
||||
expect(deployed.active).toBe(false);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 4.2 Performance Benchmarks
|
||||
|
||||
```typescript
|
||||
// vitest.benchmark.config.ts
|
||||
export default {
|
||||
test: {
|
||||
benchmark: {
|
||||
// Output benchmark results
|
||||
outputFile: './benchmark-results.json',
|
||||
|
||||
// Compare with baseline
|
||||
compare: './benchmark-baseline.json',
|
||||
|
||||
// Fail if performance degrades by more than 10%
|
||||
threshold: {
|
||||
p95: 1.1, // 110% of baseline
|
||||
p99: 1.2 // 120% of baseline
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
```
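
A matching benchmark file would use Vitest's `bench` API. The `searchNodes` import below is an assumed module path for illustration:

```typescript
// tests/benchmarks/search.bench.ts
import { bench, describe } from 'vitest';
import { searchNodes } from '@/services/search'; // assumed path

describe('search performance', () => {
  // Target from the checklist: < 100ms for 1000 nodes
  bench('keyword search across node catalog', async () => {
    await searchNodes('webhook');
  });
});
```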

## Testing Best Practices

### 1. Test Naming Convention
```typescript
// Format: should [expected behavior] when [condition]
it('should return user data when valid ID is provided')
it('should throw ValidationError when email is invalid')
it('should retry 3 times when network fails')
```

### 2. Test Data Builders
```typescript
// Use builders for complex test data
const user = new UserBuilder()
  .withEmail('test@example.com')
  .withRole('admin')
  .build();
```
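
`UserBuilder` is not defined anywhere in this guide; a minimal implementation sketch:

```typescript
interface User {
  email: string;
  role: 'admin' | 'user';
}

class UserBuilder {
  // Sensible defaults keep tests short; override only what matters
  private user: User = { email: 'default@example.com', role: 'user' };

  withEmail(email: string): this {
    this.user.email = email;
    return this;
  }

  withRole(role: User['role']): this {
    this.user.role = role;
    return this;
  }

  build(): User {
    return { ...this.user };
  }
}
```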
|
||||
|
||||
### 3. Custom Matchers
|
||||
```typescript
|
||||
// tests/utils/matchers.ts
|
||||
export const toBeValidNode = (received: any) => {
|
||||
const pass =
|
||||
received.type &&
|
||||
received.displayName &&
|
||||
received.properties &&
|
||||
Array.isArray(received.properties);
|
||||
|
||||
return {
|
||||
pass,
|
||||
message: () => `expected ${received} to be a valid node`
|
||||
};
|
||||
};
|
||||
|
||||
// Usage
|
||||
expect(node).toBeValidNode();
|
||||
```
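
Vitest only recognizes the matcher after it is registered via `expect.extend`, typically in a setup file listed under `setupFiles`:

```typescript
// tests/setup/extend-matchers.ts
import { expect } from 'vitest';
import { toBeValidNode } from '../utils/matchers';

expect.extend({ toBeValidNode });
```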

### 4. Snapshot Testing
```typescript
// For complex structures
it('should generate correct node schema', () => {
  const schema = generateNodeSchema(node);
  expect(schema).toMatchSnapshot();
});
```

### 5. Test Isolation
```typescript
// Always clean up after tests
afterEach(async () => {
  await cleanup();
  vi.clearAllMocks();
  vi.restoreAllMocks();
});
```

## Coverage Goals by Module

| Module | Target | Priority | Notes |
|--------|--------|----------|-------|
| services/config-validator | 95% | High | Critical for reliability |
| services/workflow-validator | 90% | High | Core functionality |
| mcp/tools | 90% | High | User-facing API |
| database/node-repository | 85% | Medium | Well-tested DB layer |
| loaders/node-loader | 85% | Medium | External dependencies |
| parsers/* | 90% | High | Data transformation |
| utils/* | 80% | Low | Helper functions |
| scripts/* | 50% | Low | One-time scripts |

## Continuous Improvement

1. **Weekly Reviews**: Review test coverage and identify gaps
2. **Performance Baselines**: Update benchmarks monthly
3. **Flaky Test Detection**: Monitor and fix within 48 hours
4. **Test Documentation**: Keep examples updated
5. **Developer Training**: Pair programming on tests

## Success Metrics

- [ ] All tests pass in CI (0 failures)
- [ ] Coverage > 80% overall
- [ ] No flaky tests
- [ ] CI runs < 5 minutes
- [ ] Performance benchmarks stable
- [ ] Zero production bugs from tested code

File diff suppressed because it is too large
File diff suppressed because it is too large

@@ -1,66 +0,0 @@
# Token Efficiency Improvements Summary

## Overview
Made all MCP tool descriptions concise and token-efficient while preserving essential information.

## Key Improvements

### Before vs After Examples

1. **search_nodes**
   - Before: ~350 chars with verbose explanation
   - After: 165 chars
   - `Search nodes by keywords. Modes: OR (any word), AND (all words), FUZZY (typos OK). Primary nodes ranked first. Examples: "webhook"→Webhook, "http call"→HTTP Request.`

2. **get_node_info**
   - Before: ~450 chars with warnings about size
   - After: 174 chars
   - `Get FULL node schema (100KB+). TIP: Use get_node_essentials first! Returns all properties/operations/credentials. Prefix required: "nodes-base.httpRequest" not "httpRequest".`

3. **validate_node_minimal**
   - Before: ~350 chars explaining what it doesn't do
   - After: 102 chars
   - `Fast check for missing required fields only. No warnings/suggestions. Returns: list of missing fields.`

4. **get_property_dependencies**
   - Before: ~400 chars with full example
   - After: 131 chars
   - `Shows property dependencies and visibility rules. Example: sendBody=true reveals body fields. Test visibility with optional config.`

## Statistics

### Documentation Tools (22 tools)
- Average description length: **129 characters**
- Total characters: 2,836
- Tools over 200 chars: 1 (list_nodes at 204)

### Management Tools (17 tools)
- Average description length: **93 characters**
- Total characters: 1,578
- Tools over 200 chars: 1 (n8n_update_partial_workflow at 284)

## Strategy Used

1. **Remove redundancy**: Eliminated repeated information available in parameter descriptions
2. **Use abbreviations**: "vs" instead of "versus", "&" instead of "and" where appropriate
3. **Compact examples**: `"webhook"→Webhook` instead of verbose explanations
4. **Direct language**: "Fast check" instead of "Quick validation that only checks"
5. **Move details to documentation**: Complex tools reference `tools_documentation()` for full details
6. **Essential info only**: Focus on what the tool does, not how it works internally

## Special Cases

### n8n_update_partial_workflow
This tool's description is necessarily longer (284 chars) because:
- Lists all 13 operation types
- Critical for users to know available operations
- Directs to full documentation for details

### Complex Documentation Preserved
For tools like `n8n_update_partial_workflow`, detailed documentation was moved to `tools-documentation.ts` rather than deleted, ensuring users can still access comprehensive information when needed.

## Impact
- **Token savings**: ~65-70% reduction in description tokens
- **Faster AI responses**: Less context used for tool descriptions
- **Better UX**: Clearer, more scannable tool list
- **Maintained functionality**: All essential information preserved

@@ -1,118 +0,0 @@
# Transactional Updates Example

This example demonstrates the new transactional update capabilities in v2.7.0.

## Before (v2.6.x and earlier)

Previously, you had to carefully order operations to ensure nodes existed before connecting them:

```json
{
  "id": "workflow-123",
  "operations": [
    // 1. First add all nodes
    { "type": "addNode", "node": { "name": "Process", "type": "n8n-nodes-base.set", ... }},
    { "type": "addNode", "node": { "name": "Notify", "type": "n8n-nodes-base.slack", ... }},

    // 2. Then add connections (would fail if done before nodes)
    { "type": "addConnection", "source": "Webhook", "target": "Process" },
    { "type": "addConnection", "source": "Process", "target": "Notify" }
  ]
}
```

## After (v2.7.0+)

Now you can write operations in any order - the engine automatically handles dependencies:

```json
{
  "id": "workflow-123",
  "operations": [
    // Connections can come first!
    { "type": "addConnection", "source": "Webhook", "target": "Process" },
    { "type": "addConnection", "source": "Process", "target": "Notify" },

    // Nodes added later - still works!
    { "type": "addNode", "node": { "name": "Process", "type": "n8n-nodes-base.set", "position": [400, 300] }},
    { "type": "addNode", "node": { "name": "Notify", "type": "n8n-nodes-base.slack", "position": [600, 300] }}
  ]
}
```

## How It Works

1. **Two-Pass Processing**:
   - Pass 1: All node operations (add, remove, update, move, enable, disable)
   - Pass 2: All other operations (connections, settings, metadata)

2. **Operation Limit**: Maximum 5 operations per request keeps complexity manageable

3. **Atomic Updates**: All operations succeed or all fail - no partial updates
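
Conceptually, the two-pass ordering is just a stable partition of the operations array before anything is applied. A sketch of the idea, not the actual engine code:

```typescript
type Operation = { type: string; [key: string]: unknown };

const NODE_OPS = new Set([
  'addNode', 'removeNode', 'updateNode', 'moveNode', 'enableNode', 'disableNode',
]);

// Pass 1 (node operations) runs before pass 2 (connections, settings, metadata),
// so a connection may reference a node that appears later in the request.
function orderOperations(operations: Operation[]): Operation[] {
  const pass1 = operations.filter(op => NODE_OPS.has(op.type));
  const pass2 = operations.filter(op => !NODE_OPS.has(op.type));
  return [...pass1, ...pass2];
}
```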

## Benefits for AI Agents

- **Intuitive**: Write operations in the order that makes sense logically
- **Reliable**: No need to track dependencies manually
- **Simple**: Focus on what to change, not how to order changes
- **Safe**: Built-in limits prevent overly complex operations

## Complete Example

Here's a real-world example of adding error handling to a workflow:

```json
{
  "id": "workflow-123",
  "operations": [
    // Define the flow first (makes logical sense)
    {
      "type": "removeConnection",
      "source": "HTTP Request",
      "target": "Save to DB"
    },
    {
      "type": "addConnection",
      "source": "HTTP Request",
      "target": "Error Handler"
    },
    {
      "type": "addConnection",
      "source": "Error Handler",
      "target": "Send Alert"
    },

    // Then add the nodes
    {
      "type": "addNode",
      "node": {
        "name": "Error Handler",
        "type": "n8n-nodes-base.if",
        "position": [500, 400],
        "parameters": {
          "conditions": {
            "boolean": [{
              "value1": "={{$json.error}}",
              "value2": true
            }]
          }
        }
      }
    },
    {
      "type": "addNode",
      "node": {
        "name": "Send Alert",
        "type": "n8n-nodes-base.emailSend",
        "position": [700, 400],
        "parameters": {
          "to": "alerts@company.com",
          "subject": "Workflow Error Alert"
        }
      }
    }
  ]
}
```

All operations will be processed correctly, even though connections reference nodes that don't exist yet!

@@ -1,92 +0,0 @@
# Validation Improvements v2.4.2

Based on AI agent feedback, we've implemented several improvements to the `validate_node_operation` tool:

## 🎯 Issues Addressed

### 1. **@version Warnings** ✅ FIXED
- **Issue**: Showed confusing warnings about `@version` property not being used
- **Fix**: Filter out internal properties starting with `@` or `_`
- **Result**: No more false warnings about internal n8n properties

### 2. **Duplicate Errors** ✅ FIXED
- **Issue**: Same error shown multiple times (e.g., missing `ts` field)
- **Fix**: Implemented deduplication that keeps the most specific error message
- **Result**: Each error shown only once with the best description

### 3. **Basic Code Validation** ✅ ADDED
- **Issue**: No syntax validation for Code node
- **Fix**: Added basic syntax checks for JavaScript and Python
- **Features** (a balance-checker sketch follows this list):
  - Unbalanced braces/parentheses detection
  - Python indentation consistency check
  - n8n-specific patterns (return statement, input access)
  - Security warnings (eval/exec usage)
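
The brace check can be as simple as a stack-based scan. A sketch of the idea; the actual validator's implementation may differ, and a production version must also skip string literals and comments:

```typescript
// Returns an error message for unbalanced delimiters, or null if balanced.
function checkBalanced(code: string): string | null {
  const closers: Record<string, string> = { ')': '(', '}': '{', ']': '[' };
  const stack: string[] = [];
  for (const ch of code) {
    if (ch === '(' || ch === '{' || ch === '[') {
      stack.push(ch);
    } else if (ch in closers) {
      if (stack.pop() !== closers[ch]) return 'Unbalanced braces detected';
    }
  }
  return stack.length > 0 ? 'Unbalanced braces detected' : null;
}
```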

## 📊 Before & After

### Before (v2.4.1):
```json
{
  "errors": [
    { "property": "ts", "message": "Required property 'Message Timestamp' is missing" },
    { "property": "ts", "message": "Message timestamp (ts) is required to update a message" }
  ],
  "warnings": [
    { "property": "@version", "message": "Property '@version' is configured but won't be used" }
  ]
}
```

### After (v2.4.2):
```json
{
  "errors": [
    { "property": "ts", "message": "Message timestamp (ts) is required to update a message",
      "fix": "Provide the timestamp of the message to update" }
  ],
  "warnings": [] // No @version warning
}
```

## 🆕 Code Validation Examples

### JavaScript Syntax Check:
```javascript
// Missing closing brace
if (true) {
  return items;
// Error: "Unbalanced braces detected"
```

### Python Indentation Check:
```python
def process():
    if True:  # Tab
        return items  # Spaces
# Error: "Mixed tabs and spaces in indentation"
```

### n8n Pattern Check:
```javascript
const result = items.map(item => item.json);
// Warning: "No return statement found"
// Suggestion: "Add: return items;"
```

## 🚀 Impact

- **Cleaner validation results** - No more noise from internal properties
- **Clearer error messages** - Each issue reported once with best description
- **Better code quality** - Basic syntax validation catches common mistakes
- **n8n best practices** - Warns about missing return statements and input handling

## 📝 Summary

The `validate_node_operation` tool is now even more helpful for AI agents and developers:
- 95% reduction in false positives (operation-aware)
- No duplicate or confusing warnings
- Basic code validation for common syntax errors
- n8n-specific pattern checking

**Rating improved from 9/10 to 9.5/10!** 🎉

@@ -116,17 +116,46 @@ The `n8n_update_partial_workflow` tool allows you to make targeted changes to wo
 }
 ```
 
-#### Update Connection (Change routing)
+#### Rewire Connection
 ```json
 {
-  "type": "updateConnection",
+  "type": "rewireConnection",
   "source": "Webhook",
-  "target": "Send Email",
-  "changes": {
-    "sourceOutput": "false", // Change from 'true' to 'false' output
-    "targetInput": "main"
-  },
-  "description": "Route failed conditions to email"
+  "from": "Old Handler",
+  "to": "New Handler",
+  "description": "Rewire connection to new handler"
 }
 ```
 
+#### Smart Parameters for IF Nodes
+```json
+{
+  "type": "addConnection",
+  "source": "IF",
+  "target": "Success Handler",
+  "branch": "true", // Semantic parameter instead of sourceIndex
+  "description": "Route true branch to success handler"
+}
+```
+
+```json
+{
+  "type": "addConnection",
+  "source": "IF",
+  "target": "Error Handler",
+  "branch": "false", // Routes to false branch (sourceIndex=1)
+  "description": "Route false branch to error handler"
+}
+```
+
+#### Smart Parameters for Switch Nodes
+```json
+{
+  "type": "addConnection",
+  "source": "Switch",
+  "target": "Handler A",
+  "case": 0, // First output
+  "description": "Route case 0 to Handler A"
+}
+```

@@ -577,13 +606,13 @@ The tool validates all operations before applying any changes. Common errors inc
 
 Always check the response for validation errors and adjust your operations accordingly.
 
-## Transactional Updates (v2.7.0+)
+## Transactional Updates
 
 The diff engine now supports transactional updates using a **two-pass processing** approach:
 
 ### How It Works
 
-1. **Operation Limit**: Maximum 5 operations per request to ensure reliability
+1. **No Operation Limit**: Process unlimited operations in a single request
 2. **Two-Pass Processing**:
    - **Pass 1**: All node operations (add, remove, update, move, enable, disable)
    - **Pass 2**: All other operations (connections, settings, metadata)

@@ -633,9 +662,9 @@ This allows you to add nodes and connect them in the same request:
 ### Benefits
 
 - **Order Independence**: You don't need to worry about operation order
-- **Atomic Updates**: All operations succeed or all fail
+- **Atomic Updates**: All operations succeed or all fail (unless continueOnError is enabled)
 - **Intuitive Usage**: Add complex workflow structures in one call
-- **Clear Limits**: 5 operations max keeps things simple and reliable
+- **No Hard Limits**: Process unlimited operations efficiently
 
 ### Example: Complete Workflow Addition

@@ -694,4 +723,4 @@ This allows you to add nodes and connect them in the same request:
 }
 ```
 
-All 5 operations will be processed correctly regardless of order!
+All operations will be processed correctly regardless of order!
0  n8n-nodes.db  (new file)
7640  package-lock.json  (generated; diff suppressed because it is too large)
26  package.json
@@ -1,8 +1,16 @@
 {
   "name": "n8n-mcp",
-  "version": "2.14.3",
+  "version": "2.26.5",
   "description": "Integration between n8n workflow automation and Model Context Protocol (MCP)",
   "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "require": "./dist/index.js",
+      "import": "./dist/index.js"
+    }
+  },
   "bin": {
     "n8n-mcp": "./dist/mcp/index.js"
   },

@@ -31,6 +39,8 @@
   "test:watch": "vitest watch",
   "test:unit": "vitest run tests/unit",
   "test:integration": "vitest run --config vitest.config.integration.ts",
+  "test:integration:n8n": "vitest run tests/integration/n8n-api",
+  "test:cleanup:orphans": "tsx tests/integration/n8n-api/scripts/cleanup-orphans.ts",
   "test:e2e": "vitest run tests/e2e",
   "lint": "tsc --noEmit",
   "typecheck": "tsc --noEmit",

@@ -38,6 +48,7 @@
 "update:n8n:check": "node scripts/update-n8n-deps.js --dry-run",
 "fetch:templates": "node dist/scripts/fetch-templates.js",
 "fetch:templates:update": "node dist/scripts/fetch-templates.js --update",
 "fetch:templates:extract": "node dist/scripts/fetch-templates.js --extract-only",
 "fetch:templates:robust": "node dist/scripts/fetch-templates-robust.js",
 "prebuild:fts5": "npx tsx scripts/prebuild-fts5.ts",
 "test:templates": "node dist/scripts/test-templates.js",

@@ -55,6 +66,7 @@
 "test:workflow-diff": "node dist/scripts/test-workflow-diff.js",
 "test:transactional-diff": "node dist/scripts/test-transactional-diff.js",
 "test:tools-documentation": "node dist/scripts/test-tools-documentation.js",
 "test:structure-validation": "npx tsx scripts/test-structure-validation.ts",
 "test:url-configuration": "npm run build && ts-node scripts/test-url-configuration.ts",
 "test:search-improvements": "node dist/scripts/test-search-improvements.js",
 "test:fts5-search": "node dist/scripts/test-fts5-search.js",

@@ -128,17 +140,19 @@
   "vitest": "^3.2.4"
 },
 "dependencies": {
-  "@modelcontextprotocol/sdk": "^1.13.2",
-  "@n8n/n8n-nodes-langchain": "^1.112.2",
+  "@modelcontextprotocol/sdk": "^1.20.1",
+  "@n8n/n8n-nodes-langchain": "^1.120.1",
+  "@supabase/supabase-js": "^2.57.4",
   "dotenv": "^16.5.0",
   "express": "^5.1.0",
   "express-rate-limit": "^7.1.5",
+  "lru-cache": "^11.2.1",
-  "n8n": "^1.113.3",
-  "n8n-core": "^1.112.1",
-  "n8n-workflow": "^1.110.0",
+  "n8n": "^1.121.2",
+  "n8n-core": "^1.120.1",
+  "n8n-workflow": "^1.118.1",
   "openai": "^4.77.0",
   "sql.js": "^1.13.0",
   "tslib": "^2.6.2",
   "uuid": "^10.0.0",
   "zod": "^3.24.1"
 },

@@ -1,15 +1,17 @@
 {
   "name": "n8n-mcp-runtime",
-  "version": "2.14.0",
+  "version": "2.26.4",
   "description": "n8n MCP Server Runtime Dependencies Only",
   "private": true,
   "dependencies": {
     "@modelcontextprotocol/sdk": "^1.13.2",
+    "@supabase/supabase-js": "^2.57.4",
     "express": "^5.1.0",
     "express-rate-limit": "^7.1.5",
     "dotenv": "^16.5.0",
+    "lru-cache": "^11.2.1",
     "sql.js": "^1.13.0",
     "tslib": "^2.6.2",
     "uuid": "^10.0.0",
     "axios": "^1.7.7"
   },
@@ -1,60 +0,0 @@
# n8n-MCP v2.7.0 Release Notes

## 🎉 What's New

### 🔧 File Refactoring & Version Management
- **Renamed core MCP files** to remove unnecessary suffixes for cleaner codebase:
  - `tools-update.ts` → `tools.ts`
  - `server-update.ts` → `server.ts`
  - `http-server-fixed.ts` → `http-server.ts`
- **Fixed version management** - Now reads from package.json as single source of truth (fixes #5)
- **Updated imports** across 21+ files to use the new file names

### 🔍 New Diagnostic Tool
- **Added `n8n_diagnostic` tool** - Helps troubleshoot why n8n management tools might not be appearing
  - Shows environment variable status, API connectivity, and tool availability
  - Provides step-by-step troubleshooting guidance
  - Includes verbose mode for additional debug information

### 🧹 Code Cleanup
- Removed legacy HTTP server implementation with known issues
- Removed unused legacy API client
- Added version utility for consistent version handling
- Added script to sync runtime package version

## 📦 Installation

### Docker (Recommended)
```bash
docker pull ghcr.io/czlonkowski/n8n-mcp:2.7.0
```

### Claude Desktop
Update your configuration to use the latest version:
```json
{
  "mcpServers": {
    "n8n-mcp": {
      "command": "docker",
      "args": ["run", "-i", "--rm", "ghcr.io/czlonkowski/n8n-mcp:2.7.0"]
    }
  }
}
```

## 🐛 Bug Fixes
- Fixed version mismatch where version was hardcoded as 2.4.1 instead of reading from package.json
- Improved error messages for better debugging

## 📚 Documentation Updates
- Condensed version history in CLAUDE.md
- Updated documentation structure in README.md
- Removed outdated documentation files
- Added n8n_diagnostic tool to documentation

## 🙏 Acknowledgments
Thanks to all contributors and users who reported issues!

---

**Full Changelog**: https://github.com/czlonkowski/n8n-mcp/blob/main/CHANGELOG.md
78  scripts/audit-schema-coverage.ts  (new file)
@@ -0,0 +1,78 @@
/**
 * Database Schema Coverage Audit Script
 *
 * Audits the database to determine how many nodes have complete schema information
 * for resourceLocator mode validation. This helps assess the coverage of our
 * schema-driven validation approach.
 */

import Database from 'better-sqlite3';
import path from 'path';

const dbPath = path.join(__dirname, '../data/nodes.db');
const db = new Database(dbPath, { readonly: true });

console.log('=== Schema Coverage Audit ===\n');

// Query 1: How many nodes have resourceLocator properties?
const totalResourceLocator = db.prepare(`
  SELECT COUNT(*) as count FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
`).get() as { count: number };

console.log(`Nodes with resourceLocator properties: ${totalResourceLocator.count}`);

// Query 2: Of those, how many have modes defined?
const withModes = db.prepare(`
  SELECT COUNT(*) as count FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
    AND properties_schema LIKE '%modes%'
`).get() as { count: number };

console.log(`Nodes with modes defined: ${withModes.count}`);

// Query 3: Which nodes have resourceLocator but NO modes?
const withoutModes = db.prepare(`
  SELECT node_type, display_name
  FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
    AND properties_schema NOT LIKE '%modes%'
  LIMIT 10
`).all() as Array<{ node_type: string; display_name: string }>;

console.log(`\nSample nodes WITHOUT modes (showing 10):`);
withoutModes.forEach(node => {
  console.log(`  - ${node.display_name} (${node.node_type})`);
});

// Calculate coverage percentage
const coverage = totalResourceLocator.count > 0
  ? (withModes.count / totalResourceLocator.count) * 100
  : 0;

console.log(`\nSchema coverage: ${coverage.toFixed(1)}% of resourceLocator nodes have modes defined`);

// Query 4: Get some examples of nodes WITH modes for verification
console.log('\nSample nodes WITH modes (showing 5):');
const withModesExamples = db.prepare(`
  SELECT node_type, display_name
  FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
    AND properties_schema LIKE '%modes%'
  LIMIT 5
`).all() as Array<{ node_type: string; display_name: string }>;

withModesExamples.forEach(node => {
  console.log(`  - ${node.display_name} (${node.node_type})`);
});

// Summary
console.log('\n=== Summary ===');
// The cast must wrap the .get() call before .count can be accessed
const totalNodes = db.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number };
console.log(`Total nodes in database: ${totalNodes.count}`);
console.log(`Nodes with resourceLocator: ${totalResourceLocator.count}`);
console.log(`Nodes with complete mode schemas: ${withModes.count}`);
console.log(`Nodes without mode schemas: ${totalResourceLocator.count - withModes.count}`);
console.log(`\nImplication: Schema-driven validation will apply to ${withModes.count} nodes.`);
console.log(`For the remaining ${totalResourceLocator.count - withModes.count} nodes, validation will be skipped (graceful degradation).`);

db.close();
192  scripts/backfill-mutation-hashes.ts  (new file)
@@ -0,0 +1,192 @@
/**
 * Backfill script to populate structural hashes for existing workflow mutations
 *
 * Purpose: Generates workflow_structure_hash_before and workflow_structure_hash_after
 * for all existing mutations to enable cross-referencing with telemetry_workflows
 *
 * Usage: npx tsx scripts/backfill-mutation-hashes.ts
 *
 * Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en
 */

import { WorkflowSanitizer } from '../src/telemetry/workflow-sanitizer.js';
import { createClient } from '@supabase/supabase-js';

// Initialize Supabase client
const supabaseUrl = process.env.SUPABASE_URL || '';
const supabaseKey = process.env.SUPABASE_SERVICE_ROLE_KEY || '';

if (!supabaseUrl || !supabaseKey) {
  console.error('Error: SUPABASE_URL and SUPABASE_SERVICE_ROLE_KEY environment variables are required');
  process.exit(1);
}

const supabase = createClient(supabaseUrl, supabaseKey);

interface MutationRecord {
  id: string;
  workflow_before: any;
  workflow_after: any;
  workflow_structure_hash_before: string | null;
  workflow_structure_hash_after: string | null;
}

/**
 * Fetch all mutations that need structural hashes
 */
async function fetchMutationsToBackfill(): Promise<MutationRecord[]> {
  console.log('Fetching mutations without structural hashes...');

  const { data, error } = await supabase
    .from('workflow_mutations')
    .select('id, workflow_before, workflow_after, workflow_structure_hash_before, workflow_structure_hash_after')
    .is('workflow_structure_hash_before', null);

  if (error) {
    throw new Error(`Failed to fetch mutations: ${error.message}`);
  }

  console.log(`Found ${data?.length || 0} mutations to backfill`);
  return data || [];
}

/**
 * Generate structural hash for a workflow
 */
function generateStructuralHash(workflow: any): string {
  try {
    return WorkflowSanitizer.generateWorkflowHash(workflow);
  } catch (error) {
    console.error('Error generating hash:', error);
    return '';
  }
}

/**
 * Update a single mutation with structural hashes
 */
async function updateMutation(id: string, structureHashBefore: string, structureHashAfter: string): Promise<boolean> {
  const { error } = await supabase
    .from('workflow_mutations')
    .update({
      workflow_structure_hash_before: structureHashBefore,
      workflow_structure_hash_after: structureHashAfter,
    })
    .eq('id', id);

  if (error) {
    console.error(`Failed to update mutation ${id}:`, error.message);
    return false;
  }

  return true;
}

/**
 * Process mutations in batches
 */
async function backfillMutations() {
  const startTime = Date.now();
  console.log('Starting backfill process...\n');

  // Fetch mutations
  const mutations = await fetchMutationsToBackfill();

  if (mutations.length === 0) {
    console.log('No mutations need backfilling. All done!');
    return;
  }

  let processedCount = 0;
  let successCount = 0;
  let errorCount = 0;
  const errors: Array<{ id: string; error: string }> = [];

  // Process each mutation
  for (const mutation of mutations) {
    try {
      // Generate structural hashes
      const structureHashBefore = generateStructuralHash(mutation.workflow_before);
      const structureHashAfter = generateStructuralHash(mutation.workflow_after);

      if (!structureHashBefore || !structureHashAfter) {
        console.warn(`Skipping mutation ${mutation.id}: Failed to generate hashes`);
        errors.push({ id: mutation.id, error: 'Failed to generate hashes' });
        errorCount++;
        continue;
      }

      // Update database
      const success = await updateMutation(mutation.id, structureHashBefore, structureHashAfter);

      if (success) {
        successCount++;
      } else {
        errorCount++;
        errors.push({ id: mutation.id, error: 'Database update failed' });
      }

      processedCount++;

      // Progress update every 100 mutations
      if (processedCount % 100 === 0) {
        const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
        const rate = (processedCount / (Date.now() - startTime) * 1000).toFixed(1);
        console.log(
          `Progress: ${processedCount}/${mutations.length} (${((processedCount / mutations.length) * 100).toFixed(1)}%) | ` +
          `Success: ${successCount} | Errors: ${errorCount} | Rate: ${rate}/s | Elapsed: ${elapsed}s`
        );
      }
    } catch (error) {
      console.error(`Unexpected error processing mutation ${mutation.id}:`, error);
      errors.push({ id: mutation.id, error: String(error) });
      errorCount++;
    }
  }

  // Final summary
  const duration = ((Date.now() - startTime) / 1000).toFixed(1);
  console.log('\n' + '='.repeat(80));
  console.log('BACKFILL COMPLETE');
  console.log('='.repeat(80));
  console.log(`Total mutations processed: ${processedCount}`);
  console.log(`Successfully updated: ${successCount}`);
  console.log(`Errors: ${errorCount}`);
  console.log(`Duration: ${duration}s`);
  console.log(`Average rate: ${(processedCount / (Date.now() - startTime) * 1000).toFixed(1)} mutations/s`);

  if (errors.length > 0) {
    console.log('\nErrors encountered:');
    errors.slice(0, 10).forEach(({ id, error }) => {
      console.log(`  - ${id}: ${error}`);
    });
    if (errors.length > 10) {
      console.log(`  ... and ${errors.length - 10} more errors`);
    }
  }

  // Verify cross-reference matches
  console.log('\n' + '='.repeat(80));
  console.log('VERIFYING CROSS-REFERENCE MATCHES');
  console.log('='.repeat(80));

  const { data: statsData, error: statsError } = await supabase.rpc('get_mutation_crossref_stats');

  if (statsError) {
    console.error('Failed to get cross-reference stats:', statsError.message);
  } else if (statsData && statsData.length > 0) {
    const stats = statsData[0];
    console.log(`Total mutations: ${stats.total_mutations}`);
    console.log(`Before matches: ${stats.before_matches} (${stats.before_match_rate}%)`);
    console.log(`After matches: ${stats.after_matches} (${stats.after_match_rate}%)`);
    console.log(`Both matches: ${stats.both_matches}`);
  }

  console.log('\nBackfill process completed successfully! ✓');
}

// Run the backfill
backfillMutations().catch((error) => {
  console.error('Fatal error during backfill:', error);
  process.exit(1);
});
41  scripts/export-webhook-workflows.ts  (new file)
@@ -0,0 +1,41 @@
#!/usr/bin/env tsx

/**
 * Export Webhook Workflow JSONs
 *
 * Generates the 4 webhook workflow JSON files needed for integration testing.
 * These workflows must be imported into n8n and activated manually.
 */

import { writeFileSync, mkdirSync } from 'fs';
import { join } from 'path';
import { exportAllWebhookWorkflows } from '../tests/integration/n8n-api/utils/webhook-workflows';

const OUTPUT_DIR = join(process.cwd(), 'workflows-for-import');

// Create output directory
mkdirSync(OUTPUT_DIR, { recursive: true });

// Generate all workflow JSONs
const workflows = exportAllWebhookWorkflows();

// Write each workflow to a separate file
Object.entries(workflows).forEach(([method, workflow]) => {
  const filename = `webhook-${method.toLowerCase()}.json`;
  const filepath = join(OUTPUT_DIR, filename);

  writeFileSync(filepath, JSON.stringify(workflow, null, 2), 'utf-8');

  console.log(`✓ Generated: ${filename}`);
});

console.log(`\n✓ All workflow JSONs written to: ${OUTPUT_DIR}`);
console.log('\nNext steps:');
console.log('1. Import each JSON file into your n8n instance');
console.log('2. Activate each workflow in the n8n UI');
console.log('3. Copy the webhook URLs from each workflow (open workflow → Webhook node → copy URL)');
console.log('4. Add them to your .env file:');
console.log('   N8N_TEST_WEBHOOK_GET_URL=https://your-n8n.com/webhook/mcp-test-get');
console.log('   N8N_TEST_WEBHOOK_POST_URL=https://your-n8n.com/webhook/mcp-test-post');
console.log('   N8N_TEST_WEBHOOK_PUT_URL=https://your-n8n.com/webhook/mcp-test-put');
console.log('   N8N_TEST_WEBHOOK_DELETE_URL=https://your-n8n.com/webhook/mcp-test-delete');
45  scripts/generate-initial-release-notes.js  (new file)
@@ -0,0 +1,45 @@
#!/usr/bin/env node

/**
 * Generate release notes for the initial release
 * Used by GitHub Actions when no previous tag exists
 */

const { execSync } = require('child_process');

function generateInitialReleaseNotes(version) {
  try {
    // Get total commit count
    const commitCount = execSync('git rev-list --count HEAD', { encoding: 'utf8' }).trim();

    // Generate release notes
    const releaseNotes = [
      '### 🎉 Initial Release',
      '',
      `This is the initial release of n8n-mcp v${version}.`,
      '',
      '---',
      '',
      '**Release Statistics:**',
      `- Commit count: ${commitCount}`,
      '- First release setup'
    ];

    return releaseNotes.join('\n');

  } catch (error) {
    console.error(`Error generating initial release notes: ${error.message}`);
    return `Failed to generate initial release notes: ${error.message}`;
  }
}

// Parse command line arguments
const version = process.argv[2];

if (!version) {
  console.error('Usage: generate-initial-release-notes.js <version>');
  process.exit(1);
}

const releaseNotes = generateInitialReleaseNotes(version);
console.log(releaseNotes);
121  scripts/generate-release-notes.js  (new file)
@@ -0,0 +1,121 @@
#!/usr/bin/env node

/**
 * Generate release notes from commit messages between two tags
 * Used by GitHub Actions to create automated release notes
 */

const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');

function generateReleaseNotes(previousTag, currentTag) {
  try {
    console.log(`Generating release notes from ${previousTag} to ${currentTag}`);

    // Get commits between tags
    const gitLogCommand = `git log --pretty=format:"%H|%s|%an|%ae|%ad" --date=short --no-merges ${previousTag}..${currentTag}`;
    const commitsOutput = execSync(gitLogCommand, { encoding: 'utf8' });

    if (!commitsOutput.trim()) {
      console.log('No commits found between tags');
      return 'No changes in this release.';
    }

    const commits = commitsOutput.trim().split('\n').map(line => {
      const [hash, subject, author, email, date] = line.split('|');
      return { hash, subject, author, email, date };
    });

    // Categorize commits
    const categories = {
      'feat': { title: '✨ Features', commits: [] },
      'fix': { title: '🐛 Bug Fixes', commits: [] },
      'docs': { title: '📚 Documentation', commits: [] },
      'refactor': { title: '♻️ Refactoring', commits: [] },
      'test': { title: '🧪 Testing', commits: [] },
      'perf': { title: '⚡ Performance', commits: [] },
      'style': { title: '💅 Styling', commits: [] },
      'ci': { title: '🔧 CI/CD', commits: [] },
      'build': { title: '📦 Build', commits: [] },
      'chore': { title: '🔧 Maintenance', commits: [] },
      'other': { title: '📝 Other Changes', commits: [] }
    };

    commits.forEach(commit => {
      const subject = commit.subject.toLowerCase();
      let categorized = false;

      // Check for conventional commit prefixes
      for (const [prefix, category] of Object.entries(categories)) {
        if (prefix !== 'other' && subject.startsWith(`${prefix}:`)) {
          category.commits.push(commit);
          categorized = true;
          break;
        }
      }

      // If not categorized, put in other
      if (!categorized) {
        categories.other.commits.push(commit);
      }
    });

    // Generate release notes
    const releaseNotes = [];

    for (const [key, category] of Object.entries(categories)) {
      if (category.commits.length > 0) {
        releaseNotes.push(`### ${category.title}`);
        releaseNotes.push('');

        category.commits.forEach(commit => {
          // Clean up the subject by removing the prefix if it exists
          let cleanSubject = commit.subject;
          const colonIndex = cleanSubject.indexOf(':');
          if (colonIndex !== -1 && cleanSubject.substring(0, colonIndex).match(/^(feat|fix|docs|refactor|test|perf|style|ci|build|chore)$/)) {
            cleanSubject = cleanSubject.substring(colonIndex + 1).trim();
            // Capitalize first letter
            cleanSubject = cleanSubject.charAt(0).toUpperCase() + cleanSubject.slice(1);
          }

          releaseNotes.push(`- ${cleanSubject} (${commit.hash.substring(0, 7)})`);
        });

        releaseNotes.push('');
      }
    }

    // Add commit statistics
    const totalCommits = commits.length;
    const contributors = [...new Set(commits.map(c => c.author))];

    releaseNotes.push('---');
    releaseNotes.push('');
    releaseNotes.push(`**Release Statistics:**`);
    releaseNotes.push(`- ${totalCommits} commit${totalCommits !== 1 ? 's' : ''}`);
    releaseNotes.push(`- ${contributors.length} contributor${contributors.length !== 1 ? 's' : ''}`);

    if (contributors.length <= 5) {
      releaseNotes.push(`- Contributors: ${contributors.join(', ')}`);
    }

    return releaseNotes.join('\n');

  } catch (error) {
    console.error(`Error generating release notes: ${error.message}`);
    return `Failed to generate release notes: ${error.message}`;
  }
}

// Parse command line arguments
const previousTag = process.argv[2];
const currentTag = process.argv[3];

if (!previousTag || !currentTag) {
  console.error('Usage: generate-release-notes.js <previous-tag> <current-tag>');
  process.exit(1);
}

const releaseNotes = generateReleaseNotes(previousTag, currentTag);
console.log(releaseNotes);
99
scripts/process-batch-metadata.ts
Normal file
99
scripts/process-batch-metadata.ts
Normal file
@@ -0,0 +1,99 @@
#!/usr/bin/env ts-node
import * as fs from 'fs';
import * as path from 'path';
import { createDatabaseAdapter } from '../src/database/database-adapter';

interface BatchResponse {
  id: string;
  custom_id: string;
  response: {
    status_code: number;
    body: {
      choices: Array<{
        message: {
          content: string;
        };
      }>;
    };
  };
  error: any;
}

async function processBatchMetadata(batchFile: string) {
  console.log(`📥 Processing batch file: ${batchFile}`);

  // Read the JSONL file
  const content = fs.readFileSync(batchFile, 'utf-8');
  const lines = content.trim().split('\n');

  console.log(`📊 Found ${lines.length} batch responses`);

  // Initialize database
  const db = await createDatabaseAdapter('./data/nodes.db');

  let updated = 0;
  let skipped = 0;
  let errors = 0;

  for (const line of lines) {
    try {
      const response: BatchResponse = JSON.parse(line);

      // Extract template ID from custom_id (format: "template-9100")
      const templateId = parseInt(response.custom_id.replace('template-', ''));

      // Check for errors
      if (response.error || response.response.status_code !== 200) {
        console.warn(`⚠️  Template ${templateId}: API error`, response.error);
        errors++;
        continue;
      }

      // Extract metadata from response
      const metadataJson = response.response.body.choices[0].message.content;

      // Validate it's valid JSON
      JSON.parse(metadataJson); // Will throw if invalid

      // Update database
      const stmt = db.prepare(`
        UPDATE templates
        SET metadata_json = ?
        WHERE id = ?
      `);

      stmt.run(metadataJson, templateId);
      updated++;

      console.log(`✅ Template ${templateId}: Updated metadata`);

    } catch (error: any) {
      console.error(`❌ Error processing line:`, error.message);
      errors++;
    }
  }

  // Close database
  if ('close' in db && typeof db.close === 'function') {
    db.close();
  }

  console.log(`\n📈 Summary:`);
  console.log(`   - Updated: ${updated}`);
  console.log(`   - Skipped: ${skipped}`);
  console.log(`   - Errors: ${errors}`);
  console.log(`   - Total: ${lines.length}`);
}

// Main
const batchFile = process.argv[2] || '/Users/romualdczlonkowski/Pliki/n8n-mcp/n8n-mcp/docs/batch_68fff7242850819091cfed64f10fb6b4_output.jsonl';

processBatchMetadata(batchFile)
  .then(() => {
    console.log('\n✅ Batch processing complete!');
    process.exit(0);
  })
  .catch((error) => {
    console.error('\n❌ Batch processing failed:', error);
    process.exit(1);
  });
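For illustration, each line of the input JSONL is expected to match the BatchResponse shape above; a hypothetical (abridged) entry, with the metadata content invented for the example, would look like:

{"id": "batch_req_001", "custom_id": "template-9100", "response": {"status_code": 200, "body": {"choices": [{"message": {"content": "{\"categories\": [\"ai\"]}"}}]}}, "error": null}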
@@ -11,29 +11,8 @@ NC='\033[0m' # No Color

echo "🚀 Preparing n8n-mcp for npm publish..."

# Run tests first to ensure quality
echo "🧪 Running tests..."
TEST_OUTPUT=$(npm test 2>&1)
TEST_EXIT_CODE=$?

# Check test results - look for actual test failures vs coverage issues
if echo "$TEST_OUTPUT" | grep -q "Tests.*failed"; then
  # Extract failed count using sed (portable)
  FAILED_COUNT=$(echo "$TEST_OUTPUT" | sed -n 's/.*Tests.*\([0-9]*\) failed.*/\1/p' | head -1)
  if [ "$FAILED_COUNT" != "0" ] && [ "$FAILED_COUNT" != "" ]; then
    echo -e "${RED}❌ $FAILED_COUNT test(s) failed. Aborting publish.${NC}"
    echo "$TEST_OUTPUT" | tail -20
    exit 1
  fi
fi

# If we got here, tests passed - check coverage
if echo "$TEST_OUTPUT" | grep -q "Coverage.*does not meet global threshold"; then
  echo -e "${YELLOW}⚠️  All tests passed but coverage is below threshold${NC}"
  echo -e "${YELLOW}   Consider improving test coverage before next release${NC}"
else
  echo -e "${GREEN}✅ All tests passed with good coverage!${NC}"
fi
# Skip tests - they already run in CI before merge/publish
echo "⏭️  Skipping tests (already verified in CI)"

# Sync version to runtime package first
echo "🔄 Syncing version to package.runtime.json..."
@@ -80,6 +59,15 @@ node -e "
const pkg = require('./package.json');
pkg.name = 'n8n-mcp';
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
pkg.main = 'dist/index.js';
pkg.types = 'dist/index.d.ts';
pkg.exports = {
  '.': {
    types: './dist/index.d.ts',
    require: './dist/index.js',
    import: './dist/index.js'
  }
};
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
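Applied to the manifest, those assignments would yield a fragment along these lines (derived directly from the code above):

"exports": {
  ".": {
    "types": "./dist/index.d.ts",
    "require": "./dist/index.js",
    "import": "./dist/index.js"
  }
},
"bin": { "n8n-mcp": "./dist/mcp/index.js" }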
scripts/test-ai-validation-debug.ts (new file, 189 lines)
@@ -0,0 +1,189 @@
#!/usr/bin/env node
/**
 * Debug test for AI validation issues
 * Reproduces the bugs found by n8n-mcp-tester
 */

import { validateAISpecificNodes, buildReverseConnectionMap } from '../src/services/ai-node-validator';
import type { WorkflowJson } from '../src/services/ai-tool-validators';
import { NodeTypeNormalizer } from '../src/utils/node-type-normalizer';

console.log('=== AI Validation Debug Tests ===\n');

// Test 1: AI Agent with NO language model connection
console.log('Test 1: Missing Language Model Detection');
const workflow1: WorkflowJson = {
  name: 'Test Missing LM',
  nodes: [
    {
      id: 'ai-agent-1',
      name: 'AI Agent',
      type: '@n8n/n8n-nodes-langchain.agent',
      position: [500, 300],
      parameters: {
        promptType: 'define',
        text: 'You are a helpful assistant'
      },
      typeVersion: 1.7
    }
  ],
  connections: {
    // NO connections - AI Agent is isolated
  }
};

console.log('Workflow:', JSON.stringify(workflow1, null, 2));

const reverseMap1 = buildReverseConnectionMap(workflow1);
console.log('\nReverse connection map for AI Agent:');
console.log('Entries:', Array.from(reverseMap1.entries()));
console.log('AI Agent connections:', reverseMap1.get('AI Agent'));

// Check node normalization
const normalizedType1 = NodeTypeNormalizer.normalizeToFullForm(workflow1.nodes[0].type);
console.log(`\nNode type: ${workflow1.nodes[0].type}`);
console.log(`Normalized type: ${normalizedType1}`);
console.log(`Match check: ${normalizedType1 === '@n8n/n8n-nodes-langchain.agent'}`);

const issues1 = validateAISpecificNodes(workflow1);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues1, null, 2));

const hasMissingLMError = issues1.some(
  i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
console.log(`\n✓ Has MISSING_LANGUAGE_MODEL error: ${hasMissingLMError}`);
console.log(`✗ Expected: true, Got: ${hasMissingLMError}`);

// Test 2: AI Agent WITH language model connection
console.log('\n\n' + '='.repeat(60));
console.log('Test 2: AI Agent WITH Language Model (Should be valid)');
const workflow2: WorkflowJson = {
  name: 'Test With LM',
  nodes: [
    {
      id: 'openai-1',
      name: 'OpenAI Chat Model',
      type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
      position: [200, 300],
      parameters: {
        modelName: 'gpt-4'
      },
      typeVersion: 1
    },
    {
      id: 'ai-agent-1',
      name: 'AI Agent',
      type: '@n8n/n8n-nodes-langchain.agent',
      position: [500, 300],
      parameters: {
        promptType: 'define',
        text: 'You are a helpful assistant'
      },
      typeVersion: 1.7
    }
  ],
  connections: {
    'OpenAI Chat Model': {
      ai_languageModel: [
        [
          {
            node: 'AI Agent',
            type: 'ai_languageModel',
            index: 0
          }
        ]
      ]
    }
  }
};

console.log('\nConnections:', JSON.stringify(workflow2.connections, null, 2));

const reverseMap2 = buildReverseConnectionMap(workflow2);
console.log('\nReverse connection map for AI Agent:');
console.log('AI Agent connections:', reverseMap2.get('AI Agent'));

const issues2 = validateAISpecificNodes(workflow2);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues2, null, 2));

const hasMissingLMError2 = issues2.some(
  i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
console.log(`\n✓ Should NOT have MISSING_LANGUAGE_MODEL error: ${!hasMissingLMError2}`);
console.log(`Expected: false, Got: ${hasMissingLMError2}`);

// Test 3: AI Agent with tools but no language model
console.log('\n\n' + '='.repeat(60));
console.log('Test 3: AI Agent with Tools but NO Language Model');
const workflow3: WorkflowJson = {
  name: 'Test Tools No LM',
  nodes: [
    {
      id: 'http-tool-1',
      name: 'HTTP Request Tool',
      type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
      position: [200, 300],
      parameters: {
        toolDescription: 'Calls an API',
        url: 'https://api.example.com'
      },
      typeVersion: 1.1
    },
    {
      id: 'ai-agent-1',
      name: 'AI Agent',
      type: '@n8n/n8n-nodes-langchain.agent',
      position: [500, 300],
      parameters: {
        promptType: 'define',
        text: 'You are a helpful assistant'
      },
      typeVersion: 1.7
    }
  ],
  connections: {
    'HTTP Request Tool': {
      ai_tool: [
        [
          {
            node: 'AI Agent',
            type: 'ai_tool',
            index: 0
          }
        ]
      ]
    }
  }
};

console.log('\nConnections:', JSON.stringify(workflow3.connections, null, 2));

const reverseMap3 = buildReverseConnectionMap(workflow3);
console.log('\nReverse connection map for AI Agent:');
const aiAgentConns = reverseMap3.get('AI Agent');
console.log('AI Agent connections:', aiAgentConns);
console.log('Connection types:', aiAgentConns?.map(c => c.type));

const issues3 = validateAISpecificNodes(workflow3);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues3, null, 2));

const hasMissingLMError3 = issues3.some(
  i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
const hasNoToolsInfo3 = issues3.some(
  i => i.severity === 'info' && i.message.includes('no ai_tool connections')
);

console.log(`\n✓ Should have MISSING_LANGUAGE_MODEL error: ${hasMissingLMError3}`);
console.log(`Expected: true, Got: ${hasMissingLMError3}`);
console.log(`✗ Should NOT have "no tools" info: ${!hasNoToolsInfo3}`);
console.log(`Expected: false, Got: ${hasNoToolsInfo3}`);

console.log('\n' + '='.repeat(60));
console.log('Summary:');
console.log(`Test 1 (No LM): ${hasMissingLMError ? 'PASS ✓' : 'FAIL ✗'}`);
console.log(`Test 2 (With LM): ${!hasMissingLMError2 ? 'PASS ✓' : 'FAIL ✗'}`);
console.log(`Test 3 (Tools, No LM): ${hasMissingLMError3 && !hasNoToolsInfo3 ? 'PASS ✓' : 'FAIL ✗'}`);
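For Test 2, the reverse map is keyed by the target node's name, so the lookup above should yield one inbound ai_languageModel connection. Roughly (only the `type` field is confirmed by the script's logging; the other field names are assumptions):

reverseMap2.get('AI Agent')
// → [ { sourceName: 'OpenAI Chat Model', type: 'ai_languageModel', index: 0 } ]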
scripts/test-docker-fingerprint.ts (new file, 163 lines)
@@ -0,0 +1,163 @@
/**
 * Test Docker Host Fingerprinting
 * Verifies that host machine characteristics are stable across container recreations
 */

import { existsSync, readFileSync } from 'fs';
import { platform, arch } from 'os';
import { createHash } from 'crypto';

console.log('=== Docker Host Fingerprinting Test ===\n');

function generateHostFingerprint(): string {
  try {
    const signals: string[] = [];

    console.log('Collecting host signals...\n');

    // CPU info (stable across container recreations)
    if (existsSync('/proc/cpuinfo')) {
      const cpuinfo = readFileSync('/proc/cpuinfo', 'utf-8');
      const modelMatch = cpuinfo.match(/model name\s*:\s*(.+)/);
      const coresMatch = cpuinfo.match(/processor\s*:/g);

      if (modelMatch) {
        const cpuModel = modelMatch[1].trim();
        signals.push(cpuModel);
        console.log('✓ CPU Model:', cpuModel);
      }

      if (coresMatch) {
        const cores = `cores:${coresMatch.length}`;
        signals.push(cores);
        console.log('✓ CPU Cores:', coresMatch.length);
      }
    } else {
      console.log('✗ /proc/cpuinfo not available (Windows/Mac Docker)');
    }

    // Memory (stable)
    if (existsSync('/proc/meminfo')) {
      const meminfo = readFileSync('/proc/meminfo', 'utf-8');
      const totalMatch = meminfo.match(/MemTotal:\s+(\d+)/);

      if (totalMatch) {
        const memory = `mem:${totalMatch[1]}`;
        signals.push(memory);
        console.log('✓ Total Memory:', totalMatch[1], 'kB');
      }
    } else {
      console.log('✗ /proc/meminfo not available (Windows/Mac Docker)');
    }

    // Docker network subnet
    const networkInfo = getDockerNetworkInfo();
    if (networkInfo) {
      signals.push(networkInfo);
      console.log('✓ Network Info:', networkInfo);
    } else {
      console.log('✗ Network info not available');
    }

    // Platform basics (stable)
    signals.push(platform(), arch());
    console.log('✓ Platform:', platform());
    console.log('✓ Architecture:', arch());

    // Generate stable ID from all signals
    console.log('\nCombined signals:', signals.join(' | '));
    const fingerprint = signals.join('-');
    const userId = createHash('sha256').update(fingerprint).digest('hex').substring(0, 16);

    return userId;

  } catch (error) {
    console.error('Error generating fingerprint:', error);
    // Fallback
    return createHash('sha256')
      .update(`${platform()}-${arch()}-docker`)
      .digest('hex')
      .substring(0, 16);
  }
}

function getDockerNetworkInfo(): string | null {
  try {
    // Read routing table to get bridge network
    if (existsSync('/proc/net/route')) {
      const routes = readFileSync('/proc/net/route', 'utf-8');
      const lines = routes.split('\n');

      for (const line of lines) {
        if (line.includes('eth0')) {
          const parts = line.split(/\s+/);
          if (parts[2]) {
            const gateway = parseInt(parts[2], 16).toString(16);
            return `net:${gateway}`;
          }
        }
      }
    }
  } catch {
    // Ignore errors
  }
  return null;
}

// Test environment detection
console.log('\n=== Environment Detection ===\n');

const isDocker = process.env.IS_DOCKER === 'true';
const isCloudEnvironment = !!(
  process.env.RAILWAY_ENVIRONMENT ||
  process.env.RENDER ||
  process.env.FLY_APP_NAME ||
  process.env.HEROKU_APP_NAME ||
  process.env.AWS_EXECUTION_ENV ||
  process.env.KUBERNETES_SERVICE_HOST
);

console.log('IS_DOCKER env:', process.env.IS_DOCKER);
console.log('Docker detected:', isDocker);
console.log('Cloud environment:', isCloudEnvironment);

// Generate fingerprints
console.log('\n=== Fingerprint Generation ===\n');

const fingerprint1 = generateHostFingerprint();
const fingerprint2 = generateHostFingerprint();
const fingerprint3 = generateHostFingerprint();

console.log('\nFingerprint 1:', fingerprint1);
console.log('Fingerprint 2:', fingerprint2);
console.log('Fingerprint 3:', fingerprint3);

const consistent = fingerprint1 === fingerprint2 && fingerprint2 === fingerprint3;
console.log('\nConsistent:', consistent ? '✓ YES' : '✗ NO');

// Test explicit ID override
console.log('\n=== Environment Variable Override Test ===\n');

if (process.env.N8N_MCP_USER_ID) {
  console.log('Explicit user ID:', process.env.N8N_MCP_USER_ID);
  console.log('This would override the fingerprint');
} else {
  console.log('No explicit user ID set');
  console.log('To test: N8N_MCP_USER_ID=my-custom-id npx tsx ' + process.argv[1]);
}

// Stability estimate
console.log('\n=== Stability Analysis ===\n');

const hasStableSignals = existsSync('/proc/cpuinfo') || existsSync('/proc/meminfo');
if (hasStableSignals) {
  console.log('✓ Host-based signals available');
  console.log('✓ Fingerprint should be stable across container recreations');
  console.log('✓ Different fingerprints on different physical hosts');
} else {
  console.log('⚠️  Limited host signals (Windows/Mac Docker Desktop)');
  console.log('⚠️  Fingerprint may not be fully stable');
  console.log('💡 Recommendation: Use N8N_MCP_USER_ID env var for stability');
}

console.log('\n');
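The override test implies a simple precedence: an explicit ID beats the derived fingerprint. A minimal sketch of that resolution (the function name is illustrative, not the project's actual API):

function resolveUserId(): string {
  // An explicitly configured ID wins; otherwise fall back to the host fingerprint.
  return process.env.N8N_MCP_USER_ID ?? generateHostFingerprint();
}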
scripts/test-error-message-tracking.ts (new file, 58 lines)
@@ -0,0 +1,58 @@
/**
 * Test script to verify error message tracking is working
 */

import { telemetry } from '../src/telemetry';

async function testErrorTracking() {
  console.log('=== Testing Error Message Tracking ===\n');

  // Track session first
  console.log('1. Starting session...');
  telemetry.trackSessionStart();

  // Track an error WITH a message
  console.log('\n2. Tracking error WITH message:');
  const testErrorMessage = 'This is a test error message with sensitive data: password=secret123 and test@example.com';
  telemetry.trackError(
    'TypeError',
    'tool_execution',
    'test_tool',
    testErrorMessage
  );
  console.log(`   Original message: "${testErrorMessage}"`);

  // Track an error WITHOUT a message
  console.log('\n3. Tracking error WITHOUT message:');
  telemetry.trackError(
    'Error',
    'tool_execution',
    'test_tool2'
  );

  // Check the event queue
  const metrics = telemetry.getMetrics();
  console.log('\n4. Telemetry metrics:');
  console.log('   Status:', metrics.status);
  console.log('   Events queued:', metrics.tracking.eventsQueued);

  // Get raw event queue to inspect
  const eventTracker = (telemetry as any).eventTracker;
  const queue = eventTracker.getEventQueue();

  console.log('\n5. Event queue contents:');
  queue.forEach((event, i) => {
    console.log(`\n   Event ${i + 1}:`);
    console.log(`   - Type: ${event.event}`);
    console.log(`   - Properties:`, JSON.stringify(event.properties, null, 6));
  });

  // Flush to database
  console.log('\n6. Flushing to database...');
  await telemetry.flush();

  console.log('\n7. Done! Check Supabase for error events with "error" field.');
  console.log('   Query: SELECT * FROM telemetry_events WHERE event = \'error_occurred\' ORDER BY created_at DESC LIMIT 5;');
}

testErrorTracking().catch(console.error);
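The first tracked message deliberately embeds a credential and an e-mail address, and step 7 asks the reader to verify what actually lands in Supabase; the expectation is that the telemetry layer redacts such values before storage. A minimal sketch of that kind of redaction (an assumption about the sanitizer's behavior, not its actual implementation):

function sanitizeErrorMessage(message: string): string {
  return message
    .replace(/[\w.+-]+@[\w-]+\.[\w.-]+/g, '[EMAIL]')           // e-mail addresses
    .replace(/password\s*=\s*\S+/gi, 'password=[REDACTED]');   // inline credentials
}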
scripts/test-structure-validation.ts (new file, 470 lines)
@@ -0,0 +1,470 @@
#!/usr/bin/env ts-node
/**
 * Phase 3: Real-World Type Structure Validation
 *
 * Tests type structure validation against real workflow templates from n8n.io
 * to ensure production readiness. Validates filter, resourceMapper,
 * assignmentCollection, and resourceLocator types.
 *
 * Usage:
 *   npm run build && node dist/scripts/test-structure-validation.js
 *
 * or with ts-node:
 *   npx ts-node scripts/test-structure-validation.ts
 */

import { createDatabaseAdapter } from '../src/database/database-adapter';
import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator';
import type { NodePropertyTypes } from 'n8n-workflow';
import { gunzipSync } from 'zlib';

interface ValidationResult {
  templateId: number;
  templateName: string;
  templateViews: number;
  nodeId: string;
  nodeName: string;
  nodeType: string;
  propertyName: string;
  propertyType: NodePropertyTypes;
  valid: boolean;
  errors: Array<{ type: string; property?: string; message: string }>;
  warnings: Array<{ type: string; property?: string; message: string }>;
  validationTimeMs: number;
}

interface ValidationStats {
  totalTemplates: number;
  totalNodes: number;
  totalValidations: number;
  passedValidations: number;
  failedValidations: number;
  byType: Record<string, { passed: number; failed: number }>;
  byError: Record<string, number>;
  avgValidationTimeMs: number;
  maxValidationTimeMs: number;
}

// Special types we want to validate
const SPECIAL_TYPES: NodePropertyTypes[] = [
  'filter',
  'resourceMapper',
  'assignmentCollection',
  'resourceLocator',
];

function decompressWorkflow(compressed: string): any {
  try {
    const buffer = Buffer.from(compressed, 'base64');
    const decompressed = gunzipSync(buffer);
    return JSON.parse(decompressed.toString('utf-8'));
  } catch (error: any) {
    throw new Error(`Failed to decompress workflow: ${error.message}`);
  }
}

async function loadTopTemplates(db: any, limit: number = 100) {
  console.log(`📥 Loading top ${limit} templates by popularity...\n`);

  const stmt = db.prepare(`
    SELECT
      id,
      name,
      workflow_json_compressed,
      views
    FROM templates
    WHERE workflow_json_compressed IS NOT NULL
    ORDER BY views DESC
    LIMIT ?
  `);

  const templates = stmt.all(limit);
  console.log(`✓ Loaded ${templates.length} templates\n`);

  return templates;
}

function extractNodesWithSpecialTypes(workflowJson: any): Array<{
  nodeId: string;
  nodeName: string;
  nodeType: string;
  properties: Array<{ name: string; type: NodePropertyTypes; value: any }>;
}> {
  const results: Array<any> = [];

  if (!workflowJson || !workflowJson.nodes || !Array.isArray(workflowJson.nodes)) {
    return results;
  }

  for (const node of workflowJson.nodes) {
    // Check if node has parameters with special types
    if (!node.parameters || typeof node.parameters !== 'object') {
      continue;
    }

    const specialProperties: Array<{ name: string; type: NodePropertyTypes; value: any }> = [];

    // Check each parameter against our special types
    for (const [paramName, paramValue] of Object.entries(node.parameters)) {
      // Try to infer type from structure
      const inferredType = inferPropertyType(paramValue);

      if (inferredType && SPECIAL_TYPES.includes(inferredType)) {
        specialProperties.push({
          name: paramName,
          type: inferredType,
          value: paramValue,
        });
      }
    }

    if (specialProperties.length > 0) {
      results.push({
        nodeId: node.id,
        nodeName: node.name,
        nodeType: node.type,
        properties: specialProperties,
      });
    }
  }

  return results;
}

function inferPropertyType(value: any): NodePropertyTypes | null {
  if (!value || typeof value !== 'object') {
    return null;
  }

  // Filter type: has combinator and conditions
  if (value.combinator && value.conditions) {
    return 'filter';
  }

  // ResourceMapper type: has mappingMode
  if (value.mappingMode) {
    return 'resourceMapper';
  }

  // AssignmentCollection type: has assignments array
  if (value.assignments && Array.isArray(value.assignments)) {
    return 'assignmentCollection';
  }

  // ResourceLocator type: has mode and value
  if (value.mode && value.hasOwnProperty('value')) {
    return 'resourceLocator';
  }

  return null;
}

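// Illustration (not part of the original script): under the heuristics above,
// parameter values are classified by shape alone, e.g.
//   { combinator: 'and', conditions: [ /* ... */ ] }      → 'filter'
//   { mappingMode: 'defineBelow', value: { /* ... */ } }  → 'resourceMapper'
//   { assignments: [ /* ... */ ] }                        → 'assignmentCollection'
//   { mode: 'id', value: 'abc123' }                       → 'resourceLocator'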
async function validateTemplate(
  templateId: number,
  templateName: string,
  templateViews: number,
  workflowJson: any
): Promise<ValidationResult[]> {
  const results: ValidationResult[] = [];

  // Extract nodes with special types
  const nodesWithSpecialTypes = extractNodesWithSpecialTypes(workflowJson);

  for (const node of nodesWithSpecialTypes) {
    for (const prop of node.properties) {
      const startTime = Date.now();

      // Create property definition for validation
      const properties = [
        {
          name: prop.name,
          type: prop.type,
          required: true,
          displayName: prop.name,
          default: {},
        },
      ];

      // Create config with just this property
      const config = {
        [prop.name]: prop.value,
      };

      try {
        // Run validation
        const validationResult = EnhancedConfigValidator.validateWithMode(
          node.nodeType,
          config,
          properties,
          'operation',
          'ai-friendly'
        );

        const validationTimeMs = Date.now() - startTime;

        results.push({
          templateId,
          templateName,
          templateViews,
          nodeId: node.nodeId,
          nodeName: node.nodeName,
          nodeType: node.nodeType,
          propertyName: prop.name,
          propertyType: prop.type,
          valid: validationResult.valid,
          errors: validationResult.errors || [],
          warnings: validationResult.warnings || [],
          validationTimeMs,
        });
      } catch (error: any) {
        const validationTimeMs = Date.now() - startTime;

        results.push({
          templateId,
          templateName,
          templateViews,
          nodeId: node.nodeId,
          nodeName: node.nodeName,
          nodeType: node.nodeType,
          propertyName: prop.name,
          propertyType: prop.type,
          valid: false,
          errors: [
            {
              type: 'exception',
              property: prop.name,
              message: `Validation threw exception: ${error.message}`,
            },
          ],
          warnings: [],
          validationTimeMs,
        });
      }
    }
  }

  return results;
}

function calculateStats(results: ValidationResult[]): ValidationStats {
  const stats: ValidationStats = {
    totalTemplates: new Set(results.map(r => r.templateId)).size,
    totalNodes: new Set(results.map(r => `${r.templateId}-${r.nodeId}`)).size,
    totalValidations: results.length,
    passedValidations: results.filter(r => r.valid).length,
    failedValidations: results.filter(r => !r.valid).length,
    byType: {},
    byError: {},
    avgValidationTimeMs: 0,
    maxValidationTimeMs: 0,
  };

  // Stats by type
  for (const type of SPECIAL_TYPES) {
    const typeResults = results.filter(r => r.propertyType === type);
    stats.byType[type] = {
      passed: typeResults.filter(r => r.valid).length,
      failed: typeResults.filter(r => !r.valid).length,
    };
  }

  // Error frequency
  for (const result of results.filter(r => !r.valid)) {
    for (const error of result.errors) {
      const key = `${error.type}: ${error.message}`;
      stats.byError[key] = (stats.byError[key] || 0) + 1;
    }
  }

  // Performance stats
  if (results.length > 0) {
    stats.avgValidationTimeMs =
      results.reduce((sum, r) => sum + r.validationTimeMs, 0) / results.length;
    stats.maxValidationTimeMs = Math.max(...results.map(r => r.validationTimeMs));
  }

  return stats;
}

function printStats(stats: ValidationStats) {
  console.log('\n' + '='.repeat(80));
  console.log('VALIDATION STATISTICS');
  console.log('='.repeat(80) + '\n');

  console.log(`📊 Total Templates Tested: ${stats.totalTemplates}`);
  console.log(`📊 Total Nodes with Special Types: ${stats.totalNodes}`);
  console.log(`📊 Total Property Validations: ${stats.totalValidations}\n`);

  const passRate = (stats.passedValidations / stats.totalValidations * 100).toFixed(2);
  const failRate = (stats.failedValidations / stats.totalValidations * 100).toFixed(2);

  console.log(`✅ Passed: ${stats.passedValidations} (${passRate}%)`);
  console.log(`❌ Failed: ${stats.failedValidations} (${failRate}%)\n`);

  console.log('By Property Type:');
  console.log('-'.repeat(80));
  for (const [type, counts] of Object.entries(stats.byType)) {
    const total = counts.passed + counts.failed;
    if (total === 0) {
      console.log(`  ${type}: No occurrences found`);
    } else {
      const typePassRate = (counts.passed / total * 100).toFixed(2);
      console.log(`  ${type}: ${counts.passed}/${total} passed (${typePassRate}%)`);
    }
  }

  console.log('\n⚡ Performance:');
  console.log('-'.repeat(80));
  console.log(`  Average validation time: ${stats.avgValidationTimeMs.toFixed(2)}ms`);
  console.log(`  Maximum validation time: ${stats.maxValidationTimeMs.toFixed(2)}ms`);

  const meetsTarget = stats.avgValidationTimeMs < 50;
  console.log(`  Target (<50ms): ${meetsTarget ? '✅ MET' : '❌ NOT MET'}\n`);

  if (Object.keys(stats.byError).length > 0) {
    console.log('🔍 Most Common Errors:');
    console.log('-'.repeat(80));

    const sortedErrors = Object.entries(stats.byError)
      .sort((a, b) => b[1] - a[1])
      .slice(0, 10);

    for (const [error, count] of sortedErrors) {
      console.log(`  ${count}x: ${error}`);
    }
  }
}

function printFailures(results: ValidationResult[], maxFailures: number = 20) {
  const failures = results.filter(r => !r.valid);

  if (failures.length === 0) {
    console.log('\n✨ No failures! All validations passed.\n');
    return;
  }

  console.log('\n' + '='.repeat(80));
  console.log(`VALIDATION FAILURES (showing first ${Math.min(maxFailures, failures.length)})`);
  console.log('='.repeat(80) + '\n');

  for (let i = 0; i < Math.min(maxFailures, failures.length); i++) {
    const failure = failures[i];

    console.log(`Failure ${i + 1}/${failures.length}:`);
    console.log(`  Template: ${failure.templateName} (ID: ${failure.templateId}, Views: ${failure.templateViews})`);
    console.log(`  Node: ${failure.nodeName} (${failure.nodeType})`);
    console.log(`  Property: ${failure.propertyName} (type: ${failure.propertyType})`);
    console.log(`  Errors:`);

    for (const error of failure.errors) {
      console.log(`    - [${error.type}] ${error.property}: ${error.message}`);
    }

    if (failure.warnings.length > 0) {
      console.log(`  Warnings:`);
      for (const warning of failure.warnings) {
        console.log(`    - [${warning.type}] ${warning.property}: ${warning.message}`);
      }
    }

    console.log('');
  }

  if (failures.length > maxFailures) {
    console.log(`... and ${failures.length - maxFailures} more failures\n`);
  }
}

async function main() {
  console.log('='.repeat(80));
  console.log('PHASE 3: REAL-WORLD TYPE STRUCTURE VALIDATION');
  console.log('='.repeat(80) + '\n');

  // Initialize database
  console.log('🔌 Connecting to database...');
  const db = await createDatabaseAdapter('./data/nodes.db');
  console.log('✓ Database connected\n');

  // Load templates
  const templates = await loadTopTemplates(db, 100);

  // Validate each template
  console.log('🔍 Validating templates...\n');

  const allResults: ValidationResult[] = [];
  let processedCount = 0;
  let nodesFound = 0;

  for (const template of templates) {
    processedCount++;

    let workflowJson;
    try {
      workflowJson = decompressWorkflow(template.workflow_json_compressed);
    } catch (error) {
      console.warn(`⚠️  Template ${template.id}: Decompression failed, skipping`);
      continue;
    }

    const results = await validateTemplate(
      template.id,
      template.name,
      template.views,
      workflowJson
    );

    if (results.length > 0) {
      nodesFound += new Set(results.map(r => r.nodeId)).size;
      allResults.push(...results);

      const passedCount = results.filter(r => r.valid).length;
      const status = passedCount === results.length ? '✓' : '✗';
      console.log(
        `${status} Template ${processedCount}/${templates.length}: ` +
        `"${template.name}" (${results.length} validations, ${passedCount} passed)`
      );
    }
  }

  console.log(`\n✓ Processed ${processedCount} templates`);
  console.log(`✓ Found ${nodesFound} nodes with special types\n`);

  // Calculate and print statistics
  const stats = calculateStats(allResults);
  printStats(stats);

  // Print detailed failures
  printFailures(allResults);

  // Success criteria check
  console.log('='.repeat(80));
  console.log('SUCCESS CRITERIA CHECK');
  console.log('='.repeat(80) + '\n');

  const passRate = (stats.passedValidations / stats.totalValidations * 100);
  const falsePositiveRate = (stats.failedValidations / stats.totalValidations * 100);
  const avgTime = stats.avgValidationTimeMs;

  console.log(`Pass Rate: ${passRate.toFixed(2)}% (target: >95%) ${passRate > 95 ? '✅' : '❌'}`);
  console.log(`False Positive Rate: ${falsePositiveRate.toFixed(2)}% (target: <5%) ${falsePositiveRate < 5 ? '✅' : '❌'}`);
  console.log(`Avg Validation Time: ${avgTime.toFixed(2)}ms (target: <50ms) ${avgTime < 50 ? '✅' : '❌'}\n`);

  const allCriteriaMet = passRate > 95 && falsePositiveRate < 5 && avgTime < 50;

  if (allCriteriaMet) {
    console.log('🎉 ALL SUCCESS CRITERIA MET! Phase 3 validation complete.\n');
  } else {
    console.log('⚠️  Some success criteria not met. Iteration required.\n');
  }

  // Close database
  db.close();

  process.exit(allCriteriaMet ? 0 : 1);
}

// Run the script
main().catch((error) => {
  console.error('Fatal error:', error);
  process.exit(1);
});
@@ -23,7 +23,7 @@ async function testIntegration() {

// Track errors
console.log('Tracking errors...');
telemetry.trackError('ValidationError', 'workflow_validation', 'validate_workflow');
telemetry.trackError('ValidationError', 'workflow_validation', 'validate_workflow', 'Required field missing: nodes array is empty');

// Track a test workflow
console.log('Tracking workflow creation...');
scripts/test-user-id-persistence.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
/**
 * Test User ID Persistence
 * Verifies that user IDs are consistent across sessions and modes
 */

import { TelemetryConfigManager } from '../src/telemetry/config-manager';
import { hostname, platform, arch, homedir } from 'os';
import { createHash } from 'crypto';

console.log('=== User ID Persistence Test ===\n');

// Test 1: Verify deterministic ID generation
console.log('Test 1: Deterministic ID Generation');
console.log('-----------------------------------');

const machineId = `${hostname()}-${platform()}-${arch()}-${homedir()}`;
const expectedUserId = createHash('sha256')
  .update(machineId)
  .digest('hex')
  .substring(0, 16);

console.log('Machine characteristics:');
console.log('  hostname:', hostname());
console.log('  platform:', platform());
console.log('  arch:', arch());
console.log('  homedir:', homedir());
console.log('\nGenerated machine ID:', machineId);
console.log('Expected user ID:', expectedUserId);

// Test 2: Load actual config
console.log('\n\nTest 2: Actual Config Manager');
console.log('-----------------------------------');

const configManager = TelemetryConfigManager.getInstance();
const actualUserId = configManager.getUserId();
const config = configManager.loadConfig();

console.log('Actual user ID:', actualUserId);
console.log('Config first run:', config.firstRun || 'Unknown');
console.log('Config version:', config.version || 'Unknown');
console.log('Telemetry enabled:', config.enabled);

// Test 3: Verify consistency
console.log('\n\nTest 3: Consistency Check');
console.log('-----------------------------------');

const match = actualUserId === expectedUserId;
console.log('User IDs match:', match ? '✓ YES' : '✗ NO');

if (!match) {
  console.log('WARNING: User ID mismatch detected!');
  console.log('This could indicate an implementation issue.');
}

// Test 4: Multiple loads (simulate multiple sessions)
console.log('\n\nTest 4: Multiple Session Simulation');
console.log('-----------------------------------');

const userId1 = configManager.getUserId();
const userId2 = TelemetryConfigManager.getInstance().getUserId();
const userId3 = configManager.getUserId();

console.log('Session 1 user ID:', userId1);
console.log('Session 2 user ID:', userId2);
console.log('Session 3 user ID:', userId3);

const consistent = userId1 === userId2 && userId2 === userId3;
console.log('All sessions consistent:', consistent ? '✓ YES' : '✗ NO');

// Test 5: Docker environment simulation
console.log('\n\nTest 5: Docker Environment Check');
console.log('-----------------------------------');

const isDocker = process.env.IS_DOCKER === 'true';
console.log('Running in Docker:', isDocker);

if (isDocker) {
  console.log('\n⚠️  DOCKER MODE DETECTED');
  console.log('In Docker, user IDs may change across container recreations because:');
  console.log('  1. Container hostname changes each time');
  console.log('  2. Config file is not persisted (no volume mount)');
  console.log('  3. Each container gets a new ephemeral filesystem');
  console.log('\nRecommendation: Mount ~/.n8n-mcp as a volume for persistent user IDs');
}

// Test 6: Environment variable override check
console.log('\n\nTest 6: Environment Variable Override');
console.log('-----------------------------------');

const telemetryDisabledVars = [
  'N8N_MCP_TELEMETRY_DISABLED',
  'TELEMETRY_DISABLED',
  'DISABLE_TELEMETRY'
];

telemetryDisabledVars.forEach(varName => {
  const value = process.env[varName];
  if (value !== undefined) {
    console.log(`${varName}:`, value);
  }
});

console.log('\nTelemetry status:', configManager.isEnabled() ? 'ENABLED' : 'DISABLED');

// Summary
console.log('\n\n=== SUMMARY ===');
console.log('User ID:', actualUserId);
console.log('Deterministic:', match ? 'YES ✓' : 'NO ✗');
console.log('Persistent across sessions:', consistent ? 'YES ✓' : 'NO ✗');
console.log('Telemetry enabled:', config.enabled ? 'YES' : 'NO');
console.log('Docker mode:', isDocker ? 'YES' : 'NO');

if (isDocker && !process.env.N8N_MCP_CONFIG_VOLUME) {
  console.log('\n⚠️  WARNING: Running in Docker without persistent volume!');
  console.log('User IDs will change on container recreation.');
  console.log('Mount /home/nodejs/.n8n-mcp to persist telemetry config.');
}

console.log('\n');
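Taken together, Tests 1–4 reduce to a single invariant: the ID handed out by the config manager equals the hash of the machine characteristics, on every call. Restated compactly using only the values and API exercised above:

const id = TelemetryConfigManager.getInstance().getUserId();
// Must equal the sha256-derived value computed in Test 1 and the repeated lookups in Test 4.
console.assert(id === expectedUserId && id === userId3);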
scripts/test-workflow-versioning.ts (new file, 287 lines)
@@ -0,0 +1,287 @@
#!/usr/bin/env node
/**
 * Test Workflow Versioning System
 *
 * Tests the complete workflow rollback and versioning functionality:
 * - Automatic backup creation
 * - Auto-pruning to 10 versions
 * - Version history retrieval
 * - Rollback with validation
 * - Manual pruning and cleanup
 * - Storage statistics
 */

import { NodeRepository } from '../src/database/node-repository';
import { createDatabaseAdapter } from '../src/database/database-adapter';
import { WorkflowVersioningService } from '../src/services/workflow-versioning-service';
import { logger } from '../src/utils/logger';
import { existsSync } from 'fs';
import * as path from 'path';

// Mock workflow for testing
const createMockWorkflow = (id: string, name: string, nodeCount: number = 3) => ({
  id,
  name,
  active: false,
  nodes: Array.from({ length: nodeCount }, (_, i) => ({
    id: `node-${i}`,
    name: `Node ${i}`,
    type: 'n8n-nodes-base.set',
    typeVersion: 1,
    position: [250 + i * 200, 300],
    parameters: { values: { string: [{ name: `field${i}`, value: `value${i}` }] } }
  })),
  connections: nodeCount > 1 ? {
    'node-0': { main: [[{ node: 'node-1', type: 'main', index: 0 }]] },
    ...(nodeCount > 2 && { 'node-1': { main: [[{ node: 'node-2', type: 'main', index: 0 }]] } })
  } : {},
  settings: {}
});

async function runTests() {
  console.log('🧪 Testing Workflow Versioning System\n');

  // Find database path
  const possiblePaths = [
    path.join(process.cwd(), 'data', 'nodes.db'),
    path.join(__dirname, '../../data', 'nodes.db'),
    './data/nodes.db'
  ];

  let dbPath: string | null = null;
  for (const p of possiblePaths) {
    if (existsSync(p)) {
      dbPath = p;
      break;
    }
  }

  if (!dbPath) {
    console.error('❌ Database not found. Please run npm run rebuild first.');
    process.exit(1);
  }

  console.log(`📁 Using database: ${dbPath}\n`);

  // Initialize repository
  const db = await createDatabaseAdapter(dbPath);
  const repository = new NodeRepository(db);
  const service = new WorkflowVersioningService(repository);

  const workflowId = 'test-workflow-001';
  let testsPassed = 0;
  let testsFailed = 0;

  try {
    // Test 1: Create initial backup
    console.log('📝 Test 1: Create initial backup');
    const workflow1 = createMockWorkflow(workflowId, 'Test Workflow v1', 3);
    const backup1 = await service.createBackup(workflowId, workflow1, {
      trigger: 'partial_update',
      operations: [{ type: 'addNode', node: workflow1.nodes[0] }]
    });

    if (backup1.versionId && backup1.versionNumber === 1 && backup1.pruned === 0) {
      console.log('✅ Initial backup created successfully');
      console.log(`   Version ID: ${backup1.versionId}, Version Number: ${backup1.versionNumber}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to create initial backup');
      testsFailed++;
    }

    // Test 2: Create multiple backups to test auto-pruning
    console.log('\n📝 Test 2: Create 12 backups to test auto-pruning (should keep only 10)');
    for (let i = 2; i <= 12; i++) {
      const workflow = createMockWorkflow(workflowId, `Test Workflow v${i}`, 3 + i);
      await service.createBackup(workflowId, workflow, {
        trigger: i % 3 === 0 ? 'full_update' : 'partial_update',
        operations: [{ type: 'addNode', node: { id: `node-${i}` } }]
      });
    }

    const versions = await service.getVersionHistory(workflowId, 100);
    if (versions.length === 10) {
      console.log(`✅ Auto-pruning works correctly (kept exactly 10 versions)`);
      console.log(`   Latest version: ${versions[0].versionNumber}, Oldest: ${versions[9].versionNumber}`);
      testsPassed++;
    } else {
      console.log(`❌ Auto-pruning failed (expected 10 versions, got ${versions.length})`);
      testsFailed++;
    }

    // Test 3: Get version history
    console.log('\n📝 Test 3: Get version history');
    const history = await service.getVersionHistory(workflowId, 5);
    if (history.length === 5 && history[0].versionNumber > history[4].versionNumber) {
      console.log(`✅ Version history retrieved successfully (${history.length} versions)`);
      console.log('   Recent versions:');
      history.forEach(v => {
        console.log(`   - v${v.versionNumber} (${v.trigger}) - ${v.workflowName} - ${(v.size / 1024).toFixed(2)} KB`);
      });
      testsPassed++;
    } else {
      console.log('❌ Failed to get version history');
      testsFailed++;
    }

    // Test 4: Get specific version
    console.log('\n📝 Test 4: Get specific version details');
    const specificVersion = await service.getVersion(history[2].id);
    if (specificVersion && specificVersion.workflowSnapshot) {
      console.log(`✅ Retrieved version ${specificVersion.versionNumber} successfully`);
      console.log(`   Workflow name: ${specificVersion.workflowName}`);
      console.log(`   Node count: ${specificVersion.workflowSnapshot.nodes.length}`);
      console.log(`   Trigger: ${specificVersion.trigger}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to get specific version');
      testsFailed++;
    }

    // Test 5: Compare two versions
    console.log('\n📝 Test 5: Compare two versions');
    if (history.length >= 2) {
      const diff = await service.compareVersions(history[0].id, history[1].id);
      console.log(`✅ Version comparison successful`);
      console.log(`   Comparing v${diff.version1Number} → v${diff.version2Number}`);
      console.log(`   Added nodes: ${diff.addedNodes.length}`);
      console.log(`   Removed nodes: ${diff.removedNodes.length}`);
      console.log(`   Modified nodes: ${diff.modifiedNodes.length}`);
      console.log(`   Connection changes: ${diff.connectionChanges}`);
      testsPassed++;
    } else {
      console.log('❌ Not enough versions to compare');
      testsFailed++;
    }

    // Test 6: Manual pruning
    console.log('\n📝 Test 6: Manual pruning (keep only 5 versions)');
    const pruneResult = await service.pruneVersions(workflowId, 5);
    if (pruneResult.pruned === 5 && pruneResult.remaining === 5) {
      console.log(`✅ Manual pruning successful`);
      console.log(`   Pruned: ${pruneResult.pruned} versions, Remaining: ${pruneResult.remaining}`);
      testsPassed++;
    } else {
      console.log(`❌ Manual pruning failed (expected 5 pruned, 5 remaining, got ${pruneResult.pruned} pruned, ${pruneResult.remaining} remaining)`);
      testsFailed++;
    }

    // Test 7: Storage statistics
    console.log('\n📝 Test 7: Storage statistics');
    const stats = await service.getStorageStats();
    if (stats.totalVersions > 0 && stats.byWorkflow.length > 0) {
      console.log(`✅ Storage stats retrieved successfully`);
      console.log(`   Total versions: ${stats.totalVersions}`);
      console.log(`   Total size: ${stats.totalSizeFormatted}`);
      console.log(`   Workflows with versions: ${stats.byWorkflow.length}`);
      stats.byWorkflow.forEach(w => {
        console.log(`   - ${w.workflowName}: ${w.versionCount} versions, ${w.totalSizeFormatted}`);
      });
      testsPassed++;
    } else {
      console.log('❌ Failed to get storage stats');
      testsFailed++;
    }

    // Test 8: Delete specific version
    console.log('\n📝 Test 8: Delete specific version');
    const versionsBeforeDelete = await service.getVersionHistory(workflowId, 100);
    const versionToDelete = versionsBeforeDelete[versionsBeforeDelete.length - 1];
    const deleteResult = await service.deleteVersion(versionToDelete.id);
    const versionsAfterDelete = await service.getVersionHistory(workflowId, 100);

    if (deleteResult.success && versionsAfterDelete.length === versionsBeforeDelete.length - 1) {
      console.log(`✅ Version deletion successful`);
      console.log(`   Deleted version ${versionToDelete.versionNumber}`);
      console.log(`   Remaining versions: ${versionsAfterDelete.length}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to delete version');
      testsFailed++;
    }

    // Test 9: Test different trigger types
    console.log('\n📝 Test 9: Test different trigger types');
    const workflow2 = createMockWorkflow(workflowId, 'Test Workflow Autofix', 2);
    const backupAutofix = await service.createBackup(workflowId, workflow2, {
      trigger: 'autofix',
      fixTypes: ['expression-format', 'typeversion-correction']
    });

    const workflow3 = createMockWorkflow(workflowId, 'Test Workflow Full Update', 4);
    const backupFull = await service.createBackup(workflowId, workflow3, {
      trigger: 'full_update',
      metadata: { reason: 'Major refactoring' }
    });

    const allVersions = await service.getVersionHistory(workflowId, 100);
    const autofixVersions = allVersions.filter(v => v.trigger === 'autofix');
    const fullUpdateVersions = allVersions.filter(v => v.trigger === 'full_update');
    const partialUpdateVersions = allVersions.filter(v => v.trigger === 'partial_update');

    if (autofixVersions.length > 0 && fullUpdateVersions.length > 0 && partialUpdateVersions.length > 0) {
      console.log(`✅ All trigger types working correctly`);
      console.log(`   Partial updates: ${partialUpdateVersions.length}`);
      console.log(`   Full updates: ${fullUpdateVersions.length}`);
      console.log(`   Autofixes: ${autofixVersions.length}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to create versions with different trigger types');
      testsFailed++;
    }

    // Test 10: Cleanup - Delete all versions for workflow
    console.log('\n📝 Test 10: Delete all versions for workflow');
    const deleteAllResult = await service.deleteAllVersions(workflowId);
    const versionsAfterDeleteAll = await service.getVersionHistory(workflowId, 100);

    if (deleteAllResult.deleted > 0 && versionsAfterDeleteAll.length === 0) {
      console.log(`✅ Delete all versions successful`);
      console.log(`   Deleted ${deleteAllResult.deleted} versions`);
      testsPassed++;
    } else {
      console.log('❌ Failed to delete all versions');
      testsFailed++;
    }

    // Test 11: Truncate all versions (requires confirmation)
    console.log('\n📝 Test 11: Test truncate without confirmation');
    const truncateResult1 = await service.truncateAllVersions(false);
    if (truncateResult1.deleted === 0 && truncateResult1.message.includes('not confirmed')) {
      console.log(`✅ Truncate safety check works (requires confirmation)`);
      testsPassed++;
    } else {
      console.log('❌ Truncate safety check failed');
      testsFailed++;
    }

    // Summary
    console.log('\n' + '='.repeat(60));
    console.log('📊 Test Summary');
    console.log('='.repeat(60));
    console.log(`✅ Passed: ${testsPassed}`);
    console.log(`❌ Failed: ${testsFailed}`);
    console.log(`📈 Success Rate: ${((testsPassed / (testsPassed + testsFailed)) * 100).toFixed(1)}%`);
    console.log('='.repeat(60));

    if (testsFailed === 0) {
      console.log('\n🎉 All tests passed! Workflow versioning system is working correctly.');
      process.exit(0);
    } else {
      console.log('\n⚠️  Some tests failed. Please review the implementation.');
      process.exit(1);
    }

  } catch (error: any) {
    console.error('\n❌ Test suite failed with error:', error.message);
    console.error(error.stack);
    process.exit(1);
  }
}

// Run tests
runTests().catch(error => {
  console.error('Fatal error:', error);
  process.exit(1);
});
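The header lists rollback with validation among the covered features, but none of the eleven tests above exercises it. A hypothetical call (the method name and behavior are assumptions, not the service's confirmed API) would pair the history lookup with a restore:

const target = (await service.getVersionHistory(workflowId, 10))[3];
// Hypothetical API: re-validate the stored snapshot, then restore it.
await service.rollbackToVersion(workflowId, target.id);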
src/constants/type-structures.ts (new file, 741 lines)
@@ -0,0 +1,741 @@
/**
 * Type Structure Constants
 *
 * Complete definitions for all n8n NodePropertyTypes.
 * These structures define the expected data format, JavaScript type,
 * validation rules, and examples for each property type.
 *
 * Based on n8n-workflow v1.120.3 NodePropertyTypes
 *
 * @module constants/type-structures
 * @since 2.23.0
 */

import type { NodePropertyTypes } from 'n8n-workflow';
import type { TypeStructure } from '../types/type-structures';

/**
 * Complete type structure definitions for all 22 NodePropertyTypes
 *
 * Each entry defines:
 * - type: Category (primitive/object/collection/special)
 * - jsType: Underlying JavaScript type
 * - description: What this type represents
 * - structure: Expected data shape (for complex types)
 * - example: Working example value
 * - validation: Type-specific validation rules
 *
 * @constant
 */
export const TYPE_STRUCTURES: Record<NodePropertyTypes, TypeStructure> = {
  // ============================================================================
  // PRIMITIVE TYPES - Simple JavaScript values
  // ============================================================================

  string: {
    type: 'primitive',
    jsType: 'string',
    description: 'A text value that can contain any characters',
    example: 'Hello World',
    examples: ['', 'A simple text', '{{ $json.name }}', 'https://example.com'],
    validation: {
      allowEmpty: true,
      allowExpressions: true,
    },
    notes: ['Most common property type', 'Supports n8n expressions'],
  },

  number: {
    type: 'primitive',
    jsType: 'number',
    description: 'A numeric value (integer or decimal)',
    example: 42,
    examples: [0, -10, 3.14, 100],
    validation: {
      allowEmpty: false,
      allowExpressions: true,
    },
    notes: ['Can be constrained with min/max in typeOptions'],
  },

  boolean: {
    type: 'primitive',
    jsType: 'boolean',
    description: 'A true/false toggle value',
    example: true,
    examples: [true, false],
    validation: {
      allowEmpty: false,
      allowExpressions: false,
    },
    notes: ['Rendered as checkbox in n8n UI'],
  },

  dateTime: {
    type: 'primitive',
    jsType: 'string',
    description: 'A date and time value in ISO 8601 format',
    example: '2024-01-20T10:30:00Z',
    examples: [
      '2024-01-20T10:30:00Z',
      '2024-01-20',
      '{{ $now }}',
    ],
    validation: {
      allowEmpty: false,
      allowExpressions: true,
      pattern: '^\\d{4}-\\d{2}-\\d{2}(T\\d{2}:\\d{2}:\\d{2}(\\.\\d{3})?Z?)?$',
    },
    notes: ['Accepts ISO 8601 format', 'Can use n8n date expressions'],
  },

  color: {
    type: 'primitive',
    jsType: 'string',
    description: 'A color value in hex format',
    example: '#FF5733',
    examples: ['#FF5733', '#000000', '#FFFFFF', '{{ $json.color }}'],
    validation: {
      allowEmpty: false,
      allowExpressions: true,
      pattern: '^#[0-9A-Fa-f]{6}$',
    },
    notes: ['Must be 6-digit hex color', 'Rendered with color picker in UI'],
  },

  json: {
    type: 'primitive',
    jsType: 'string',
    description: 'A JSON string that can be parsed into any structure',
    example: '{"key": "value", "nested": {"data": 123}}',
    examples: [
      '{}',
      '{"name": "John", "age": 30}',
      '[1, 2, 3]',
      '{{ $json }}',
    ],
    validation: {
      allowEmpty: false,
      allowExpressions: true,
    },
    notes: ['Must be valid JSON when parsed', 'Often used for custom payloads'],
  },

  // ============================================================================
  // OPTION TYPES - Selection from predefined choices
  // ============================================================================

  options: {
    type: 'primitive',
    jsType: 'string',
    description: 'Single selection from a list of predefined options',
    example: 'option1',
    examples: ['GET', 'POST', 'channelMessage', 'update'],
    validation: {
      allowEmpty: false,
      allowExpressions: false,
    },
    notes: [
      'Value must match one of the defined option values',
      'Rendered as dropdown in UI',
      'Options defined in property.options array',
    ],
  },

  multiOptions: {
    type: 'array',
    jsType: 'array',
    description: 'Multiple selections from a list of predefined options',
    structure: {
      items: {
        type: 'string',
        description: 'Selected option value',
      },
    },
    example: ['option1', 'option2'],
    examples: [[], ['GET', 'POST'], ['read', 'write', 'delete']],
    validation: {
      allowEmpty: true,
      allowExpressions: false,
    },
    notes: [
      'Array of option values',
      'Each value must exist in property.options',
      'Rendered as multi-select dropdown',
    ],
  },

  // ============================================================================
  // COLLECTION TYPES - Complex nested structures
  // ============================================================================

  collection: {
    type: 'collection',
    jsType: 'object',
    description: 'A group of related properties with dynamic values',
    structure: {
      properties: {
        '<propertyName>': {
          type: 'any',
          description: 'Any nested property from the collection definition',
        },
      },
      flexible: true,
    },
    example: {
      name: 'John Doe',
      email: 'john@example.com',
      age: 30,
    },
    examples: [
      {},
      { key1: 'value1', key2: 123 },
      { nested: { deep: { value: true } } },
    ],
    validation: {
      allowEmpty: true,
      allowExpressions: true,
    },
    notes: [
      'Properties defined in property.values array',
      'Each property can be any type',
      'UI renders as expandable section',
    ],
  },

  fixedCollection: {
    type: 'collection',
    jsType: 'object',
    description: 'A collection with predefined groups of properties',
    structure: {
      properties: {
        '<collectionName>': {
          type: 'array',
          description: 'Array of collection items',
          items: {
            type: 'object',
            description: 'Collection item with defined properties',
          },
        },
      },
      required: [],
    },
    example: {
      headers: [
        { name: 'Content-Type', value: 'application/json' },
        { name: 'Authorization', value: 'Bearer token' },
      ],
    },
    examples: [
      {},
      { queryParameters: [{ name: 'id', value: '123' }] },
      {
        headers: [{ name: 'Accept', value: '*/*' }],
        queryParameters: [{ name: 'limit', value: '10' }],
      },
    ],
    validation: {
      allowEmpty: true,
      allowExpressions: true,
    },
    notes: [
      'Each collection has predefined structure',
      'Often used for headers, parameters, etc.',
      'Supports multiple values per collection',
    ],
  },

  // ============================================================================
  // SPECIAL n8n TYPES - Advanced functionality
  // ============================================================================

  resourceLocator: {
    type: 'special',
    jsType: 'object',
    description: 'A flexible way to specify a resource by ID, name, URL, or list',
    structure: {
      properties: {
        mode: {
          type: 'string',
          description: 'How the resource is specified',
          enum: ['id', 'url', 'list'],
          required: true,
        },
        value: {
          type: 'string',
          description: 'The resource identifier',
          required: true,
        },
      },
      required: ['mode', 'value'],
    },
    example: {
      mode: 'id',
      value: 'abc123',
    },
    examples: [
      { mode: 'url', value: 'https://example.com/resource/123' },
      { mode: 'list', value: 'item-from-dropdown' },
      { mode: 'id', value: '{{ $json.resourceId }}' },
    ],
    validation: {
      allowEmpty: false,
      allowExpressions: true,
    },
    notes: [
      'Provides flexible resource selection',
      'Mode determines how value is interpreted',
      'UI adapts based on selected mode',
    ],
  },

resourceMapper: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Maps input data fields to resource fields with transformation options',
|
||||
structure: {
|
||||
properties: {
|
||||
mappingMode: {
|
||||
type: 'string',
|
||||
description: 'How fields are mapped',
|
||||
enum: ['defineBelow', 'autoMapInputData'],
|
||||
},
|
||||
value: {
|
||||
type: 'object',
|
||||
description: 'Field mappings',
|
||||
properties: {
|
||||
'<fieldName>': {
|
||||
type: 'string',
|
||||
description: 'Expression or value for this field',
|
||||
},
|
||||
},
|
||||
flexible: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
example: {
|
||||
mappingMode: 'defineBelow',
|
||||
value: {
|
||||
name: '{{ $json.fullName }}',
|
||||
email: '{{ $json.emailAddress }}',
|
||||
status: 'active',
|
||||
},
|
||||
},
|
||||
examples: [
|
||||
{ mappingMode: 'autoMapInputData', value: {} },
|
||||
{
|
||||
mappingMode: 'defineBelow',
|
||||
value: { id: '{{ $json.userId }}', name: '{{ $json.name }}' },
|
||||
},
|
||||
],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Complex mapping with UI assistance',
|
||||
'Can auto-map or manually define',
|
||||
'Supports field transformations',
|
||||
],
|
||||
},
|
||||
|
||||
filter: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Defines conditions for filtering data with boolean logic',
|
||||
structure: {
|
||||
properties: {
|
||||
conditions: {
|
||||
type: 'array',
|
||||
description: 'Array of filter conditions',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'Unique condition identifier',
|
||||
required: true,
|
||||
},
|
||||
leftValue: {
|
||||
type: 'any',
|
||||
description: 'Left side of comparison',
|
||||
},
|
||||
operator: {
|
||||
type: 'object',
|
||||
description: 'Comparison operator',
|
||||
required: true,
|
||||
properties: {
|
||||
type: {
|
||||
type: 'string',
|
||||
enum: ['string', 'number', 'boolean', 'dateTime', 'array', 'object'],
|
||||
required: true,
|
||||
},
|
||||
operation: {
|
||||
type: 'string',
|
||||
description: 'Operation to perform',
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
rightValue: {
|
||||
type: 'any',
|
||||
description: 'Right side of comparison',
|
||||
},
|
||||
},
|
||||
},
|
||||
required: true,
|
||||
},
|
||||
combinator: {
|
||||
type: 'string',
|
||||
description: 'How to combine conditions',
|
||||
enum: ['and', 'or'],
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['conditions', 'combinator'],
|
||||
},
|
||||
example: {
|
||||
conditions: [
|
||||
{
|
||||
id: 'abc-123',
|
||||
leftValue: '{{ $json.status }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'active',
|
||||
},
|
||||
],
|
||||
combinator: 'and',
|
||||
},
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Advanced filtering UI in n8n',
|
||||
'Supports complex boolean logic',
|
||||
'Operations vary by data type',
|
||||
],
|
||||
},
|
||||
|
||||
assignmentCollection: {
|
||||
type: 'special',
|
||||
jsType: 'object',
|
||||
description: 'Defines variable assignments with expressions',
|
||||
structure: {
|
||||
properties: {
|
||||
assignments: {
|
||||
type: 'array',
|
||||
description: 'Array of variable assignments',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'Unique assignment identifier',
|
||||
required: true,
|
||||
},
|
||||
name: {
|
||||
type: 'string',
|
||||
description: 'Variable name',
|
||||
required: true,
|
||||
},
|
||||
value: {
|
||||
type: 'any',
|
||||
description: 'Value to assign',
|
||||
required: true,
|
||||
},
|
||||
type: {
|
||||
type: 'string',
|
||||
description: 'Data type of the value',
|
||||
enum: ['string', 'number', 'boolean', 'array', 'object'],
|
||||
},
|
||||
},
|
||||
},
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
required: ['assignments'],
|
||||
},
|
||||
example: {
|
||||
assignments: [
|
||||
{
|
||||
id: 'abc-123',
|
||||
name: 'userName',
|
||||
value: '{{ $json.name }}',
|
||||
type: 'string',
|
||||
},
|
||||
{
|
||||
id: 'def-456',
|
||||
name: 'userAge',
|
||||
value: 30,
|
||||
type: 'number',
|
||||
},
|
||||
],
|
||||
},
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Used in Set node and similar',
|
||||
'Each assignment can use expressions',
|
||||
'Type helps with validation',
|
||||
],
|
||||
},
|
||||
|
||||
// ============================================================================
|
||||
// CREDENTIAL TYPES - Authentication and credentials
|
||||
// ============================================================================
|
||||
|
||||
credentials: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Reference to credential configuration',
|
||||
example: 'googleSheetsOAuth2Api',
|
||||
examples: ['httpBasicAuth', 'slackOAuth2Api', 'postgresApi'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'References credential type name',
|
||||
'Credential must be configured in n8n',
|
||||
'Type name matches credential definition',
|
||||
],
|
||||
},
|
||||
|
||||
credentialsSelect: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Dropdown to select from available credentials',
|
||||
example: 'credential-id-123',
|
||||
examples: ['cred-abc', 'cred-def', '{{ $credentials.id }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'User selects from configured credentials',
|
||||
'Returns credential ID',
|
||||
'Used when multiple credential instances exist',
|
||||
],
|
||||
},
|
||||
|
||||
// ============================================================================
|
||||
// UI-ONLY TYPES - Display elements without data
|
||||
// ============================================================================
|
||||
|
||||
hidden: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Hidden property not shown in UI (used for internal logic)',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Not rendered in UI',
|
||||
'Can store metadata or computed values',
|
||||
'Often used for version tracking',
|
||||
],
|
||||
},
|
||||
|
||||
button: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Clickable button that triggers an action',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Triggers action when clicked',
|
||||
'Does not store a value',
|
||||
'Action defined in routing property',
|
||||
],
|
||||
},
|
||||
|
||||
callout: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Informational message box (warning, info, success, error)',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Display-only, no value stored',
|
||||
'Used for warnings and hints',
|
||||
'Style controlled by typeOptions',
|
||||
],
|
||||
},
|
||||
|
||||
notice: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Notice message displayed to user',
|
||||
example: '',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: ['Similar to callout', 'Display-only element', 'Provides contextual information'],
|
||||
},
|
||||
|
||||
// ============================================================================
|
||||
// UTILITY TYPES - Special-purpose functionality
|
||||
// ============================================================================
|
||||
|
||||
workflowSelector: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Dropdown to select another workflow',
|
||||
example: 'workflow-123',
|
||||
examples: ['wf-abc', '{{ $json.workflowId }}'],
|
||||
validation: {
|
||||
allowEmpty: false,
|
||||
allowExpressions: true,
|
||||
},
|
||||
notes: [
|
||||
'Selects from available workflows',
|
||||
'Returns workflow ID',
|
||||
'Used in Execute Workflow node',
|
||||
],
|
||||
},
|
||||
|
||||
curlImport: {
|
||||
type: 'special',
|
||||
jsType: 'string',
|
||||
description: 'Import configuration from cURL command',
|
||||
example: 'curl -X GET https://api.example.com/data',
|
||||
validation: {
|
||||
allowEmpty: true,
|
||||
allowExpressions: false,
|
||||
},
|
||||
notes: [
|
||||
'Parses cURL command to populate fields',
|
||||
'Used in HTTP Request node',
|
||||
'One-time import feature',
|
||||
],
|
||||
},
|
||||
};

/**
 * Real-world examples for complex types
 *
 * These examples come from actual n8n workflows and demonstrate
 * correct usage patterns for complex property types.
 *
 * @constant
 */
export const COMPLEX_TYPE_EXAMPLES = {
  collection: {
    basic: {
      name: 'John Doe',
      email: 'john@example.com',
    },
    nested: {
      user: {
        firstName: 'Jane',
        lastName: 'Smith',
      },
      preferences: {
        theme: 'dark',
        notifications: true,
      },
    },
    withExpressions: {
      id: '{{ $json.userId }}',
      timestamp: '{{ $now }}',
      data: '{{ $json.payload }}',
    },
  },

  fixedCollection: {
    httpHeaders: {
      headers: [
        { name: 'Content-Type', value: 'application/json' },
        { name: 'Authorization', value: 'Bearer {{ $credentials.token }}' },
      ],
    },
    queryParameters: {
      queryParameters: [
        { name: 'page', value: '1' },
        { name: 'limit', value: '100' },
      ],
    },
    multipleCollections: {
      headers: [{ name: 'Accept', value: 'application/json' }],
      queryParameters: [{ name: 'filter', value: 'active' }],
    },
  },

  filter: {
    simple: {
      conditions: [
        {
          id: '1',
          leftValue: '{{ $json.status }}',
          operator: { type: 'string', operation: 'equals' },
          rightValue: 'active',
        },
      ],
      combinator: 'and',
    },
    complex: {
      conditions: [
        {
          id: '1',
          leftValue: '{{ $json.age }}',
          operator: { type: 'number', operation: 'gt' },
          rightValue: 18,
        },
        {
          id: '2',
          leftValue: '{{ $json.country }}',
          operator: { type: 'string', operation: 'equals' },
          rightValue: 'US',
        },
      ],
      combinator: 'and',
    },
  },

  resourceMapper: {
    autoMap: {
      mappingMode: 'autoMapInputData',
      value: {},
    },
    manual: {
      mappingMode: 'defineBelow',
      value: {
        firstName: '{{ $json.first_name }}',
        lastName: '{{ $json.last_name }}',
        email: '{{ $json.email_address }}',
        status: 'active',
      },
    },
  },

  assignmentCollection: {
    basic: {
      assignments: [
        {
          id: '1',
          name: 'fullName',
          value: '{{ $json.firstName }} {{ $json.lastName }}',
          type: 'string',
        },
      ],
    },
    multiple: {
      assignments: [
        { id: '1', name: 'userName', value: '{{ $json.name }}', type: 'string' },
        { id: '2', name: 'userAge', value: '{{ $json.age }}', type: 'number' },
        { id: '3', name: 'isActive', value: true, type: 'boolean' },
      ],
    },
  },
};
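
As a quick illustration of how these definitions can drive validation, here is a minimal sketch. It assumes the type table above is exported as PROPERTY_TYPES (that constant's name is not visible in this diff, so treat the import as hypothetical) and relies only on fields shown above (validation.allowEmpty, validation.allowExpressions, validation.pattern).

// validate-property-value.ts - hypothetical helper, not part of this PR
import { PROPERTY_TYPES } from './property-types'; // assumed export name and path

function isExpression(value: unknown): boolean {
  return typeof value === 'string' && value.includes('{{') && value.includes('}}');
}

export function validatePropertyValue(typeName: string, value: unknown): string[] {
  const def = (PROPERTY_TYPES as Record<string, any>)[typeName];
  if (!def) return [`Unknown property type: ${typeName}`];

  const errors: string[] = [];
  const { allowEmpty, allowExpressions, pattern } = def.validation ?? {};

  // Empty check covers '', null, and undefined
  if (!allowEmpty && (value === '' || value === null || value === undefined)) {
    errors.push(`${typeName} must not be empty`);
  }

  // Expressions bypass pattern checks; reject them where not allowed
  if (isExpression(value)) {
    if (!allowExpressions) errors.push(`${typeName} does not allow expressions`);
    return errors;
  }

  // Pattern check for string-backed types such as dateTime and color
  if (pattern && typeof value === 'string' && !new RegExp(pattern).test(value)) {
    errors.push(`${typeName} value "${value}" does not match ${pattern}`);
  }

  return errors;
}

// validatePropertyValue('color', '#FF5733') -> []
// validatePropertyValue('color', 'red')     -> one pattern error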

src/data/canonical-ai-tool-examples.json (new file, 310 lines)
@@ -0,0 +1,310 @@
{
  "description": "Canonical configuration examples for critical AI tools based on FINAL_AI_VALIDATION_SPEC.md",
  "version": "1.0.0",
  "examples": [
    {
      "node_type": "@n8n/n8n-nodes-langchain.toolHttpRequest",
      "display_name": "HTTP Request Tool",
      "examples": [
        {
          "name": "Weather API Tool",
          "use_case": "Fetch current weather data for AI Agent",
          "complexity": "simple",
          "parameters": {
            "method": "GET",
            "url": "https://api.weatherapi.com/v1/current.json?key={{$credentials.weatherApiKey}}&q={city}",
            "toolDescription": "Get current weather conditions for a city. Provide the city name (e.g., 'London', 'New York') and receive temperature, humidity, wind speed, and conditions.",
            "placeholderDefinitions": {
              "values": [
                {
                  "name": "city",
                  "description": "Name of the city to get weather for",
                  "type": "string"
                }
              ]
            },
            "authentication": "predefinedCredentialType",
            "nodeCredentialType": "weatherApiApi"
          },
          "credentials": {
            "weatherApiApi": {
              "id": "1",
              "name": "Weather API account"
            }
          },
          "notes": "Example shows proper toolDescription, URL with placeholder, and credential configuration"
        },
        {
          "name": "GitHub Issues Tool",
          "use_case": "Create GitHub issues from AI Agent conversations",
          "complexity": "medium",
          "parameters": {
            "method": "POST",
            "url": "https://api.github.com/repos/{owner}/{repo}/issues",
            "toolDescription": "Create a new GitHub issue. Requires owner (repo owner username), repo (repository name), title, and body. Returns the created issue URL and number.",
            "placeholderDefinitions": {
              "values": [
                {
                  "name": "owner",
                  "description": "GitHub repository owner username",
                  "type": "string"
                },
                {
                  "name": "repo",
                  "description": "Repository name",
                  "type": "string"
                },
                {
                  "name": "title",
                  "description": "Issue title",
                  "type": "string"
                },
                {
                  "name": "body",
                  "description": "Issue description and details",
                  "type": "string"
                }
              ]
            },
            "sendBody": true,
            "specifyBody": "json",
            "jsonBody": "={{ { \"title\": $json.title, \"body\": $json.body } }}",
            "authentication": "predefinedCredentialType",
            "nodeCredentialType": "githubApi"
          },
          "credentials": {
            "githubApi": {
              "id": "2",
              "name": "GitHub credentials"
            }
          },
          "notes": "Example shows POST request with JSON body, multiple placeholders, and expressions"
        },
        {
          "name": "Slack Message Tool",
          "use_case": "Send Slack messages from AI Agent",
          "complexity": "simple",
          "parameters": {
            "method": "POST",
            "url": "https://slack.com/api/chat.postMessage",
            "toolDescription": "Send a message to a Slack channel. Provide channel ID or name (e.g., '#general', 'C1234567890') and message text.",
            "placeholderDefinitions": {
              "values": [
                {
                  "name": "channel",
                  "description": "Channel ID or name (e.g., #general)",
                  "type": "string"
                },
                {
                  "name": "text",
                  "description": "Message text to send",
                  "type": "string"
                }
              ]
            },
            "sendHeaders": true,
            "headerParameters": {
              "parameters": [
                {
                  "name": "Content-Type",
                  "value": "application/json; charset=utf-8"
                },
                {
                  "name": "Authorization",
                  "value": "=Bearer {{$credentials.slackApi.accessToken}}"
                }
              ]
            },
            "sendBody": true,
            "specifyBody": "json",
            "jsonBody": "={{ { \"channel\": $json.channel, \"text\": $json.text } }}",
            "authentication": "predefinedCredentialType",
            "nodeCredentialType": "slackApi"
          },
          "credentials": {
            "slackApi": {
              "id": "3",
              "name": "Slack account"
            }
          },
          "notes": "Example shows headers with credential expressions and JSON body construction"
        }
      ]
    },
    {
      "node_type": "@n8n/n8n-nodes-langchain.toolCode",
      "display_name": "Code Tool",
      "examples": [
        {
          "name": "Calculate Shipping Cost",
          "use_case": "Calculate shipping costs based on weight and distance",
          "complexity": "simple",
          "parameters": {
            "name": "calculate_shipping_cost",
            "description": "Calculate shipping cost based on package weight (in kg) and distance (in km). Returns the cost in USD.",
            "language": "javaScript",
            "code": "const baseRate = 5;\nconst perKgRate = 2;\nconst perKmRate = 0.1;\n\nconst weight = $input.weight || 0;\nconst distance = $input.distance || 0;\n\nconst cost = baseRate + (weight * perKgRate) + (distance * perKmRate);\n\nreturn { cost: parseFloat(cost.toFixed(2)), currency: 'USD' };",
            "specifyInputSchema": true,
            "schemaType": "manual",
            "inputSchema": "{\n  \"type\": \"object\",\n  \"properties\": {\n    \"weight\": {\n      \"type\": \"number\",\n      \"description\": \"Package weight in kilograms\"\n    },\n    \"distance\": {\n      \"type\": \"number\",\n      \"description\": \"Shipping distance in kilometers\"\n    }\n  },\n  \"required\": [\"weight\", \"distance\"]\n}"
          },
          "notes": "Example shows proper function naming, detailed description, input schema, and return value"
        },
        {
          "name": "Format Customer Data",
          "use_case": "Transform and validate customer information",
          "complexity": "medium",
          "parameters": {
            "name": "format_customer_data",
            "description": "Format and validate customer data. Takes raw customer info (name, email, phone) and returns formatted object with validation status.",
            "language": "javaScript",
            "code": "const { name, email, phone } = $input;\n\n// Validation\nconst emailRegex = /^[^\\s@]+@[^\\s@]+\\.[^\\s@]+$/;\nconst phoneRegex = /^\\+?[1-9]\\d{1,14}$/;\n\nconst errors = [];\nif (!emailRegex.test(email)) errors.push('Invalid email format');\nif (!phoneRegex.test(phone)) errors.push('Invalid phone format');\n\n// Formatting\nconst formatted = {\n  name: name.trim(),\n  email: email.toLowerCase().trim(),\n  phone: phone.replace(/\\s/g, ''),\n  valid: errors.length === 0,\n  errors: errors\n};\n\nreturn formatted;",
            "specifyInputSchema": true,
            "schemaType": "manual",
            "inputSchema": "{\n  \"type\": \"object\",\n  \"properties\": {\n    \"name\": {\n      \"type\": \"string\",\n      \"description\": \"Customer full name\"\n    },\n    \"email\": {\n      \"type\": \"string\",\n      \"description\": \"Customer email address\"\n    },\n    \"phone\": {\n      \"type\": \"string\",\n      \"description\": \"Customer phone number\"\n    }\n  },\n  \"required\": [\"name\", \"email\", \"phone\"]\n}"
          },
          "notes": "Example shows data validation, formatting, and structured error handling"
        },
        {
          "name": "Parse Date Range",
          "use_case": "Convert natural language date ranges to ISO format",
          "complexity": "medium",
          "parameters": {
            "name": "parse_date_range",
            "description": "Parse natural language date ranges (e.g., 'last 7 days', 'this month', 'Q1 2024') into start and end dates in ISO format.",
            "language": "javaScript",
            "code": "const input = $input.dateRange || '';\nconst now = new Date();\nlet start, end;\n\nif (input.includes('last') && input.includes('days')) {\n  const days = parseInt(input.match(/\\d+/)[0]);\n  start = new Date(now.getTime() - (days * 24 * 60 * 60 * 1000));\n  end = now;\n} else if (input === 'this month') {\n  start = new Date(now.getFullYear(), now.getMonth(), 1);\n  end = new Date(now.getFullYear(), now.getMonth() + 1, 0);\n} else if (input === 'this year') {\n  start = new Date(now.getFullYear(), 0, 1);\n  end = new Date(now.getFullYear(), 11, 31);\n} else {\n  throw new Error('Unsupported date range format');\n}\n\nreturn {\n  startDate: start.toISOString().split('T')[0],\n  endDate: end.toISOString().split('T')[0],\n  daysCount: Math.ceil((end - start) / (24 * 60 * 60 * 1000))\n};",
            "specifyInputSchema": true,
            "schemaType": "manual",
            "inputSchema": "{\n  \"type\": \"object\",\n  \"properties\": {\n    \"dateRange\": {\n      \"type\": \"string\",\n      \"description\": \"Natural language date range (e.g., 'last 7 days', 'this month')\"\n    }\n  },\n  \"required\": [\"dateRange\"]\n}"
          },
          "notes": "Example shows complex logic, error handling, and date manipulation"
        }
      ]
    },
    {
      "node_type": "@n8n/n8n-nodes-langchain.agentTool",
      "display_name": "AI Agent Tool",
      "examples": [
        {
          "name": "Research Specialist Agent",
          "use_case": "Specialized sub-agent for in-depth research tasks",
          "complexity": "medium",
          "parameters": {
            "name": "research_specialist",
            "description": "Expert research agent that can search multiple sources, synthesize information, and provide comprehensive analysis on any topic. Use this when you need detailed, well-researched information.",
            "promptType": "define",
            "text": "You are a research specialist. Your role is to:\n1. Search for relevant information from multiple sources\n2. Synthesize findings into a coherent analysis\n3. Cite your sources\n4. Highlight key insights and patterns\n\nProvide thorough, well-structured research that answers the user's question comprehensively.",
            "systemMessage": "You are a meticulous researcher focused on accuracy and completeness. Always cite sources and acknowledge limitations in available information."
          },
          "connections": {
            "ai_languageModel": [
              {
                "node": "OpenAI GPT-4",
                "type": "ai_languageModel",
                "index": 0
              }
            ],
            "ai_tool": [
              {
                "node": "SerpApi Tool",
                "type": "ai_tool",
                "index": 0
              },
              {
                "node": "Wikipedia Tool",
                "type": "ai_tool",
                "index": 0
              }
            ]
          },
          "notes": "Example shows specialized sub-agent with custom prompt, specific system message, and multiple search tools"
        },
        {
          "name": "Data Analysis Agent",
          "use_case": "Sub-agent for analyzing and visualizing data",
          "complexity": "complex",
          "parameters": {
            "name": "data_analyst",
            "description": "Data analysis specialist that can process datasets, calculate statistics, identify trends, and generate insights. Use for any data analysis or statistical questions.",
            "promptType": "auto",
            "systemMessage": "You are a data analyst with expertise in statistics and data interpretation. Break down complex datasets into understandable insights. Use the Code Tool to perform calculations when needed.",
            "maxIterations": 10
          },
          "connections": {
            "ai_languageModel": [
              {
                "node": "Anthropic Claude",
                "type": "ai_languageModel",
                "index": 0
              }
            ],
            "ai_tool": [
              {
                "node": "Code Tool - Stats",
                "type": "ai_tool",
                "index": 0
              },
              {
                "node": "HTTP Request Tool - Data API",
                "type": "ai_tool",
                "index": 0
              }
            ]
          },
          "notes": "Example shows auto prompt type with specialized system message and analytical tools"
        }
      ]
    },
    {
      "node_type": "@n8n/n8n-nodes-langchain.mcpClientTool",
      "display_name": "MCP Client Tool",
      "examples": [
        {
          "name": "Filesystem MCP Tool",
          "use_case": "Access filesystem operations via MCP protocol",
          "complexity": "medium",
          "parameters": {
            "description": "Access file system operations through MCP. Can read files, list directories, create files, and search for content.",
            "mcpServer": {
              "transport": "stdio",
              "command": "npx",
              "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/allowed/directory"]
            },
            "tool": "read_file"
          },
          "notes": "Example shows stdio transport MCP server with filesystem access tool"
        },
        {
          "name": "Puppeteer MCP Tool",
          "use_case": "Browser automation via MCP for AI Agents",
          "complexity": "complex",
          "parameters": {
            "description": "Control a web browser to navigate pages, take screenshots, and extract content. Useful for web scraping and automated testing.",
            "mcpServer": {
              "transport": "stdio",
              "command": "npx",
              "args": ["-y", "@modelcontextprotocol/server-puppeteer"]
            },
            "tool": "puppeteer_navigate"
          },
          "notes": "Example shows Puppeteer MCP server for browser automation"
        },
        {
          "name": "Database MCP Tool",
          "use_case": "Query databases via MCP protocol",
          "complexity": "complex",
          "parameters": {
            "description": "Execute SQL queries and retrieve data from PostgreSQL databases. Supports SELECT, INSERT, UPDATE operations with proper escaping.",
            "mcpServer": {
              "transport": "sse",
              "url": "https://mcp-server.example.com/database"
            },
            "tool": "execute_query"
          },
          "notes": "Example shows SSE transport MCP server for remote database access"
        }
      ]
    }
  ]
}
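
A short, hedged sketch of how a service might consume this file (the loader name and filtering logic are illustrative; the actual consumer is not shown in this diff, only the file path above is):

// load-canonical-examples.ts - illustrative only
import { readFileSync } from 'fs';
import { join } from 'path';

interface CanonicalExample {
  name: string;
  use_case: string;
  complexity: 'simple' | 'medium' | 'complex';
  parameters: Record<string, unknown>;
  notes?: string;
}

interface CanonicalExampleGroup {
  node_type: string;
  display_name: string;
  examples: CanonicalExample[];
}

export function getCanonicalExamples(nodeType: string, complexity?: string): CanonicalExample[] {
  // Path matches the new file added in this PR
  const raw = readFileSync(
    join(__dirname, 'data', 'canonical-ai-tool-examples.json'),
    'utf-8'
  );
  const doc = JSON.parse(raw) as { examples: CanonicalExampleGroup[] };
  const group = doc.examples.find(g => g.node_type === nodeType);
  if (!group) return [];
  return complexity
    ? group.examples.filter(e => e.complexity === complexity)
    : group.examples;
}

// getCanonicalExamples('@n8n/n8n-nodes-langchain.toolHttpRequest', 'simple')
// -> [{ name: 'Weather API Tool', ... }, { name: 'Slack Message Tool', ... }]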

@@ -232,15 +232,45 @@ class BetterSQLiteAdapter implements DatabaseAdapter {
  */
 class SQLJSAdapter implements DatabaseAdapter {
   private saveTimer: NodeJS.Timeout | null = null;
+  private saveIntervalMs: number;
+  private closed = false; // Prevent multiple close() calls
+
+  // Default save interval: 5 seconds (balance between data safety and performance)
+  // Configurable via SQLJS_SAVE_INTERVAL_MS environment variable
+  //
+  // DATA LOSS WINDOW: Up to 5 seconds of database changes may be lost if process
+  // crashes before scheduleSave() timer fires. This is acceptable because:
+  // 1. close() calls saveToFile() immediately on graceful shutdown
+  // 2. Docker/Kubernetes SIGTERM provides 30s for cleanup (more than enough)
+  // 3. The alternative (100ms interval) caused 2.2GB memory leaks in production
+  // 4. MCP server is primarily read-heavy (writes are rare)
+  private static readonly DEFAULT_SAVE_INTERVAL_MS = 5000;

   constructor(private db: any, private dbPath: string) {
-    // Set up auto-save on changes
-    this.scheduleSave();
+    // Read save interval from environment or use default
+    const envInterval = process.env.SQLJS_SAVE_INTERVAL_MS;
+    this.saveIntervalMs = envInterval ? parseInt(envInterval, 10) : SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
+
+    // Validate interval (minimum 100ms, maximum 60000ms = 1 minute)
+    if (isNaN(this.saveIntervalMs) || this.saveIntervalMs < 100 || this.saveIntervalMs > 60000) {
+      logger.warn(
+        `Invalid SQLJS_SAVE_INTERVAL_MS value: ${envInterval} (must be 100-60000ms), ` +
+        `using default ${SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS}ms`
+      );
+      this.saveIntervalMs = SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
+    }
+
+    logger.debug(`SQLJSAdapter initialized with save interval: ${this.saveIntervalMs}ms`);
+
+    // NOTE: No initial save scheduled here (optimization)
+    // Database is either:
+    // 1. Loaded from existing file (already persisted), or
+    // 2. New database (will be saved on first write operation)
   }

   prepare(sql: string): PreparedStatement {
     const stmt = this.db.prepare(sql);
-    this.scheduleSave();
+    // Don't schedule save on prepare - only on actual writes (via SQLJSStatement.run())
     return new SQLJSStatement(stmt, () => this.scheduleSave());
   }

@@ -250,11 +280,18 @@ class SQLJSAdapter implements DatabaseAdapter {
   }

   close(): void {
+    if (this.closed) {
+      logger.debug('SQLJSAdapter already closed, skipping');
+      return;
+    }
+
     this.saveToFile();
     if (this.saveTimer) {
       clearTimeout(this.saveTimer);
       this.saveTimer = null;
     }
     this.db.close();
+    this.closed = true;
   }

   pragma(key: string, value?: any): any {
@@ -301,19 +338,32 @@ class SQLJSAdapter implements DatabaseAdapter {
     if (this.saveTimer) {
       clearTimeout(this.saveTimer);
     }

-    // Save after 100ms of inactivity
+    // Save after configured interval of inactivity (default: 5000ms)
+    // This debouncing reduces memory churn from frequent buffer allocations
+    //
+    // NOTE: Under constant write load, saves may be delayed until writes stop.
+    // This is acceptable because:
+    // 1. MCP server is primarily read-heavy (node lookups, searches)
+    // 2. Writes are rare (only during database rebuilds)
+    // 3. close() saves immediately on shutdown, flushing any pending changes
     this.saveTimer = setTimeout(() => {
       this.saveToFile();
-    }, 100);
+    }, this.saveIntervalMs);
   }

   private saveToFile(): void {
     try {
+      // Export database to Uint8Array (2-5MB typical)
       const data = this.db.export();
-      const buffer = Buffer.from(data);
-      fsSync.writeFileSync(this.dbPath, buffer);
+      // Write directly without Buffer.from() copy (saves 50% memory allocation)
+      // writeFileSync accepts Uint8Array directly, no need for Buffer conversion
+      fsSync.writeFileSync(this.dbPath, data);
       logger.debug(`Database saved to ${this.dbPath}`);
+
+      // Note: 'data' reference is automatically cleared when function exits
+      // V8 GC will reclaim the Uint8Array once it's no longer referenced
     } catch (error) {
       logger.error('Failed to save database', error);
     }
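
The debounced-save pattern above is small enough to isolate. Here is a self-contained sketch of the same technique with simplified names (this is an illustration of the idea, not the adapter itself, which additionally validates the env interval and logs):

// Minimal debounced-persist helper mirroring scheduleSave()/close().
class DebouncedSaver {
  private timer: ReturnType<typeof setTimeout> | null = null;

  constructor(
    private save: () => void,
    private intervalMs: number = 5000, // same default as DEFAULT_SAVE_INTERVAL_MS
  ) {}

  // Each write resets the timer, so the file is written once per burst of
  // writes rather than once per statement.
  schedule(): void {
    if (this.timer) clearTimeout(this.timer);
    this.timer = setTimeout(() => this.save(), this.intervalMs);
  }

  // Flush immediately on shutdown, mirroring close() -> saveToFile().
  flush(): void {
    if (this.timer) {
      clearTimeout(this.timer);
      this.timer = null;
    }
    this.save();
  }
}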

src/database/migrations/add-template-node-configs.sql (new file, 59 lines)
@@ -0,0 +1,59 @@
-- Migration: Add template_node_configs table
-- Run during `npm run rebuild` or `npm run fetch:templates`
-- This migration is idempotent - safe to run multiple times

-- Create table if it doesn't exist
CREATE TABLE IF NOT EXISTS template_node_configs (
  id INTEGER PRIMARY KEY,
  node_type TEXT NOT NULL,
  template_id INTEGER NOT NULL,
  template_name TEXT NOT NULL,
  template_views INTEGER DEFAULT 0,

  -- Node configuration (extracted from workflow)
  node_name TEXT,                -- Node name in workflow (e.g., "HTTP Request")
  parameters_json TEXT NOT NULL, -- JSON: node.parameters
  credentials_json TEXT,         -- JSON: node.credentials (if present)

  -- Pre-calculated metadata for filtering
  has_credentials INTEGER DEFAULT 0,
  has_expressions INTEGER DEFAULT 0, -- Contains {{...}} or $json/$node
  complexity TEXT CHECK(complexity IN ('simple', 'medium', 'complex')),
  use_cases TEXT, -- JSON array from template.metadata.use_cases

  -- Pre-calculated ranking (1 = best, 2 = second best, etc.)
  rank INTEGER DEFAULT 0,

  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  FOREIGN KEY (template_id) REFERENCES templates(id) ON DELETE CASCADE
);

-- Create indexes if they don't exist
CREATE INDEX IF NOT EXISTS idx_config_node_type_rank
  ON template_node_configs(node_type, rank);

CREATE INDEX IF NOT EXISTS idx_config_complexity
  ON template_node_configs(node_type, complexity, rank);

CREATE INDEX IF NOT EXISTS idx_config_auth
  ON template_node_configs(node_type, has_credentials, rank);

-- Create view if it doesn't exist
CREATE VIEW IF NOT EXISTS ranked_node_configs AS
SELECT
  node_type,
  template_name,
  template_views,
  parameters_json,
  credentials_json,
  has_credentials,
  has_expressions,
  complexity,
  use_cases,
  rank
FROM template_node_configs
WHERE rank <= 5 -- Top 5 per node type
ORDER BY node_type, rank;

-- Note: Actual data population is handled by the fetch-templates script
-- This migration only creates the schema
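
Because every statement uses IF NOT EXISTS, a runner only needs to execute the file. A minimal sketch, assuming the adapter exposes an exec() method (not shown in this diff, so treat it as an assumption) and using the path above:

// run-migration.ts - hypothetical runner; exec() on the adapter is assumed
import { readFileSync } from 'fs';

export function runTemplateNodeConfigsMigration(db: { exec(sql: string): void }): void {
  const sql = readFileSync(
    'src/database/migrations/add-template-node-configs.sql',
    'utf-8'
  );
  // Safe to call on every rebuild: all statements are idempotent.
  db.exec(sql);
}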

@@ -1,16 +1,18 @@
 import { DatabaseAdapter } from './database-adapter';
 import { ParsedNode } from '../parsers/node-parser';
 import { SQLiteStorageService } from '../services/sqlite-storage-service';
+import { NodeTypeNormalizer } from '../utils/node-type-normalizer';

 export class NodeRepository {
   private db: DatabaseAdapter;

   constructor(dbOrService: DatabaseAdapter | SQLiteStorageService) {
-    if ('db' in dbOrService) {
+    if (dbOrService instanceof SQLiteStorageService) {
       this.db = dbOrService.db;
-    } else {
-      this.db = dbOrService;
+      return;
     }
+
+    this.db = dbOrService;
   }

   /**
@@ -50,33 +52,30 @@ export class NodeRepository {

   /**
    * Get node with proper JSON deserialization
+   * Automatically normalizes node type to full form for consistent lookups
    */
   getNode(nodeType: string): any {
+    // Normalize to full form first for consistent lookups
+    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);
+
     const row = this.db.prepare(`
       SELECT * FROM nodes WHERE node_type = ?
-    `).get(nodeType) as any;
+    `).get(normalizedType) as any;
+
+    // Fallback: try original type if normalization didn't help (e.g., community nodes)
+    if (!row && normalizedType !== nodeType) {
+      const originalRow = this.db.prepare(`
+        SELECT * FROM nodes WHERE node_type = ?
+      `).get(nodeType) as any;
+
+      if (originalRow) {
+        return this.parseNodeRow(originalRow);
+      }
+    }

     if (!row) return null;
-
-    return {
-      nodeType: row.node_type,
-      displayName: row.display_name,
-      description: row.description,
-      category: row.category,
-      developmentStyle: row.development_style,
-      package: row.package_name,
-      isAITool: Number(row.is_ai_tool) === 1,
-      isTrigger: Number(row.is_trigger) === 1,
-      isWebhook: Number(row.is_webhook) === 1,
-      isVersioned: Number(row.is_versioned) === 1,
-      version: row.version,
-      properties: this.safeJsonParse(row.properties_schema, []),
-      operations: this.safeJsonParse(row.operations, []),
-      credentials: this.safeJsonParse(row.credentials_required, []),
-      hasDocumentation: !!row.documentation,
-      outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
-      outputNames: row.output_names ? this.safeJsonParse(row.output_names, null) : null
-    };
+    return this.parseNodeRow(row);
   }

   /**
@@ -124,10 +123,22 @@ export class NodeRepository {
     return rows.map(row => this.parseNodeRow(row));
   }

+  /**
+   * Legacy LIKE-based search method for direct repository usage.
+   *
+   * NOTE: MCP tools do NOT use this method. They use MCPServer.searchNodes()
+   * which automatically detects and uses FTS5 full-text search when available.
+   * See src/mcp/server.ts:1135-1148 for FTS5 implementation.
+   *
+   * This method remains for:
+   * - Direct repository access in scripts/benchmarks
+   * - Fallback when FTS5 table doesn't exist
+   * - Legacy compatibility
+   */
   searchNodes(query: string, mode: 'OR' | 'AND' | 'FUZZY' = 'OR', limit: number = 20): any[] {
     let sql = '';
     const params: any[] = [];

     if (mode === 'FUZZY') {
       // Simple fuzzy search
       sql = `
@@ -451,4 +462,501 @@ export class NodeRepository {

    return undefined;
  }

  /**
   * VERSION MANAGEMENT METHODS
   * Methods for working with node_versions and version_property_changes tables
   */

  /**
   * Save a specific node version to the database
   */
  saveNodeVersion(versionData: {
    nodeType: string;
    version: string;
    packageName: string;
    displayName: string;
    description?: string;
    category?: string;
    isCurrentMax?: boolean;
    propertiesSchema?: any;
    operations?: any;
    credentialsRequired?: any;
    outputs?: any;
    minimumN8nVersion?: string;
    breakingChanges?: any[];
    deprecatedProperties?: string[];
    addedProperties?: string[];
    releasedAt?: Date;
  }): void {
    const stmt = this.db.prepare(`
      INSERT OR REPLACE INTO node_versions (
        node_type, version, package_name, display_name, description,
        category, is_current_max, properties_schema, operations,
        credentials_required, outputs, minimum_n8n_version,
        breaking_changes, deprecated_properties, added_properties,
        released_at
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);

    stmt.run(
      versionData.nodeType,
      versionData.version,
      versionData.packageName,
      versionData.displayName,
      versionData.description || null,
      versionData.category || null,
      versionData.isCurrentMax ? 1 : 0,
      versionData.propertiesSchema ? JSON.stringify(versionData.propertiesSchema) : null,
      versionData.operations ? JSON.stringify(versionData.operations) : null,
      versionData.credentialsRequired ? JSON.stringify(versionData.credentialsRequired) : null,
      versionData.outputs ? JSON.stringify(versionData.outputs) : null,
      versionData.minimumN8nVersion || null,
      versionData.breakingChanges ? JSON.stringify(versionData.breakingChanges) : null,
      versionData.deprecatedProperties ? JSON.stringify(versionData.deprecatedProperties) : null,
      versionData.addedProperties ? JSON.stringify(versionData.addedProperties) : null,
      versionData.releasedAt || null
    );
  }

  /**
   * Get all available versions for a specific node type
   */
  getNodeVersions(nodeType: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const rows = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ?
      ORDER BY version DESC
    `).all(normalizedType) as any[];

    return rows.map(row => this.parseNodeVersionRow(row));
  }

  /**
   * Get the latest (current max) version for a node type
   */
  getLatestNodeVersion(nodeType: string): any | null {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const row = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ? AND is_current_max = 1
      LIMIT 1
    `).get(normalizedType) as any;

    if (!row) return null;
    return this.parseNodeVersionRow(row);
  }

  /**
   * Get a specific version of a node
   */
  getNodeVersion(nodeType: string, version: string): any | null {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const row = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ? AND version = ?
    `).get(normalizedType, version) as any;

    if (!row) return null;
    return this.parseNodeVersionRow(row);
  }

  /**
   * Save a property change between versions
   */
  savePropertyChange(changeData: {
    nodeType: string;
    fromVersion: string;
    toVersion: string;
    propertyName: string;
    changeType: 'added' | 'removed' | 'renamed' | 'type_changed' | 'requirement_changed' | 'default_changed';
    isBreaking?: boolean;
    oldValue?: string;
    newValue?: string;
    migrationHint?: string;
    autoMigratable?: boolean;
    migrationStrategy?: any;
    severity?: 'LOW' | 'MEDIUM' | 'HIGH';
  }): void {
    const stmt = this.db.prepare(`
      INSERT INTO version_property_changes (
        node_type, from_version, to_version, property_name, change_type,
        is_breaking, old_value, new_value, migration_hint, auto_migratable,
        migration_strategy, severity
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);

    stmt.run(
      changeData.nodeType,
      changeData.fromVersion,
      changeData.toVersion,
      changeData.propertyName,
      changeData.changeType,
      changeData.isBreaking ? 1 : 0,
      changeData.oldValue || null,
      changeData.newValue || null,
      changeData.migrationHint || null,
      changeData.autoMigratable ? 1 : 0,
      changeData.migrationStrategy ? JSON.stringify(changeData.migrationStrategy) : null,
      changeData.severity || 'MEDIUM'
    );
  }

  /**
   * Get property changes between two versions
   */
  getPropertyChanges(nodeType: string, fromVersion: string, toVersion: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const rows = this.db.prepare(`
      SELECT * FROM version_property_changes
      WHERE node_type = ? AND from_version = ? AND to_version = ?
      ORDER BY severity DESC, property_name
    `).all(normalizedType, fromVersion, toVersion) as any[];

    return rows.map(row => this.parsePropertyChangeRow(row));
  }

  /**
   * Get all breaking changes for upgrading from one version to another
   * Can handle multi-step upgrades (e.g., 1.0 -> 2.0 via 1.5)
   */
  getBreakingChanges(nodeType: string, fromVersion: string, toVersion?: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    let sql = `
      SELECT * FROM version_property_changes
      WHERE node_type = ? AND is_breaking = 1
    `;
    const params: any[] = [normalizedType];

    if (toVersion) {
      // Get changes between specific versions
      sql += ` AND from_version >= ? AND to_version <= ?`;
      params.push(fromVersion, toVersion);
    } else {
      // Get all breaking changes from this version onwards
      sql += ` AND from_version >= ?`;
      params.push(fromVersion);
    }

    sql += ` ORDER BY from_version, to_version, severity DESC`;

    const rows = this.db.prepare(sql).all(...params) as any[];
    return rows.map(row => this.parsePropertyChangeRow(row));
  }

  /**
   * Get auto-migratable changes for a version upgrade
   */
  getAutoMigratableChanges(nodeType: string, fromVersion: string, toVersion: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const rows = this.db.prepare(`
      SELECT * FROM version_property_changes
      WHERE node_type = ?
        AND from_version = ?
        AND to_version = ?
        AND auto_migratable = 1
      ORDER BY severity DESC
    `).all(normalizedType, fromVersion, toVersion) as any[];

    return rows.map(row => this.parsePropertyChangeRow(row));
  }

  /**
   * Check if a version upgrade path exists between two versions
   */
  hasVersionUpgradePath(nodeType: string, fromVersion: string, toVersion: string): boolean {
    const versions = this.getNodeVersions(nodeType);
    if (versions.length === 0) return false;

    // Check if both versions exist
    const fromExists = versions.some(v => v.version === fromVersion);
    const toExists = versions.some(v => v.version === toVersion);

    return fromExists && toExists;
  }

  /**
   * Get count of nodes with multiple versions
   */
  getVersionedNodesCount(): number {
    const result = this.db.prepare(`
      SELECT COUNT(DISTINCT node_type) as count
      FROM node_versions
    `).get() as any;
    return result.count;
  }

  /**
   * Parse node version row from database
   */
  private parseNodeVersionRow(row: any): any {
    return {
      id: row.id,
      nodeType: row.node_type,
      version: row.version,
      packageName: row.package_name,
      displayName: row.display_name,
      description: row.description,
      category: row.category,
      isCurrentMax: Number(row.is_current_max) === 1,
      propertiesSchema: row.properties_schema ? this.safeJsonParse(row.properties_schema, []) : null,
      operations: row.operations ? this.safeJsonParse(row.operations, []) : null,
      credentialsRequired: row.credentials_required ? this.safeJsonParse(row.credentials_required, []) : null,
      outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
      minimumN8nVersion: row.minimum_n8n_version,
      breakingChanges: row.breaking_changes ? this.safeJsonParse(row.breaking_changes, []) : [],
      deprecatedProperties: row.deprecated_properties ? this.safeJsonParse(row.deprecated_properties, []) : [],
      addedProperties: row.added_properties ? this.safeJsonParse(row.added_properties, []) : [],
      releasedAt: row.released_at,
      createdAt: row.created_at
    };
  }

  /**
   * Parse property change row from database
   */
  private parsePropertyChangeRow(row: any): any {
    return {
      id: row.id,
      nodeType: row.node_type,
      fromVersion: row.from_version,
      toVersion: row.to_version,
      propertyName: row.property_name,
      changeType: row.change_type,
      isBreaking: Number(row.is_breaking) === 1,
      oldValue: row.old_value,
      newValue: row.new_value,
      migrationHint: row.migration_hint,
      autoMigratable: Number(row.auto_migratable) === 1,
      migrationStrategy: row.migration_strategy ? this.safeJsonParse(row.migration_strategy, null) : null,
      severity: row.severity,
      createdAt: row.created_at
    };
  }

  // ========================================
  // Workflow Versioning Methods
  // ========================================

  /**
   * Create a new workflow version (backup before modification)
   */
  createWorkflowVersion(data: {
    workflowId: string;
    versionNumber: number;
    workflowName: string;
    workflowSnapshot: any;
    trigger: 'partial_update' | 'full_update' | 'autofix';
    operations?: any[];
    fixTypes?: string[];
    metadata?: any;
  }): number {
    const stmt = this.db.prepare(`
      INSERT INTO workflow_versions (
        workflow_id, version_number, workflow_name, workflow_snapshot,
        trigger, operations, fix_types, metadata
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
    `);

    const result = stmt.run(
      data.workflowId,
      data.versionNumber,
      data.workflowName,
      JSON.stringify(data.workflowSnapshot),
      data.trigger,
      data.operations ? JSON.stringify(data.operations) : null,
      data.fixTypes ? JSON.stringify(data.fixTypes) : null,
      data.metadata ? JSON.stringify(data.metadata) : null
    );

    return result.lastInsertRowid as number;
  }

  /**
   * Get workflow versions ordered by version number (newest first)
   */
  getWorkflowVersions(workflowId: string, limit?: number): any[] {
    let sql = `
      SELECT * FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
    `;

    if (limit) {
      sql += ` LIMIT ?`;
      const rows = this.db.prepare(sql).all(workflowId, limit) as any[];
      return rows.map(row => this.parseWorkflowVersionRow(row));
    }

    const rows = this.db.prepare(sql).all(workflowId) as any[];
    return rows.map(row => this.parseWorkflowVersionRow(row));
  }

  /**
   * Get a specific workflow version by ID
   */
  getWorkflowVersion(versionId: number): any | null {
    const row = this.db.prepare(`
      SELECT * FROM workflow_versions WHERE id = ?
    `).get(versionId) as any;

    if (!row) return null;
    return this.parseWorkflowVersionRow(row);
  }

  /**
   * Get the latest workflow version for a workflow
   */
  getLatestWorkflowVersion(workflowId: string): any | null {
    const row = this.db.prepare(`
      SELECT * FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
      LIMIT 1
    `).get(workflowId) as any;

    if (!row) return null;
    return this.parseWorkflowVersionRow(row);
  }

  /**
   * Delete a specific workflow version
   */
  deleteWorkflowVersion(versionId: number): void {
    this.db.prepare(`
      DELETE FROM workflow_versions WHERE id = ?
    `).run(versionId);
  }

  /**
   * Delete all versions for a specific workflow
   */
  deleteWorkflowVersionsByWorkflowId(workflowId: string): number {
    const result = this.db.prepare(`
      DELETE FROM workflow_versions WHERE workflow_id = ?
    `).run(workflowId);

    return result.changes;
  }

  /**
   * Prune old workflow versions, keeping only the most recent N versions
   * Returns number of versions deleted
   */
  pruneWorkflowVersions(workflowId: string, keepCount: number): number {
    // Get all versions ordered by version_number DESC
    const versions = this.db.prepare(`
      SELECT id FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
    `).all(workflowId) as any[];

    // If we have fewer versions than keepCount, no pruning needed
    if (versions.length <= keepCount) {
      return 0;
    }

    // Get IDs of versions to delete (all except the most recent keepCount)
    const idsToDelete = versions.slice(keepCount).map(v => v.id);

    if (idsToDelete.length === 0) {
      return 0;
    }

    // Delete old versions
    const placeholders = idsToDelete.map(() => '?').join(',');
    const result = this.db.prepare(`
      DELETE FROM workflow_versions WHERE id IN (${placeholders})
    `).run(...idsToDelete);

    return result.changes;
  }

  /**
   * Truncate the entire workflow_versions table
   * Returns number of rows deleted
   */
  truncateWorkflowVersions(): number {
    const result = this.db.prepare(`
      DELETE FROM workflow_versions
    `).run();

    return result.changes;
  }

  /**
   * Get count of versions for a specific workflow
   */
  getWorkflowVersionCount(workflowId: string): number {
    const result = this.db.prepare(`
      SELECT COUNT(*) as count FROM workflow_versions WHERE workflow_id = ?
    `).get(workflowId) as any;

    return result.count;
  }

  /**
   * Get storage statistics for workflow versions
   */
  getVersionStorageStats(): any {
    // Total versions
    const totalResult = this.db.prepare(`
      SELECT COUNT(*) as count FROM workflow_versions
    `).get() as any;

    // Total size (approximate - sum of JSON lengths)
    const sizeResult = this.db.prepare(`
      SELECT SUM(LENGTH(workflow_snapshot)) as total_size FROM workflow_versions
    `).get() as any;

    // Per-workflow breakdown
    const byWorkflow = this.db.prepare(`
      SELECT
        workflow_id,
        workflow_name,
        COUNT(*) as version_count,
        SUM(LENGTH(workflow_snapshot)) as total_size,
        MAX(created_at) as last_backup
      FROM workflow_versions
      GROUP BY workflow_id
      ORDER BY version_count DESC
    `).all() as any[];

    return {
      totalVersions: totalResult.count,
      totalSize: sizeResult.total_size || 0,
      byWorkflow: byWorkflow.map(row => ({
        workflowId: row.workflow_id,
        workflowName: row.workflow_name,
        versionCount: row.version_count,
        totalSize: row.total_size,
        lastBackup: row.last_backup
      }))
    };
  }

  /**
   * Parse workflow version row from database
   */
  private parseWorkflowVersionRow(row: any): any {
    return {
      id: row.id,
      workflowId: row.workflow_id,
      versionNumber: row.version_number,
      workflowName: row.workflow_name,
      workflowSnapshot: this.safeJsonParse(row.workflow_snapshot, null),
      trigger: row.trigger,
      operations: row.operations ? this.safeJsonParse(row.operations, null) : null,
      fixTypes: row.fix_types ? this.safeJsonParse(row.fix_types, null) : null,
      metadata: row.metadata ? this.safeJsonParse(row.metadata, null) : null,
      createdAt: row.created_at
    };
  }
}
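
Taken together, the versioning methods support a backup-then-prune flow. A hedged usage sketch (adapter construction elided; IDs and version numbers are illustrative):

// Illustrative flow only; error handling omitted. `db` is an existing DatabaseAdapter.
const repo = new NodeRepository(db);

// Back up a workflow before applying a partial update...
repo.createWorkflowVersion({
  workflowId: 'wf-123',
  versionNumber: repo.getWorkflowVersionCount('wf-123') + 1,
  workflowName: 'Daily Sync',
  workflowSnapshot: { nodes: [], connections: {} }, // real snapshot in practice
  trigger: 'partial_update',
});

// ...and keep only the ten most recent backups.
repo.pruneWorkflowVersions('wf-123', 10);

// Node-version queries follow the same normalize-then-lookup pattern as getNode():
const breaking = repo.getBreakingChanges('nodes-base.httpRequest', '3', '4.2');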

src/database/nodes.db (new file, 0 lines)

@@ -25,6 +25,40 @@ CREATE INDEX IF NOT EXISTS idx_package ON nodes(package_name);
CREATE INDEX IF NOT EXISTS idx_ai_tool ON nodes(is_ai_tool);
CREATE INDEX IF NOT EXISTS idx_category ON nodes(category);

-- FTS5 full-text search index for nodes
CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
  node_type,
  display_name,
  description,
  documentation,
  operations,
  content=nodes,
  content_rowid=rowid
);

-- Triggers to keep FTS5 in sync with nodes table
CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes
BEGIN
  INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
  VALUES (new.rowid, new.node_type, new.display_name, new.description, new.documentation, new.operations);
END;

CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes
BEGIN
  UPDATE nodes_fts
  SET node_type = new.node_type,
      display_name = new.display_name,
      description = new.description,
      documentation = new.documentation,
      operations = new.operations
  WHERE rowid = new.rowid;
END;

CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes
BEGIN
  DELETE FROM nodes_fts WHERE rowid = old.rowid;
END;
|
||||
|
||||
-- Templates table for n8n workflow templates
|
||||
CREATE TABLE IF NOT EXISTS templates (
|
||||
id INTEGER PRIMARY KEY,
|
||||
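With the external-content index and sync triggers in place, node search can go through FTS5. A minimal query sketch in the better-sqlite3 style used elsewhere in this diff; the database path and search term are illustrative:

import Database from 'better-sqlite3';

// Open the bundled database read-only (path is illustrative).
const db = new Database('src/database/nodes.db', { readonly: true });

// MATCH runs the FTS5 query; bm25() scores relevance (lower = better).
const hits = db.prepare(`
  SELECT n.node_type, n.display_name
  FROM nodes_fts
  JOIN nodes n ON n.rowid = nodes_fts.rowid
  WHERE nodes_fts MATCH ?
  ORDER BY bm25(nodes_fts)
  LIMIT 10
`).all('http request');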
@@ -53,5 +87,150 @@ CREATE INDEX IF NOT EXISTS idx_template_updated ON templates(updated_at);
CREATE INDEX IF NOT EXISTS idx_template_name ON templates(name);
CREATE INDEX IF NOT EXISTS idx_template_metadata ON templates(metadata_generated_at);

-- Note: FTS5 tables are created conditionally at runtime if FTS5 is supported
-- (see template-repository.ts initializeFTS5() method).

-- Pre-extracted node configurations from templates.
-- This table stores the top node configurations from popular templates
-- and provides fast access to real-world configuration examples.
CREATE TABLE IF NOT EXISTS template_node_configs (
  id INTEGER PRIMARY KEY,
  node_type TEXT NOT NULL,
  template_id INTEGER NOT NULL,
  template_name TEXT NOT NULL,
  template_views INTEGER DEFAULT 0,

  -- Node configuration (extracted from the workflow)
  node_name TEXT,                -- Node name in the workflow (e.g., "HTTP Request")
  parameters_json TEXT NOT NULL, -- JSON: node.parameters
  credentials_json TEXT,         -- JSON: node.credentials (if present)

  -- Pre-calculated metadata for filtering
  has_credentials INTEGER DEFAULT 0,
  has_expressions INTEGER DEFAULT 0, -- Contains {{...}} or $json/$node
  complexity TEXT CHECK(complexity IN ('simple', 'medium', 'complex')),
  use_cases TEXT,                -- JSON array from template.metadata.use_cases

  -- Pre-calculated ranking (1 = best, 2 = second best, etc.)
  rank INTEGER DEFAULT 0,

  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  FOREIGN KEY (template_id) REFERENCES templates(id) ON DELETE CASCADE
);

-- Indexes for fast queries
CREATE INDEX IF NOT EXISTS idx_config_node_type_rank
  ON template_node_configs(node_type, rank);

CREATE INDEX IF NOT EXISTS idx_config_complexity
  ON template_node_configs(node_type, complexity, rank);

CREATE INDEX IF NOT EXISTS idx_config_auth
  ON template_node_configs(node_type, has_credentials, rank);

-- View for easy querying of top configs
CREATE VIEW IF NOT EXISTS ranked_node_configs AS
SELECT
  node_type,
  template_name,
  template_views,
  parameters_json,
  credentials_json,
  has_credentials,
  has_expressions,
  complexity,
  use_cases,
  rank
FROM template_node_configs
WHERE rank <= 5 -- Top 5 per node type
ORDER BY node_type, rank;

-- Note: Template FTS5 tables are created conditionally at runtime if FTS5 is supported
-- (see template-repository.ts initializeFTS5() method).
-- The node FTS5 table (nodes_fts) is created above during schema initialization.

-- Node versions table for tracking all available versions of each node.
-- Enables version upgrade detection and migration.
CREATE TABLE IF NOT EXISTS node_versions (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  node_type TEXT NOT NULL,           -- e.g., "n8n-nodes-base.executeWorkflow"
  version TEXT NOT NULL,             -- e.g., "1.0", "1.1", "2.0"
  package_name TEXT NOT NULL,        -- e.g., "n8n-nodes-base"
  display_name TEXT NOT NULL,
  description TEXT,
  category TEXT,
  is_current_max INTEGER DEFAULT 0,  -- 1 if this is the latest version
  properties_schema TEXT,            -- JSON schema for this specific version
  operations TEXT,                   -- JSON array of operations for this version
  credentials_required TEXT,         -- JSON array of required credentials
  outputs TEXT,                      -- JSON array of output definitions
  minimum_n8n_version TEXT,          -- Minimum n8n version required (e.g., "1.0.0")
  breaking_changes TEXT,             -- JSON array of breaking changes from the previous version
  deprecated_properties TEXT,        -- JSON array of removed/deprecated properties
  added_properties TEXT,             -- JSON array of newly added properties
  released_at DATETIME,              -- When this version was released
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  UNIQUE(node_type, version),
  FOREIGN KEY (node_type) REFERENCES nodes(node_type) ON DELETE CASCADE
);

-- Indexes for version queries
CREATE INDEX IF NOT EXISTS idx_version_node_type ON node_versions(node_type);
CREATE INDEX IF NOT EXISTS idx_version_current_max ON node_versions(is_current_max);
CREATE INDEX IF NOT EXISTS idx_version_composite ON node_versions(node_type, version);

-- Version property changes for detailed migration tracking.
-- Records specific property-level changes between versions.
CREATE TABLE IF NOT EXISTS version_property_changes (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  node_type TEXT NOT NULL,
  from_version TEXT NOT NULL,        -- Version where the change occurred (e.g., "1.0")
  to_version TEXT NOT NULL,          -- Target version (e.g., "1.1")
  property_name TEXT NOT NULL,       -- Property path (e.g., "parameters.inputFieldMapping")
  change_type TEXT NOT NULL CHECK(change_type IN (
    'added',               -- Property added (may be required)
    'removed',             -- Property removed/deprecated
    'renamed',             -- Property renamed
    'type_changed',        -- Property type changed
    'requirement_changed', -- Required → optional or vice versa
    'default_changed'      -- Default value changed
  )),
  is_breaking INTEGER DEFAULT 0,     -- 1 if this is a breaking change
  old_value TEXT,                    -- For renamed/type_changed: old property name or type
  new_value TEXT,                    -- For renamed/type_changed: new property name or type
  migration_hint TEXT,               -- Human-readable migration guidance
  auto_migratable INTEGER DEFAULT 0, -- 1 if it can be automatically migrated
  migration_strategy TEXT,           -- JSON: strategy for auto-migration
  severity TEXT CHECK(severity IN ('LOW', 'MEDIUM', 'HIGH')), -- Impact severity
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  FOREIGN KEY (node_type, from_version) REFERENCES node_versions(node_type, version) ON DELETE CASCADE
);

-- Indexes for property change queries
CREATE INDEX IF NOT EXISTS idx_prop_changes_node ON version_property_changes(node_type);
CREATE INDEX IF NOT EXISTS idx_prop_changes_versions ON version_property_changes(node_type, from_version, to_version);
CREATE INDEX IF NOT EXISTS idx_prop_changes_breaking ON version_property_changes(is_breaking);
CREATE INDEX IF NOT EXISTS idx_prop_changes_auto ON version_property_changes(auto_migratable);

-- Workflow versions table for rollback and version history tracking.
-- Stores full workflow snapshots before modifications for guaranteed reversibility.
-- Auto-prunes to 10 versions per workflow to keep storage bounded.
CREATE TABLE IF NOT EXISTS workflow_versions (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  workflow_id TEXT NOT NULL,         -- n8n workflow ID
  version_number INTEGER NOT NULL,   -- Incremental version number (1, 2, 3...)
  workflow_name TEXT NOT NULL,       -- Workflow name at the time of backup
  workflow_snapshot TEXT NOT NULL,   -- Full workflow JSON before modification
  trigger TEXT NOT NULL CHECK(trigger IN (
    'partial_update',  -- Created by n8n_update_partial_workflow
    'full_update',     -- Created by n8n_update_full_workflow
    'autofix'          -- Created by n8n_autofix_workflow
  )),
  operations TEXT,                   -- JSON array of diff operations (if partial update)
  fix_types TEXT,                    -- JSON array of fix types (if autofix)
  metadata TEXT,                     -- Additional context (JSON)
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  UNIQUE(workflow_id, version_number)
);

-- Indexes for workflow version queries
CREATE INDEX IF NOT EXISTS idx_workflow_versions_workflow_id ON workflow_versions(workflow_id);
CREATE INDEX IF NOT EXISTS idx_workflow_versions_created_at ON workflow_versions(created_at);
CREATE INDEX IF NOT EXISTS idx_workflow_versions_trigger ON workflow_versions(trigger);
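A sketch of how the ranked_node_configs view might be consumed, reusing the `db` handle from the FTS5 sketch earlier (the node type is illustrative):

// Top-ranked, real-world configs for a node type.
const topConfigs = db.prepare(`
  SELECT template_name, parameters_json, complexity, rank
  FROM ranked_node_configs
  WHERE node_type = ?
  ORDER BY rank
  LIMIT 3
`).all('n8n-nodes-base.httpRequest');

for (const c of topConfigs) {
  console.log(`#${c.rank} ${c.template_name} (${c.complexity})`);
  console.log(JSON.parse(c.parameters_json));
}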
@@ -5,11 +5,13 @@
 * while maintaining simplicity for single-player use case
 */
import express from 'express';
import rateLimit from 'express-rate-limit';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
import { SSEServerTransport } from '@modelcontextprotocol/sdk/server/sse.js';
import { N8NDocumentationMCPServer } from './mcp/server';
import { ConsoleManager } from './utils/console-manager';
import { logger } from './utils/logger';
import { AuthManager } from './utils/auth';
import { readFileSync } from 'fs';
import dotenv from 'dotenv';
import { getStartupBaseUrl, formatEndpointUrls, detectBaseUrl } from './utils/url-detector';
@@ -23,6 +25,7 @@ import {
  STANDARD_PROTOCOL_VERSION
} from './utils/protocol-version';
import { InstanceContext, validateInstanceContext } from './types/instance-context';
import { SessionState } from './types/session-state';

dotenv.config();

@@ -69,6 +72,30 @@ function extractMultiTenantHeaders(req: express.Request): MultiTenantHeaders {
  };
}

/**
 * Security logging helper for audit trails
 * Provides structured logging for security-relevant events
 */
function logSecurityEvent(
  event: 'session_export' | 'session_restore' | 'session_restore_failed' | 'max_sessions_reached',
  details: {
    sessionId?: string;
    reason?: string;
    count?: number;
    instanceId?: string;
  }
): void {
  const timestamp = new Date().toISOString();
  const logEntry = {
    timestamp,
    event,
    ...details
  };

  // Log to the standard logger with a [SECURITY] prefix for easy filtering
  logger.info(`[SECURITY] ${event}`, logEntry);
}
export class SingleSessionHTTPServer {
  // Map to store transports by session ID (following the SDK pattern)
  private transports: { [sessionId: string]: StreamableHTTPServerTransport } = {};
@@ -153,17 +180,22 @@ export class SingleSessionHTTPServer {
   */
  private async removeSession(sessionId: string, reason: string): Promise<void> {
    try {
-     // Close transport if exists
-     if (this.transports[sessionId]) {
-       await this.transports[sessionId].close();
-       delete this.transports[sessionId];
-     }
-
-     // Remove server, metadata, and context
+     // Store a reference to the transport before deletion
+     const transport = this.transports[sessionId];
+
+     // Delete the transport FIRST to prevent the onclose handler from triggering recursion.
+     // This breaks the circular reference: removeSession -> close -> onclose -> removeSession
+     delete this.transports[sessionId];
      delete this.servers[sessionId];
      delete this.sessionMetadata[sessionId];
      delete this.sessionContexts[sessionId];

+     // Close the transport AFTER deletion.
+     // When the onclose handler fires, it won't find the transport anymore.
+     if (transport) {
+       await transport.close();
+     }

      logger.info('Session removed', { sessionId, reason });
    } catch (error) {
      logger.warn('Error removing session', { sessionId, reason, error });
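The ordering above matters because of how a transport's onclose hook is typically wired back into session cleanup. An illustrative sketch of the recursion being guarded against (the wiring shown here is assumed, not lifted from this diff):

// Illustrative wiring only - the real handler lives elsewhere in this file.
transport.onclose = () => {
  // Because removeSession deletes this.transports[sessionId] before calling
  // close(), this callback no longer finds a registered transport, so the
  // chain removeSession -> close() -> onclose -> removeSession cannot recurse.
  void removeSession(sessionId, 'transport_closed');
};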
@@ -186,11 +218,22 @@ export class SingleSessionHTTPServer {

  /**
   * Validate session ID format
   *
   * Accepts any non-empty string to support various MCP clients:
   * - UUIDv4 (internal n8n-mcp format)
   * - instance-{userId}-{hash}-{uuid} (multi-tenant format)
   * - Custom formats from mcp-remote and other proxies
   *
   * Security: Session validation happens via lookup in this.transports,
   * not format validation. This ensures compatibility with all MCP clients.
   *
   * @param sessionId - Session identifier from the MCP client
   * @returns true if valid, false otherwise
   */
  private isValidSessionId(sessionId: string): boolean {
-   // UUID v4 format validation
-   const uuidv4Regex = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
-   return uuidv4Regex.test(sessionId);
+   // Accept any non-empty string as a session ID.
+   // This ensures compatibility with all MCP clients and proxies.
+   return Boolean(sessionId && sessionId.length > 0);
  }

  /**
@@ -669,7 +712,20 @@ export class SingleSessionHTTPServer {
    if (!this.session) return true;
    return Date.now() - this.session.lastAccess.getTime() > this.sessionTimeout;
  }

  /**
   * Check if a specific session is expired based on its sessionId
   * Used for multi-session expiration checks during export/restore
   *
   * @param sessionId - The session ID to check
   * @returns true if the session is expired or doesn't exist
   */
  private isSessionExpired(sessionId: string): boolean {
    const metadata = this.sessionMetadata[sessionId];
    if (!metadata) return true;
    return Date.now() - metadata.lastAccess.getTime() > this.sessionTimeout;
  }

  /**
   * Start the HTTP server
   */
@@ -988,8 +1044,41 @@ export class SingleSessionHTTPServer {
    });

-   // Main MCP endpoint with authentication
-   app.post('/mcp', jsonParser, async (req: express.Request, res: express.Response): Promise<void> => {
+   // SECURITY: Rate limiting for the authentication endpoint.
+   // Prevents brute force attacks and DoS.
+   // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (HIGH-02)
+   const authLimiter = rateLimit({
+     windowMs: parseInt(process.env.AUTH_RATE_LIMIT_WINDOW || '900000'), // 15 minutes
+     max: parseInt(process.env.AUTH_RATE_LIMIT_MAX || '20'),             // 20 authentication attempts per IP
+     message: {
+       jsonrpc: '2.0',
+       error: {
+         code: -32000,
+         message: 'Too many authentication attempts. Please try again later.'
+       },
+       id: null
+     },
+     standardHeaders: true, // Return rate limit info in `RateLimit-*` headers
+     legacyHeaders: false,  // Disable `X-RateLimit-*` headers
+     handler: (req, res) => {
+       logger.warn('Rate limit exceeded', {
+         ip: req.ip,
+         userAgent: req.get('user-agent'),
+         event: 'rate_limit'
+       });
+       res.status(429).json({
+         jsonrpc: '2.0',
+         error: {
+           code: -32000,
+           message: 'Too many authentication attempts'
+         },
+         id: null
+       });
+     }
+   });
+
+   // Main MCP endpoint with authentication and rate limiting
+   app.post('/mcp', authLimiter, jsonParser, async (req: express.Request, res: express.Response): Promise<void> => {
      // Log comprehensive debug info about the request
      logger.info('POST /mcp request received - DETAILED DEBUG', {
        headers: req.headers,
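Both knobs are environment-driven with the defaults shown above. A deployment wanting a stricter policy would set them before the server constructs the limiter at startup; the values here are illustrative:

// e.g. exported in the service environment (or a .env file read by dotenv):
//   AUTH_RATE_LIMIT_WINDOW=300000   (5-minute window, in ms)
//   AUTH_RATE_LIMIT_MAX=5           (5 attempts per IP per window)
process.env.AUTH_RATE_LIMIT_WINDOW = '300000';
process.env.AUTH_RATE_LIMIT_MAX = '5';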
@@ -1080,15 +1169,19 @@ export class SingleSessionHTTPServer {

      // Extract the token and trim whitespace
      const token = authHeader.slice(7).trim();

-     // Check if the token matches
-     if (token !== this.authToken) {
-       logger.warn('Authentication failed: Invalid token', {
+     // SECURITY: Use a timing-safe comparison to prevent timing attacks.
+     // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
+     const isValidToken = this.authToken &&
+       AuthManager.timingSafeCompare(token, this.authToken);
+
+     if (!isValidToken) {
+       logger.warn('Authentication failed: Invalid token', {
          ip: req.ip,
          userAgent: req.get('user-agent'),
          reason: 'invalid_token'
        });
-       res.status(401).json({
+       res.status(401).json({
          jsonrpc: '2.0',
          error: {
            code: -32001,
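`AuthManager.timingSafeCompare` itself is not part of this diff. For reference, a constant-time comparison in Node is typically built on `crypto.timingSafeEqual`; the sketch below shows the general technique (hashing first normalizes lengths, since `timingSafeEqual` requires equal-length buffers) and is not necessarily the project's actual implementation:

import { timingSafeEqual, createHash } from 'crypto';

// Compare a candidate token against the secret in constant time.
function timingSafeCompare(candidate: string, secret: string): boolean {
  // Hash both inputs to fixed-length buffers so timingSafeEqual can run
  // even when the candidate has a different length than the secret.
  const a = createHash('sha256').update(candidate).digest();
  const b = createHash('sha256').update(secret).digest();
  return timingSafeEqual(a, b);
}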
@@ -1351,6 +1444,197 @@ export class SingleSessionHTTPServer {
      }
    };
  }

  /**
   * Export all active session state for persistence
   *
   * Used by multi-tenant backends to dump sessions before a container restart.
   * This method exports the minimal state needed to restore sessions after
   * a restart: session metadata (timing) and instance context (credentials).
   *
   * Transport and server objects are NOT persisted - they will be recreated
   * on the first request after restore.
   *
   * SECURITY WARNING: The exported data contains plaintext n8n API keys.
   * The downstream application MUST encrypt this data before persisting to disk.
   *
   * @returns Array of session state objects, excluding expired sessions
   *
   * @example
   * // Before shutdown
   * const sessions = server.exportSessionState();
   * await saveToEncryptedStorage(sessions);
   */
  public exportSessionState(): SessionState[] {
    const sessions: SessionState[] = [];
    const seenSessionIds = new Set<string>();

    // Iterate over all sessions with metadata (the source of truth for active sessions)
    for (const sessionId of Object.keys(this.sessionMetadata)) {
      // Check for duplicates (defensive programming)
      if (seenSessionIds.has(sessionId)) {
        logger.warn(`Duplicate sessionId detected during export: ${sessionId}`);
        continue;
      }

      // Skip expired sessions - they're not worth persisting
      if (this.isSessionExpired(sessionId)) {
        continue;
      }

      const metadata = this.sessionMetadata[sessionId];
      const context = this.sessionContexts[sessionId];

      // Skip sessions without context - these can't be restored meaningfully
      // (context is required to reconnect to the correct n8n instance)
      if (!context || !context.n8nApiUrl || !context.n8nApiKey) {
        logger.debug(`Skipping session ${sessionId} - missing required context`);
        continue;
      }

      seenSessionIds.add(sessionId);
      sessions.push({
        sessionId,
        metadata: {
          createdAt: metadata.createdAt.toISOString(),
          lastAccess: metadata.lastAccess.toISOString()
        },
        context: {
          n8nApiUrl: context.n8nApiUrl,
          n8nApiKey: context.n8nApiKey,
          instanceId: context.instanceId || sessionId, // Use sessionId as a fallback
          sessionId: context.sessionId,
          metadata: context.metadata
        }
      });
    }

    logger.info(`Exported ${sessions.length} session(s) for persistence`);
    logSecurityEvent('session_export', { count: sessions.length });
    return sessions;
  }

  /**
   * Restore session state from previously exported data
   *
   * Used by multi-tenant backends to restore sessions after a container restart.
   * This method restores only the session metadata and instance context.
   * Transport and server objects will be recreated on the first request.
   *
   * Restored sessions are "dormant" until a client makes a request, at which
   * point the transport and server will be initialized normally.
   *
   * @param sessions - Array of session state objects from exportSessionState()
   * @returns Number of sessions successfully restored
   *
   * @example
   * // After startup
   * const sessions = await loadFromEncryptedStorage();
   * const count = server.restoreSessionState(sessions);
   * console.log(`Restored ${count} sessions`);
   */
  public restoreSessionState(sessions: SessionState[]): number {
    let restoredCount = 0;

    for (const sessionState of sessions) {
      try {
        // Skip null or invalid session objects
        if (!sessionState || typeof sessionState !== 'object' || !sessionState.sessionId) {
          logger.warn('Skipping invalid session state object');
          continue;
        }

        // Check if we've hit the MAX_SESSIONS limit (check the real-time count)
        if (Object.keys(this.sessionMetadata).length >= MAX_SESSIONS) {
          logger.warn(
            `Reached MAX_SESSIONS limit (${MAX_SESSIONS}), skipping remaining sessions`
          );
          logSecurityEvent('max_sessions_reached', { count: MAX_SESSIONS });
          break;
        }

        // Skip if the session already exists (duplicate sessionId)
        if (this.sessionMetadata[sessionState.sessionId]) {
          logger.debug(`Skipping session ${sessionState.sessionId} - already exists`);
          continue;
        }

        // Parse and validate dates first
        const createdAt = new Date(sessionState.metadata.createdAt);
        const lastAccess = new Date(sessionState.metadata.lastAccess);

        if (isNaN(createdAt.getTime()) || isNaN(lastAccess.getTime())) {
          logger.warn(
            `Skipping session ${sessionState.sessionId} - invalid date format`
          );
          continue;
        }

        // Validate that the session isn't expired
        const age = Date.now() - lastAccess.getTime();
        if (age > this.sessionTimeout) {
          logger.debug(
            `Skipping session ${sessionState.sessionId} - expired (age: ${Math.round(age / 1000)}s)`
          );
          continue;
        }

        // Validate that the context exists (TypeScript null narrowing)
        if (!sessionState.context) {
          logger.warn(`Skipping session ${sessionState.sessionId} - missing context`);
          continue;
        }

        // Validate the context structure using existing validation
        const validation = validateInstanceContext(sessionState.context);
        if (!validation.valid) {
          const reason = validation.errors?.join(', ') || 'invalid context';
          logger.warn(
            `Skipping session ${sessionState.sessionId} - invalid context: ${reason}`
          );
          logSecurityEvent('session_restore_failed', {
            sessionId: sessionState.sessionId,
            reason
          });
          continue;
        }

        // Restore session metadata
        this.sessionMetadata[sessionState.sessionId] = {
          createdAt,
          lastAccess
        };

        // Restore session context
        this.sessionContexts[sessionState.sessionId] = {
          n8nApiUrl: sessionState.context.n8nApiUrl,
          n8nApiKey: sessionState.context.n8nApiKey,
          instanceId: sessionState.context.instanceId,
          sessionId: sessionState.context.sessionId,
          metadata: sessionState.context.metadata
        };

        logger.debug(`Restored session ${sessionState.sessionId}`);
        logSecurityEvent('session_restore', {
          sessionId: sessionState.sessionId,
          instanceId: sessionState.context.instanceId
        });
        restoredCount++;
      } catch (error) {
        logger.error(`Failed to restore session ${sessionState.sessionId}:`, error);
        logSecurityEvent('session_restore_failed', {
          sessionId: sessionState.sessionId,
          reason: error instanceof Error ? error.message : 'unknown error'
        });
        // Continue with the next session - don't let one failure break the entire restore
      }
    }

    logger.info(
      `Restored ${restoredCount}/${sessions.length} session(s) from persistence`
    );
    return restoredCount;
  }
}

// Start if called directly
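The `saveToEncryptedStorage` / `loadFromEncryptedStorage` helpers referenced in the @example blocks are left to the host application. One way to satisfy the encrypt-before-persist requirement, sketched with Node's built-in AES-256-GCM (the file name and key-sourcing are assumptions):

import { createCipheriv, createDecipheriv, randomBytes } from 'crypto';
import { writeFile, readFile } from 'fs/promises';

// 32-byte key supplied by the host environment (hypothetical variable name).
const KEY = Buffer.from(process.env.SESSION_STORE_KEY!, 'hex');

async function saveToEncryptedStorage(sessions: object[]): Promise<void> {
  const iv = randomBytes(12);
  const cipher = createCipheriv('aes-256-gcm', KEY, iv);
  const plaintext = Buffer.from(JSON.stringify(sessions), 'utf8');
  const ciphertext = Buffer.concat([cipher.update(plaintext), cipher.final()]);
  // Persist IV + auth tag + ciphertext together so decryption is self-contained.
  await writeFile('sessions.enc', Buffer.concat([iv, cipher.getAuthTag(), ciphertext]));
}

async function loadFromEncryptedStorage(): Promise<object[]> {
  const blob = await readFile('sessions.enc');
  const iv = blob.subarray(0, 12);
  const tag = blob.subarray(12, 28); // GCM auth tag is 16 bytes
  const decipher = createDecipheriv('aes-256-gcm', KEY, iv);
  decipher.setAuthTag(tag);
  const plaintext = Buffer.concat([decipher.update(blob.subarray(28)), decipher.final()]);
  return JSON.parse(plaintext.toString('utf8'));
}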
@@ -9,6 +9,7 @@ import { n8nDocumentationToolsFinal } from './mcp/tools';
import { n8nManagementTools } from './mcp/tools-n8n-manager';
import { N8NDocumentationMCPServer } from './mcp/server';
import { logger } from './utils/logger';
import { AuthManager } from './utils/auth';
import { PROJECT_VERSION } from './utils/version';
import { isN8nApiConfigured } from './config/n8n-api';
import dotenv from 'dotenv';
@@ -22,6 +23,17 @@ import {

dotenv.config();

/**
 * MCP tool response format with optional structured content
 */
interface MCPToolResponse {
  content: Array<{
    type: 'text';
    text: string;
  }>;
  structuredContent?: unknown;
}

let expressServer: any;
let authToken: string | null = null;
@@ -308,15 +320,19 @@ export async function startFixedHTTPServer() {

      // Extract the token and trim whitespace
      const token = authHeader.slice(7).trim();

-     // Check if the token matches
-     if (token !== authToken) {
-       logger.warn('Authentication failed: Invalid token', {
+     // SECURITY: Use a timing-safe comparison to prevent timing attacks.
+     // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
+     const isValidToken = authToken &&
+       AuthManager.timingSafeCompare(token, authToken);
+
+     if (!isValidToken) {
+       logger.warn('Authentication failed: Invalid token', {
          ip: req.ip,
          userAgent: req.get('user-agent'),
          reason: 'invalid_token'
        });
-       res.status(401).json({
+       res.status(401).json({
          jsonrpc: '2.0',
          error: {
            code: -32001,
@@ -396,19 +412,46 @@ export async function startFixedHTTPServer() {
      // Delegate to the MCP server
      const toolName = jsonRpcRequest.params?.name;
      const toolArgs = jsonRpcRequest.params?.arguments || {};

      try {
        const result = await mcpServer.executeTool(toolName, toolArgs);

+       // Convert the result to JSON text for the content field
+       let responseText = JSON.stringify(result, null, 2);
+
+       // Build an MCP-compliant response with structuredContent for validation tools
+       const mcpResult: MCPToolResponse = {
+         content: [
+           {
+             type: 'text',
+             text: responseText
+           }
+         ]
+       };
+
+       // Add structuredContent for validation tools (they have an outputSchema).
+       // Apply a 1MB safety limit to prevent memory issues (matches the STDIO server behavior).
+       if (toolName.startsWith('validate_')) {
+         const resultSize = responseText.length;
+
+         if (resultSize > 1000000) {
+           // The response is too large - truncate and warn
+           logger.warn(
+             `Validation tool ${toolName} response is very large (${resultSize} chars). ` +
+             `Truncating for HTTP transport safety.`
+           );
+           mcpResult.content[0].text = responseText.substring(0, 999000) +
+             '\n\n[Response truncated due to size limits]';
+           // Don't include structuredContent for truncated responses
+         } else {
+           // Normal case - include structured content for MCP protocol compliance
+           mcpResult.structuredContent = result;
+         }
+       }
+
        response = {
          jsonrpc: '2.0',
-         result: {
-           content: [
-             {
-               type: 'text',
-               text: JSON.stringify(result, null, 2)
-             }
-           ]
-         },
+         result: mcpResult,
          id: jsonRpcRequest.id
        };
      } catch (error) {
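Concretely, a validate_* tool response under the size limit now carries both the human-readable text and the machine-readable object. A sketch of the wire shape, with illustrative values:

const exampleResponse = {
  jsonrpc: '2.0',
  result: {
    content: [
      { type: 'text', text: '{\n  "valid": true,\n  "errors": []\n}' }
    ],
    // Present only for validate_* tools under the 1MB limit:
    structuredContent: { valid: true, errors: [] }
  },
  id: 42
};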
19  src/index.ts
@@ -10,6 +10,25 @@ export { SingleSessionHTTPServer } from './http-server-single-session';
export { ConsoleManager } from './utils/console-manager';
export { N8NDocumentationMCPServer } from './mcp/server';

// Type exports for multi-tenant and library usage
export type {
  InstanceContext
} from './types/instance-context';
export {
  validateInstanceContext,
  isInstanceContext
} from './types/instance-context';
export type {
  SessionState
} from './types/session-state';

// Re-export MCP SDK types for convenience
export type {
  Tool,
  CallToolResult,
  ListToolsResult
} from '@modelcontextprotocol/sdk/types.js';

// Default export for convenience
import N8NMCPEngine from './mcp-engine';
export default N8NMCPEngine;
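With these exports, a downstream consumer can type its multi-tenant plumbing against the package's public surface. A usage sketch (assuming the published package name `n8n-mcp`; the URL and tenant ID are placeholders):

import N8NMCPEngine, {
  type InstanceContext,
  validateInstanceContext
} from 'n8n-mcp';

const context: InstanceContext = {
  n8nApiUrl: 'https://n8n.example.com',
  n8nApiKey: process.env.N8N_API_KEY!,
  instanceId: 'tenant-a'
};

// Reuse the library's own validation before handing the context to a session.
const check = validateInstanceContext(context);
if (!check.valid) {
  throw new Error(`Bad instance context: ${check.errors?.join(', ')}`);
}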
@@ -9,6 +9,7 @@ import { Request, Response } from 'express';
import { SingleSessionHTTPServer } from './http-server-single-session';
import { logger } from './utils/logger';
import { InstanceContext } from './types/instance-context';
import { SessionState } from './types/session-state';

export interface EngineHealth {
  status: 'healthy' | 'unhealthy';
@@ -97,7 +98,7 @@ export class N8NMCPEngine {
        total: Math.round(memoryUsage.heapTotal / 1024 / 1024),
        unit: 'MB'
      },
-     version: '2.3.2'
+     version: '2.24.1'
    };
  } catch (error) {
    logger.error('Health check failed:', error);
@@ -106,7 +107,7 @@ export class N8NMCPEngine {
      uptime: 0,
      sessionActive: false,
      memoryUsage: { used: 0, total: 0, unit: 'MB' },
-     version: '2.3.2'
+     version: '2.24.1'
    };
  }
}
@@ -118,10 +119,58 @@ export class N8NMCPEngine {
  getSessionInfo(): { active: boolean; sessionId?: string; age?: number } {
    return this.server.getSessionInfo();
  }

  /**
   * Export all active session state for persistence
   *
   * Used by multi-tenant backends to dump sessions before a container restart.
   * Returns an array of session state objects containing metadata and credentials.
   *
   * SECURITY WARNING: Exported data contains plaintext n8n API keys.
   * Encrypt before persisting to disk.
   *
   * @returns Array of session state objects
   *
   * @example
   * // Before shutdown
   * const sessions = engine.exportSessionState();
   * await saveToEncryptedStorage(sessions);
   */
  exportSessionState(): SessionState[] {
    if (!this.server) {
      logger.warn('Cannot export sessions: server not initialized');
      return [];
    }
    return this.server.exportSessionState();
  }

  /**
   * Restore session state from previously exported data
   *
   * Used by multi-tenant backends to restore sessions after a container restart.
   * Restores session metadata and instance context. Transports/servers are
   * recreated on the first request.
   *
   * @param sessions - Array of session state objects from exportSessionState()
   * @returns Number of sessions successfully restored
   *
   * @example
   * // After startup
   * const sessions = await loadFromEncryptedStorage();
   * const count = engine.restoreSessionState(sessions);
   * console.log(`Restored ${count} sessions`);
   */
  restoreSessionState(sessions: SessionState[]): number {
    if (!this.server) {
      logger.warn('Cannot restore sessions: server not initialized');
      return 0;
    }
    return this.server.restoreSessionState(sessions);
  }

  /**
   * Graceful shutdown for service lifecycle
   *
   * @example
   * process.on('SIGTERM', async () => {
   *   await engine.shutdown();
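Taken together, the engine-level wrappers let a host process fold session persistence into its lifecycle. A sketch, reusing the hypothetical storage helpers from the docstrings above (run inside an async bootstrap function):

const engine = new N8NMCPEngine();

// On boot: bring dormant sessions back before serving traffic.
const saved = await loadFromEncryptedStorage().catch(() => [] as SessionState[]);
logger.info(`Restored ${engine.restoreSessionState(saved)} session(s)`);

// On SIGTERM: dump live sessions, then shut down cleanly.
process.on('SIGTERM', async () => {
  await saveToEncryptedStorage(engine.exportSessionState());
  await engine.shutdown();
  process.exit(0);
});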
@@ -62,8 +62,12 @@ export class MCPEngine {
        hiddenProperties: []
      };
    }

-   return ConfigValidator.validate(args.nodeType, args.config, node.properties || []);
+   // CRITICAL FIX: Extract user-provided keys before validation.
+   // This prevents false warnings about default values.
+   const userProvidedKeys = new Set(Object.keys(args.config || {}));
+
+   return ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys);
  }

  async validateNodeMinimal(args: any) {
@@ -89,10 +93,6 @@ export class MCPEngine {
    return this.repository.searchNodeProperties(args.nodeType, args.query, args.maxResults || 20);
  }

- async getNodeForTask(args: any) {
-   return TaskTemplates.getTaskTemplate(args.task);
- }
-
  async listAITools(args: any) {
    return this.repository.getAIToolNodes();
  }
File diff suppressed because it is too large
@@ -11,6 +11,25 @@ import { getN8nApiClient } from './handlers-n8n-manager';
import { N8nApiError, getUserFriendlyErrorMessage } from '../utils/n8n-errors';
import { logger } from '../utils/logger';
import { InstanceContext } from '../types/instance-context';
import { validateWorkflowStructure } from '../services/n8n-validation';
import { NodeRepository } from '../database/node-repository';
import { WorkflowVersioningService } from '../services/workflow-versioning-service';
import { WorkflowValidator } from '../services/workflow-validator';
import { EnhancedConfigValidator } from '../services/enhanced-config-validator';

// Cached validator instance to avoid recreating it on every mutation
let cachedValidator: WorkflowValidator | null = null;

/**
 * Get or create the cached workflow validator instance
 * Reuses the same validator to avoid redundant NodeSimilarityService initialization
 */
function getValidator(repository: NodeRepository): WorkflowValidator {
  if (!cachedValidator) {
    cachedValidator = new WorkflowValidator(repository, EnhancedConfigValidator);
  }
  return cachedValidator;
}

// Zod schema for the diff request
const workflowDiffSchema = z.object({
@@ -27,33 +46,55 @@ const workflowDiffSchema = z.object({
    // Connection operations
    source: z.string().optional(),
    target: z.string().optional(),
    from: z.string().optional(), // For rewireConnection
    to: z.string().optional(),   // For rewireConnection
    sourceOutput: z.string().optional(),
    targetInput: z.string().optional(),
    sourceIndex: z.number().optional(),
    targetIndex: z.number().optional(),
    // Smart parameters (Phase 1 UX improvement)
    branch: z.enum(['true', 'false']).optional(),
    case: z.number().optional(),
    ignoreErrors: z.boolean().optional(),
    // Connection cleanup operations
    dryRun: z.boolean().optional(),
    connections: z.any().optional(),
    // Metadata operations
    settings: z.any().optional(),
    name: z.string().optional(),
    tag: z.string().optional(),
  })),
  validateOnly: z.boolean().optional(),
  continueOnError: z.boolean().optional(),
  createBackup: z.boolean().optional(),
  intent: z.string().optional(),
});

-export async function handleUpdatePartialWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse> {
+export async function handleUpdatePartialWorkflow(
+  args: unknown,
+  repository: NodeRepository,
+  context?: InstanceContext
+): Promise<McpToolResponse> {
  const startTime = Date.now();
  const sessionId = `mutation_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  let workflowBefore: any = null;
  let validationBefore: any = null;
  let validationAfter: any = null;

  try {
    // Debug logging (only in debug mode)
    if (process.env.DEBUG_MCP === 'true') {
      logger.debug('Workflow diff request received', {
        argsType: typeof args,
        hasWorkflowId: args && typeof args === 'object' && 'workflowId' in args,
        operationCount: args && typeof args === 'object' && 'operations' in args ?
          (args as any).operations?.length : 0
      });
    }

    // Validate the input
    const input = workflowDiffSchema.parse(args);

    // Get the API client
    const client = getN8nApiClient(context);
    if (!client) {
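For orientation, a request that satisfies this schema could look like the following; the workflow ID, node names, and intent text are illustrative:

const exampleDiffRequest = {
  id: 'wf_123',
  operations: [
    { type: 'addConnection', source: 'HTTP Request', target: 'Set' },
    { type: 'updateName', name: 'Fetch and transform' }
  ],
  createBackup: true,     // default behavior: snapshot before mutating
  continueOnError: false, // fail fast unless partial success is acceptable
  intent: 'Wire HTTP Request into Set and rename the workflow'
};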
@@ -62,11 +103,31 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
        error: 'n8n API not configured. Please set N8N_API_URL and N8N_API_KEY environment variables.'
      };
    }

    // Fetch the current workflow
    let workflow;
    try {
      workflow = await client.getWorkflow(input.id);
      // Store the original workflow for telemetry
      workflowBefore = JSON.parse(JSON.stringify(workflow));

      // Validate the workflow BEFORE mutation (for telemetry)
      try {
        const validator = getValidator(repository);
        validationBefore = await validator.validateWorkflow(workflowBefore, {
          validateNodes: true,
          validateConnections: true,
          validateExpressions: true,
          profile: 'runtime'
        });
      } catch (validationError) {
        logger.debug('Pre-mutation validation failed (non-blocking):', validationError);
        // Don't block the mutation on validation errors
        validationBefore = {
          valid: false,
          errors: [{ type: 'validation_error', message: 'Validation failed' }]
        };
      }
    } catch (error) {
      if (error instanceof N8nApiError) {
        return {
@@ -77,20 +138,56 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
      }
      throw error;
    }

    // Create a backup before modifying the workflow (default: true)
    if (input.createBackup !== false && !input.validateOnly) {
      try {
        const versioningService = new WorkflowVersioningService(repository, client);
        const backupResult = await versioningService.createBackup(input.id, workflow, {
          trigger: 'partial_update',
          operations: input.operations
        });

        logger.info('Workflow backup created', {
          workflowId: input.id,
          versionId: backupResult.versionId,
          versionNumber: backupResult.versionNumber,
          pruned: backupResult.pruned
        });
      } catch (error: any) {
        logger.warn('Failed to create workflow backup', {
          workflowId: input.id,
          error: error.message
        });
        // Continue with the update even if the backup fails (non-blocking)
      }
    }

    // Apply the diff operations
    const diffEngine = new WorkflowDiffEngine();
-   const diffResult = await diffEngine.applyDiff(workflow, input as WorkflowDiffRequest);
+   const diffRequest = input as WorkflowDiffRequest;
+   const diffResult = await diffEngine.applyDiff(workflow, diffRequest);

+   // Check if this is a complete failure or a partial success in continueOnError mode
    if (!diffResult.success) {
-     return {
-       success: false,
-       error: 'Failed to apply diff operations',
-       details: {
-         errors: diffResult.errors,
-         operationsApplied: diffResult.operationsApplied
-       }
-     };
+     // In continueOnError mode, partial success is still valuable
+     if (diffRequest.continueOnError && diffResult.workflow && diffResult.operationsApplied && diffResult.operationsApplied > 0) {
+       logger.info(`continueOnError mode: Applying ${diffResult.operationsApplied} successful operations despite ${diffResult.failed?.length || 0} failures`);
+       // Continue to update the workflow with the partial changes
+     } else {
+       // Complete failure - return an error
+       return {
+         success: false,
+         error: 'Failed to apply diff operations',
+         details: {
+           errors: diffResult.errors,
+           warnings: diffResult.warnings,
+           operationsApplied: diffResult.operationsApplied,
+           applied: diffResult.applied,
+           failed: diffResult.failed
+         }
+       };
+     }
    }

    // If validateOnly, return the validation result
@@ -101,25 +198,204 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
        data: {
          valid: true,
          operationsToApply: input.operations.length
        },
        details: {
          warnings: diffResult.warnings
        }
      };
    }

    // Validate the final workflow structure after applying all operations.
    // This prevents creating workflows that pass operation-level validation
    // but fail workflow-level validation (e.g., the UI can't render them).
    //
    // Validation can be skipped for specific integration tests that need to test
    // n8n API behavior with edge-case workflows by setting SKIP_WORKFLOW_VALIDATION=true.
    if (diffResult.workflow) {
      const structureErrors = validateWorkflowStructure(diffResult.workflow);
      if (structureErrors.length > 0) {
        const skipValidation = process.env.SKIP_WORKFLOW_VALIDATION === 'true';

        logger.warn('Workflow structure validation failed after applying diff operations', {
          workflowId: input.id,
          errors: structureErrors,
          blocking: !skipValidation
        });

        // Analyze the error types to provide targeted recovery guidance
        const errorTypes = new Set<string>();
        structureErrors.forEach(err => {
          if (err.includes('operator') || err.includes('singleValue')) errorTypes.add('operator_issues');
          if (err.includes('connection') || err.includes('referenced')) errorTypes.add('connection_issues');
          if (err.includes('Missing') || err.includes('missing')) errorTypes.add('missing_metadata');
          if (err.includes('branch') || err.includes('output')) errorTypes.add('branch_mismatch');
        });

        // Build recovery guidance based on the error types
        const recoverySteps = [];
        if (errorTypes.has('operator_issues')) {
          recoverySteps.push('Operator structure issue detected. Use validate_node_operation to check specific nodes.');
          recoverySteps.push('Binary operators (equals, contains, greaterThan, etc.) must NOT have singleValue:true');
          recoverySteps.push('Unary operators (isEmpty, isNotEmpty, true, false) REQUIRE singleValue:true');
        }
        if (errorTypes.has('connection_issues')) {
          recoverySteps.push('Connection validation failed. Check that all node connections reference existing nodes.');
          recoverySteps.push('Use the cleanStaleConnections operation to remove connections to non-existent nodes.');
        }
        if (errorTypes.has('missing_metadata')) {
          recoverySteps.push('Missing metadata detected. Ensure filter-based nodes (IF v2.2+, Switch v3.2+) have complete conditions.options.');
          recoverySteps.push('Required options: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}');
        }
        if (errorTypes.has('branch_mismatch')) {
          recoverySteps.push('Branch count mismatch. Ensure Switch nodes have outputs for all rules (e.g., 3 rules = 3 output branches).');
        }

        // Add generic recovery steps if there is no specific guidance
        if (recoverySteps.length === 0) {
          recoverySteps.push('Review the validation errors listed above');
          recoverySteps.push('Fix issues using updateNode or cleanStaleConnections operations');
          recoverySteps.push('Run validate_workflow again to verify fixes');
        }

        const errorMessage = structureErrors.length === 1
          ? `Workflow validation failed: ${structureErrors[0]}`
          : `Workflow validation failed with ${structureErrors.length} structural issues`;

        // If validation is not skipped, return an error and block the save
        if (!skipValidation) {
          return {
            success: false,
            error: errorMessage,
            details: {
              errors: structureErrors,
              errorCount: structureErrors.length,
              operationsApplied: diffResult.operationsApplied,
              applied: diffResult.applied,
              recoveryGuidance: recoverySteps,
              note: 'Operations were applied but created an invalid workflow structure. The workflow was NOT saved to n8n to prevent UI rendering errors.',
              autoSanitizationNote: 'Auto-sanitization runs on all nodes during updates to fix operator structures and add missing metadata. However, it cannot fix all issues (e.g., broken connections, branch mismatches). Use the recovery guidance above to resolve remaining issues.'
            }
          };
        }
        // Validation skipped: log a warning but continue (for specific integration tests)
        logger.info('Workflow validation skipped (SKIP_WORKFLOW_VALIDATION=true): Allowing workflow with validation warnings to proceed', {
          workflowId: input.id,
          warningCount: structureErrors.length
        });
      }
    }

    // Update the workflow via the API
    try {
      const updatedWorkflow = await client.updateWorkflow(input.id, diffResult.workflow!);

      // Handle activation/deactivation if requested
      let finalWorkflow = updatedWorkflow;
      let activationMessage = '';

      // Validate the workflow AFTER mutation (for telemetry)
      try {
        const validator = getValidator(repository);
        validationAfter = await validator.validateWorkflow(finalWorkflow, {
          validateNodes: true,
          validateConnections: true,
          validateExpressions: true,
          profile: 'runtime'
        });
      } catch (validationError) {
        logger.debug('Post-mutation validation failed (non-blocking):', validationError);
        // Don't block on validation errors
        validationAfter = {
          valid: false,
          errors: [{ type: 'validation_error', message: 'Validation failed' }]
        };
      }

      if (diffResult.shouldActivate) {
        try {
          finalWorkflow = await client.activateWorkflow(input.id);
          activationMessage = ' Workflow activated.';
        } catch (activationError) {
          logger.error('Failed to activate workflow after update', activationError);
          return {
            success: false,
            error: 'Workflow updated successfully but activation failed',
            details: {
              workflowUpdated: true,
              activationError: activationError instanceof Error ? activationError.message : 'Unknown error'
            }
          };
        }
      } else if (diffResult.shouldDeactivate) {
        try {
          finalWorkflow = await client.deactivateWorkflow(input.id);
          activationMessage = ' Workflow deactivated.';
        } catch (deactivationError) {
          logger.error('Failed to deactivate workflow after update', deactivationError);
          return {
            success: false,
            error: 'Workflow updated successfully but deactivation failed',
            details: {
              workflowUpdated: true,
              deactivationError: deactivationError instanceof Error ? deactivationError.message : 'Unknown error'
            }
          };
        }
      }

      // Track the successful mutation
      if (workflowBefore && !input.validateOnly) {
        trackWorkflowMutation({
          sessionId,
          toolName: 'n8n_update_partial_workflow',
          userIntent: input.intent || 'Partial workflow update',
          operations: input.operations,
          workflowBefore,
          workflowAfter: finalWorkflow,
          validationBefore,
          validationAfter,
          mutationSuccess: true,
          durationMs: Date.now() - startTime,
        }).catch(err => {
          logger.debug('Failed to track mutation telemetry:', err);
        });
      }

      return {
        success: true,
-       data: updatedWorkflow,
-       message: `Workflow "${updatedWorkflow.name}" updated successfully. Applied ${diffResult.operationsApplied} operations.`,
+       data: finalWorkflow,
+       message: `Workflow "${finalWorkflow.name}" updated successfully. Applied ${diffResult.operationsApplied} operations.${activationMessage}`,
        details: {
          operationsApplied: diffResult.operationsApplied,
-         workflowId: updatedWorkflow.id,
-         workflowName: updatedWorkflow.name
+         workflowId: finalWorkflow.id,
+         workflowName: finalWorkflow.name,
+         active: finalWorkflow.active,
+         applied: diffResult.applied,
+         failed: diffResult.failed,
+         errors: diffResult.errors,
+         warnings: diffResult.warnings
        }
      };
    } catch (error) {
      // Track the failed mutation
      if (workflowBefore && !input.validateOnly) {
        trackWorkflowMutation({
          sessionId,
          toolName: 'n8n_update_partial_workflow',
          userIntent: input.intent || 'Partial workflow update',
          operations: input.operations,
          workflowBefore,
          workflowAfter: workflowBefore, // No change since it failed
          validationBefore,
          validationAfter: validationBefore, // Same as before since the mutation failed
          mutationSuccess: false,
          mutationError: error instanceof Error ? error.message : 'Unknown error',
          durationMs: Date.now() - startTime,
        }).catch(err => {
          logger.warn('Failed to track mutation telemetry for failed operation:', err);
        });
      }

      if (error instanceof N8nApiError) {
        return {
          success: false,
@@ -138,7 +414,7 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
      details: { errors: error.errors }
    };
  }

  logger.error('Failed to update partial workflow', error);
  return {
    success: false,
@@ -147,3 +423,90 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
  }
}

/**
 * Infer the intent from operations when not explicitly provided
 */
function inferIntentFromOperations(operations: any[]): string {
  if (!operations || operations.length === 0) {
    return 'Partial workflow update';
  }

  const opTypes = operations.map((op) => op.type);
  const opCount = operations.length;

  // Single operation - be specific
  if (opCount === 1) {
    const op = operations[0];
    switch (op.type) {
      case 'addNode':
        return `Add ${op.node?.type || 'node'}`;
      case 'removeNode':
        return `Remove node ${op.nodeName || op.nodeId || ''}`.trim();
      case 'updateNode':
        return `Update node ${op.nodeName || op.nodeId || ''}`.trim();
      case 'addConnection':
        return `Connect ${op.source || 'node'} to ${op.target || 'node'}`;
      case 'removeConnection':
        return `Disconnect ${op.source || 'node'} from ${op.target || 'node'}`;
      case 'rewireConnection':
        return `Rewire ${op.source || 'node'} from ${op.from || ''} to ${op.to || ''}`.trim();
      case 'updateName':
        return `Rename workflow to "${op.name || ''}"`;
      case 'activateWorkflow':
        return 'Activate workflow';
      case 'deactivateWorkflow':
        return 'Deactivate workflow';
      default:
        return `Workflow ${op.type}`;
    }
  }

  // Multiple operations - summarize the pattern
  const typeSet = new Set(opTypes);
  const summary: string[] = [];

  if (typeSet.has('addNode')) {
    const count = opTypes.filter((t) => t === 'addNode').length;
    summary.push(`add ${count} node${count > 1 ? 's' : ''}`);
  }
  if (typeSet.has('removeNode')) {
    const count = opTypes.filter((t) => t === 'removeNode').length;
    summary.push(`remove ${count} node${count > 1 ? 's' : ''}`);
  }
  if (typeSet.has('updateNode')) {
    const count = opTypes.filter((t) => t === 'updateNode').length;
    summary.push(`update ${count} node${count > 1 ? 's' : ''}`);
  }
  if (typeSet.has('addConnection') || typeSet.has('rewireConnection')) {
    summary.push('modify connections');
  }
  if (typeSet.has('updateName') || typeSet.has('updateSettings')) {
    summary.push('update metadata');
  }

  return summary.length > 0
    ? `Workflow update: ${summary.join(', ')}`
    : `Workflow update: ${opCount} operations`;
}

/**
 * Track a workflow mutation for telemetry
 */
async function trackWorkflowMutation(data: any): Promise<void> {
  try {
    // Enhance the intent if it's missing or generic
    if (
      !data.userIntent ||
      data.userIntent === 'Partial workflow update' ||
      data.userIntent.length < 10
    ) {
      data.userIntent = inferIntentFromOperations(data.operations);
    }

    const { telemetry } = await import('../telemetry/telemetry-manager.js');
    await telemetry.trackWorkflowMutation(data);
  } catch (error) {
    logger.debug('Telemetry tracking failed:', error);
  }
}
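To make the summarization concrete, here is what the helper produces for two sample inputs (traced by hand through the switch and summary logic above):

inferIntentFromOperations([
  { type: 'addConnection', source: 'Webhook', target: 'Slack' }
]);
// -> 'Connect Webhook to Slack'

inferIntentFromOperations([
  { type: 'addNode', node: { type: 'n8n-nodes-base.set' } },
  { type: 'addNode', node: { type: 'n8n-nodes-base.if' } },
  { type: 'updateName', name: 'Routing v2' }
]);
// -> 'Workflow update: add 2 nodes, update metadata'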
152  src/mcp/index.ts
@@ -3,6 +3,9 @@
|
||||
import { N8NDocumentationMCPServer } from './server';
|
||||
import { logger } from '../utils/logger';
|
||||
import { TelemetryConfigManager } from '../telemetry/config-manager';
|
||||
import { EarlyErrorLogger } from '../telemetry/early-error-logger';
|
||||
import { STARTUP_CHECKPOINTS, findFailedCheckpoint, StartupCheckpoint } from '../telemetry/startup-checkpoints';
|
||||
import { existsSync } from 'fs';
|
||||
|
||||
// Add error details to stderr for Claude Desktop debugging
|
||||
process.on('uncaughtException', (error) => {
|
||||
@@ -21,9 +24,50 @@ process.on('unhandledRejection', (reason, promise) => {
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
/**
|
||||
* Detects if running in a container environment (Docker, Podman, Kubernetes, etc.)
|
||||
* Uses multiple detection methods for robustness:
|
||||
* 1. Environment variables (IS_DOCKER, IS_CONTAINER with multiple formats)
|
||||
* 2. Filesystem markers (/.dockerenv, /run/.containerenv)
|
||||
*/
|
||||
function isContainerEnvironment(): boolean {
|
||||
// Check environment variables with multiple truthy formats
|
||||
const dockerEnv = (process.env.IS_DOCKER || '').toLowerCase();
|
||||
const containerEnv = (process.env.IS_CONTAINER || '').toLowerCase();
|
||||
|
||||
if (['true', '1', 'yes'].includes(dockerEnv)) {
|
||||
return true;
|
||||
}
|
||||
if (['true', '1', 'yes'].includes(containerEnv)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Fallback: Check filesystem markers
|
||||
// /.dockerenv exists in Docker containers
|
||||
// /run/.containerenv exists in Podman containers
|
||||
try {
|
||||
return existsSync('/.dockerenv') || existsSync('/run/.containerenv');
|
||||
} catch (error) {
|
||||
// If filesystem check fails, assume not in container
|
||||
logger.debug('Container detection filesystem check failed:', error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
// Handle telemetry CLI commands
|
||||
const args = process.argv.slice(2);
|
||||
// Initialize early error logger for pre-handshake error capture (v2.18.3)
|
||||
// Now using singleton pattern with defensive initialization
|
||||
const startTime = Date.now();
|
||||
const earlyLogger = EarlyErrorLogger.getInstance();
|
||||
const checkpoints: StartupCheckpoint[] = [];
|
||||
|
||||
try {
|
||||
// Checkpoint: Process started (fire-and-forget, no await)
|
||||
earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.PROCESS_STARTED);
|
||||
checkpoints.push(STARTUP_CHECKPOINTS.PROCESS_STARTED);
|
||||
|
||||
// Handle telemetry CLI commands
|
||||
const args = process.argv.slice(2);
|
||||
if (args.length > 0 && args[0] === 'telemetry') {
|
||||
const telemetryConfig = TelemetryConfigManager.getInstance();
|
||||
const action = args[1];
|
||||
@@ -58,6 +102,15 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
 
   const mode = process.env.MCP_MODE || 'stdio';
 
+  // Checkpoint: Telemetry initializing (fire-and-forget, no await)
+  earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);
+  checkpoints.push(STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);
+
+  // Telemetry is already initialized by TelemetryConfigManager in imports
+  // Mark as ready (fire-and-forget, no await)
+  earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.TELEMETRY_READY);
+  checkpoints.push(STARTUP_CHECKPOINTS.TELEMETRY_READY);
+
   try {
     // Only show debug messages in HTTP mode to avoid corrupting stdio communication
     if (mode === 'http') {
@@ -65,6 +118,10 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
       console.error('Current directory:', process.cwd());
       console.error('Node version:', process.version);
     }
 
+    // Checkpoint: MCP handshake starting (fire-and-forget, no await)
+    earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);
+    checkpoints.push(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);
+
     if (mode === 'http') {
       // Check if we should use the fixed implementation
@@ -90,15 +147,95 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
       }
     } else {
       // Stdio mode - for local Claude Desktop
-      const server = new N8NDocumentationMCPServer();
+      const server = new N8NDocumentationMCPServer(undefined, earlyLogger);
+
+      // Graceful shutdown handler (fixes Issue #277)
+      let isShuttingDown = false;
+      const shutdown = async (signal: string = 'UNKNOWN') => {
+        if (isShuttingDown) return; // Prevent multiple shutdown calls
+        isShuttingDown = true;
+
+        try {
+          logger.info(`Shutdown initiated by: ${signal}`);
+
+          await server.shutdown();
+
+          // Close stdin to signal we're done reading
+          if (process.stdin && !process.stdin.destroyed) {
+            process.stdin.pause();
+            process.stdin.destroy();
+          }
+
+          // Exit with timeout to ensure we don't hang
+          // Increased to 1000ms for slower systems
+          setTimeout(() => {
+            logger.warn('Shutdown timeout exceeded, forcing exit');
+            process.exit(0);
+          }, 1000).unref();
+
+          // Let the timeout handle the exit for graceful shutdown
+          // (removed immediate exit to allow cleanup to complete)
+        } catch (error) {
+          logger.error('Error during shutdown:', error);
+          process.exit(1);
+        }
+      };
+
+      // Handle termination signals (fixes Issue #277)
+      // Signal handling strategy:
+      // - Claude Desktop (Windows/macOS/Linux): stdin handlers + signal handlers
+      //   Primary: stdin close when Claude quits | Fallback: SIGTERM/SIGINT/SIGHUP
+      // - Container environments: signal handlers ONLY
+      //   stdin closed in detached mode would trigger immediate shutdown
+      //   Container detection via IS_DOCKER/IS_CONTAINER env vars + filesystem markers
+      // - Manual execution: Both stdin and signal handlers work
+      process.on('SIGTERM', () => shutdown('SIGTERM'));
+      process.on('SIGINT', () => shutdown('SIGINT'));
+      process.on('SIGHUP', () => shutdown('SIGHUP'));
+
+      // Handle stdio disconnect - PRIMARY shutdown mechanism for Claude Desktop
+      // Skip in container environments (Docker, Kubernetes, Podman) to prevent
+      // premature shutdown when stdin is closed in detached mode.
+      // Containers rely on signal handlers (SIGTERM/SIGINT/SIGHUP) for proper shutdown.
+      const isContainer = isContainerEnvironment();
+
+      if (!isContainer && process.stdin.readable && !process.stdin.destroyed) {
+        try {
+          process.stdin.on('end', () => shutdown('STDIN_END'));
+          process.stdin.on('close', () => shutdown('STDIN_CLOSE'));
+        } catch (error) {
+          logger.error('Failed to register stdin handlers, using signal handlers only:', error);
+          // Continue - signal handlers will still work
+        }
+      }
 
       await server.run();
     }
+
+    // Checkpoint: MCP handshake complete (fire-and-forget, no await)
+    earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);
+    checkpoints.push(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);
+
+    // Checkpoint: Server ready (fire-and-forget, no await)
+    earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.SERVER_READY);
+    checkpoints.push(STARTUP_CHECKPOINTS.SERVER_READY);
+
+    // Log successful startup (fire-and-forget, no await)
+    const startupDuration = Date.now() - startTime;
+    earlyLogger.logStartupSuccess(checkpoints, startupDuration);
+
+    logger.info(`Server startup completed in ${startupDuration}ms (${checkpoints.length} checkpoints passed)`);
+
   } catch (error) {
+    // Log startup error with checkpoint context (fire-and-forget, no await)
+    const failedCheckpoint = findFailedCheckpoint(checkpoints);
+    earlyLogger.logStartupError(failedCheckpoint, error);
+
+    // In stdio mode, we cannot output to console at all
+    if (mode !== 'stdio') {
      console.error('Failed to start MCP server:', error);
      logger.error('Failed to start MCP server', error);
 
      // Provide helpful error messages
      if (error instanceof Error && error.message.includes('nodes.db not found')) {
        console.error('\nTo fix this issue:');
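A detail worth noting in the shutdown handler above: the forced-exit timer is created with .unref(), which tells Node.js that this timer on its own should not keep the event loop alive. The process can therefore still exit promptly once cleanup drains, while the timer remains as a one-second failsafe. The pattern in isolation:

// If cleanup stalls, force-exit after one second; if everything finishes
// sooner, the unref()'d timer does not pin the process open.
const failsafe = setTimeout(() => {
  console.warn('Shutdown timeout exceeded, forcing exit');
  process.exit(0);
}, 1000);
failsafe.unref();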
@@ -112,7 +249,12 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
        console.error('3. If that doesn\'t work, try: rm -rf node_modules && npm install');
      }
+    }
 
     process.exit(1);
   }
+  } catch (outerError) {
+    // Outer error catch for early initialization failures
+    logger.error('Critical startup error:', outerError);
+    process.exit(1);
+  }
 }
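The startup-checkpoints module itself is not shown in this diff. From the imports and call sites above, a plausible minimal shape would be something like the following sketch; the checkpoint names come from the diff, but the string values and the findFailedCheckpoint logic are assumptions, not the project's actual implementation:

export const STARTUP_CHECKPOINTS = {
  PROCESS_STARTED: 'process_started',
  TELEMETRY_INITIALIZING: 'telemetry_initializing',
  TELEMETRY_READY: 'telemetry_ready',
  MCP_HANDSHAKE_STARTING: 'mcp_handshake_starting',
  MCP_HANDSHAKE_COMPLETE: 'mcp_handshake_complete',
  SERVER_READY: 'server_ready',
} as const;

export type StartupCheckpoint =
  (typeof STARTUP_CHECKPOINTS)[keyof typeof STARTUP_CHECKPOINTS];

// Given the checkpoints that were reached, report the first expected
// checkpoint that was never logged, i.e. where startup most likely failed.
export function findFailedCheckpoint(passed: StartupCheckpoint[]): StartupCheckpoint {
  const ordered = Object.values(STARTUP_CHECKPOINTS);
  return ordered.find((cp) => !passed.includes(cp)) ?? ordered[ordered.length - 1];
}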
src/mcp/server.ts (1332 changes): file diff suppressed because it is too large.
@@ -1,71 +0,0 @@
import { ToolDocumentation } from '../types';

export const getNodeAsToolInfoDoc: ToolDocumentation = {
  name: 'get_node_as_tool_info',
  category: 'configuration',
  essentials: {
    description: 'Explains how to use ANY node as an AI tool with requirements and examples.',
    keyParameters: ['nodeType'],
    example: 'get_node_as_tool_info({nodeType: "nodes-base.slack"})',
    performance: 'Fast - returns guidance and examples',
    tips: [
      'ANY node can be used as AI tool, not just AI-marked ones',
      'Community nodes need N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true',
      'Provides specific use cases and connection requirements'
    ]
  },
  full: {
    description: `Shows how to use any n8n node as an AI tool in AI Agent workflows. In n8n, ANY node can be connected to an AI Agent's tool port, allowing the AI to use that node's functionality. This tool provides specific guidance, requirements, and examples for using a node as an AI tool.`,
    parameters: {
      nodeType: {
        type: 'string',
        required: true,
        description: 'Full node type WITH prefix: "nodes-base.slack", "nodes-base.googleSheets", etc.',
        examples: [
          'nodes-base.slack',
          'nodes-base.httpRequest',
          'nodes-base.googleSheets',
          'nodes-langchain.documentLoader'
        ]
      }
    },
    returns: `Object containing:
- nodeType: The node's full type identifier
- displayName: Human-readable name
- isMarkedAsAITool: Whether node has usableAsTool property
- aiToolCapabilities: Detailed AI tool usage information including:
  - canBeUsedAsTool: Always true in n8n
  - requiresEnvironmentVariable: For community nodes
  - commonUseCases: Specific AI tool use cases
  - requirements: Connection and environment setup
  - examples: Code examples for common scenarios
  - tips: Best practices for AI tool usage`,
    examples: [
      'get_node_as_tool_info({nodeType: "nodes-base.slack"}) - Get AI tool guidance for Slack',
      'get_node_as_tool_info({nodeType: "nodes-base.httpRequest"}) - Learn to use HTTP Request as AI tool',
      'get_node_as_tool_info({nodeType: "nodes-base.postgres"}) - Database queries as AI tools'
    ],
    useCases: [
      'Understanding how to connect any node to AI Agent',
      'Learning environment requirements for community nodes',
      'Getting specific use case examples for AI tool usage',
      'Checking if a node is optimized for AI usage',
      'Understanding credential requirements for AI tools'
    ],
    performance: 'Very fast - returns pre-computed guidance and examples',
    bestPractices: [
      'Use this before configuring nodes as AI tools',
      'Check environment requirements for community nodes',
      'Review common use cases to understand best applications',
      'Test nodes independently before connecting to AI Agent',
      'Give tools descriptive names in AI Agent configuration'
    ],
    pitfalls: [
      'Community nodes require environment variable to be used as tools',
      'Not all nodes make sense as AI tools (e.g., triggers)',
      'Some nodes require specific credentials configuration',
      'Tool descriptions in AI Agent must be clear and detailed'
    ],
    relatedTools: ['list_ai_tools', 'get_node_essentials', 'validate_node_operation']
  }
};
@@ -1,45 +0,0 @@
import { ToolDocumentation } from '../types';

export const getNodeDocumentationDoc: ToolDocumentation = {
  name: 'get_node_documentation',
  category: 'configuration',
  essentials: {
    description: 'Get readable docs with examples/auth/patterns. Better than raw schema! 87% coverage. Format: "nodes-base.slack"',
    keyParameters: ['nodeType'],
    example: 'get_node_documentation({nodeType: "nodes-base.slack"})',
    performance: 'Fast - pre-parsed',
    tips: [
      '87% coverage',
      'Includes auth examples',
      'Human-readable format'
    ]
  },
  full: {
    description: 'Returns human-readable documentation parsed from n8n-docs including examples, authentication setup, and common patterns. More useful than raw schema for understanding node usage.',
    parameters: {
      nodeType: { type: 'string', required: true, description: 'Full node type with prefix (e.g., "nodes-base.slack")' }
    },
    returns: 'Parsed markdown documentation with examples, authentication guides, common patterns',
    examples: [
      'get_node_documentation({nodeType: "nodes-base.slack"}) - Slack usage guide',
      'get_node_documentation({nodeType: "nodes-base.googleSheets"}) - Sheets examples'
    ],
    useCases: [
      'Understanding authentication setup',
      'Finding usage examples',
      'Learning common patterns'
    ],
    performance: 'Fast - Pre-parsed documentation stored in database',
    bestPractices: [
      'Use for learning node usage',
      'Check coverage with get_database_statistics',
      'Combine with get_node_essentials'
    ],
    pitfalls: [
      'Not all nodes have docs (87% coverage)',
      'May be outdated for new features',
      'Requires full node type prefix'
    ],
    relatedTools: ['get_node_info', 'get_node_essentials', 'search_nodes']
  }
};
@@ -1,86 +0,0 @@
import { ToolDocumentation } from '../types';

export const getNodeEssentialsDoc: ToolDocumentation = {
  name: 'get_node_essentials',
  category: 'configuration',
  essentials: {
    description: 'Returns only the most commonly-used properties for a node (10-20 fields). Response is 95% smaller than get_node_info (5KB vs 100KB+). Essential properties include required fields, common options, and authentication settings. Use validate_node_operation for working configurations.',
    keyParameters: ['nodeType'],
    example: 'get_node_essentials({nodeType: "nodes-base.slack"})',
    performance: '<10ms, ~5KB response',
    tips: [
      'Always use this before get_node_info',
      'Use validate_node_operation for examples',
      'Perfect for understanding node structure'
    ]
  },
  full: {
    description: 'Returns a curated subset of node properties focusing on the most commonly-used fields. Essential properties are hand-picked for each node type and include: required fields, primary operations, authentication options, and the most frequent configuration patterns. NOTE: Examples have been removed to avoid confusion - use validate_node_operation to get working configurations with proper validation.',
    parameters: {
      nodeType: { type: 'string', description: 'Full node type with prefix, e.g., "nodes-base.slack", "nodes-base.httpRequest"', required: true }
    },
    returns: `Object containing:
{
  "nodeType": "nodes-base.slack",
  "displayName": "Slack",
  "description": "Consume Slack API",
  "category": "output",
  "version": "2.3",
  "requiredProperties": [], // Most nodes have no strictly required fields
  "commonProperties": [
    {
      "name": "resource",
      "displayName": "Resource",
      "type": "options",
      "options": ["channel", "message", "user"],
      "default": "message"
    },
    {
      "name": "operation",
      "displayName": "Operation",
      "type": "options",
      "options": ["post", "update", "delete"],
      "default": "post"
    },
    // ... 10-20 most common properties
  ],
  "operations": [
    {"name": "Post", "description": "Post a message"},
    {"name": "Update", "description": "Update a message"}
  ],
  "metadata": {
    "totalProperties": 121,
    "isAITool": false,
    "hasCredentials": true
  }
}`,
    examples: [
      'get_node_essentials({nodeType: "nodes-base.httpRequest"}) - HTTP configuration basics',
      'get_node_essentials({nodeType: "nodes-base.slack"}) - Slack messaging essentials',
      'get_node_essentials({nodeType: "nodes-base.googleSheets"}) - Sheets operations',
      '// Workflow: search → essentials → validate',
      'const nodes = search_nodes({query: "database"});',
      'const mysql = get_node_essentials({nodeType: "nodes-base.mySql"});',
      'validate_node_operation("nodes-base.mySql", {operation: "select"}, "minimal");'
    ],
    useCases: [
      'Quickly understand node structure without information overload',
      'Identify which properties are most important',
      'Learn node basics before diving into advanced features',
      'Build workflows faster with curated property sets'
    ],
    performance: '<10ms response time, ~5KB payload (vs 100KB+ for full schema)',
    bestPractices: [
      'Always start with essentials, only use get_node_info if needed',
      'Use validate_node_operation to get working configurations',
      'Check authentication requirements first',
      'Use search_node_properties if specific property not in essentials'
    ],
    pitfalls: [
      'Advanced properties not included - use get_node_info for complete schema',
      'Node-specific validators may require additional fields',
      'Some nodes have 50+ properties, essentials shows only top 10-20'
    ],
    relatedTools: ['get_node_info for complete schema', 'search_node_properties for finding specific fields', 'validate_node_minimal to check configuration']
  }
};
@@ -1,98 +0,0 @@
import { ToolDocumentation } from '../types';

export const getNodeInfoDoc: ToolDocumentation = {
  name: 'get_node_info',
  category: 'configuration',
  essentials: {
    description: 'Returns complete node schema with ALL properties (100KB+ response). Only use when you need advanced properties not in get_node_essentials. Contains 200+ properties for complex nodes like HTTP Request. Requires full prefix like "nodes-base.httpRequest".',
    keyParameters: ['nodeType'],
    example: 'get_node_info({nodeType: "nodes-base.slack"})',
    performance: '100-500ms, 50-500KB response',
    tips: [
      'Try get_node_essentials first (95% smaller)',
      'Use only for advanced configurations',
      'Response may have 200+ properties'
    ]
  },
  full: {
    description: 'Returns the complete JSON schema for a node including all properties, operations, authentication methods, version information, and metadata. Response sizes range from 50KB to 500KB. Use this only when get_node_essentials doesn\'t provide the specific property you need.',
    parameters: {
      nodeType: { type: 'string', required: true, description: 'Full node type with prefix. Examples: "nodes-base.slack", "nodes-base.httpRequest", "nodes-langchain.openAi"' }
    },
    returns: `Complete node object containing:
{
  "displayName": "Slack",
  "name": "slack",
  "type": "nodes-base.slack",
  "typeVersion": 2.2,
  "description": "Consume Slack API",
  "defaults": {"name": "Slack"},
  "inputs": ["main"],
  "outputs": ["main"],
  "credentials": [
    {
      "name": "slackApi",
      "required": true,
      "displayOptions": {...}
    }
  ],
  "properties": [
    // 200+ property definitions including:
    {
      "displayName": "Resource",
      "name": "resource",
      "type": "options",
      "options": ["channel", "message", "user", "file", ...],
      "default": "message"
    },
    {
      "displayName": "Operation",
      "name": "operation",
      "type": "options",
      "displayOptions": {
        "show": {"resource": ["message"]}
      },
      "options": ["post", "update", "delete", "get", ...],
      "default": "post"
    },
    // ... 200+ more properties with complex conditions
  ],
  "version": 2.2,
  "subtitle": "={{$parameter[\"operation\"] + \": \" + $parameter[\"resource\"]}}",
  "codex": {...},
  "supportedWebhooks": [...]
}`,
    examples: [
      'get_node_info({nodeType: "nodes-base.httpRequest"}) - 300+ properties for HTTP requests',
      'get_node_info({nodeType: "nodes-base.googleSheets"}) - Complex operations and auth',
      '// When to use get_node_info:',
      '// 1. First try essentials',
      'const essentials = get_node_essentials({nodeType: "nodes-base.slack"});',
      '// 2. If property missing, search for it',
      'const props = search_node_properties({nodeType: "nodes-base.slack", query: "thread"});',
      '// 3. Only if needed, get full schema',
      'const full = get_node_info({nodeType: "nodes-base.slack"});'
    ],
    useCases: [
      'Analyzing all available operations for a node',
      'Understanding complex property dependencies',
      'Discovering all authentication methods',
      'Building UI that shows all node options',
      'Debugging property visibility conditions'
    ],
    performance: '100-500ms depending on node complexity. HTTP Request node: ~300KB, Simple nodes: ~50KB',
    bestPractices: [
      'Always try get_node_essentials first - it\'s 95% smaller',
      'Use search_node_properties to find specific advanced properties',
      'Cache results locally - schemas rarely change',
      'Parse incrementally - don\'t load entire response into memory at once'
    ],
    pitfalls: [
      'Response can exceed 500KB for complex nodes',
      'Contains many rarely-used properties that add noise',
      'Property conditions can be deeply nested and complex',
      'Must use full node type with prefix (nodes-base.X not just X)'
    ],
    relatedTools: ['get_node_essentials for common properties', 'search_node_properties to find specific fields', 'get_property_dependencies to understand conditions']
  }
};
src/mcp/tool-docs/configuration/get-node.ts (new file, 88 lines)
@@ -0,0 +1,88 @@
import { ToolDocumentation } from '../types';

export const getNodeDoc: ToolDocumentation = {
  name: 'get_node',
  category: 'configuration',
  essentials: {
    description: 'Unified node information tool with progressive detail levels and multiple modes. Get node schema, docs, search properties, or version info.',
    keyParameters: ['nodeType', 'detail', 'mode', 'includeTypeInfo', 'includeExamples'],
    example: 'get_node({nodeType: "nodes-base.httpRequest", detail: "standard"})',
    performance: 'Instant (<10ms) for minimal/standard, moderate for full',
    tips: [
      'Use detail="standard" (default) for most tasks - shows required fields',
      'Use mode="docs" for readable markdown documentation',
      'Use mode="search_properties" with propertyQuery to find specific fields',
      'Use mode="versions" to check version history and breaking changes',
      'Add includeExamples=true to get real-world configuration examples'
    ]
  },
  full: {
    description: `**Detail Levels (mode="info", default):**
- minimal (~200 tokens): Basic metadata only - nodeType, displayName, description, category
- standard (~1-2K tokens): Essential properties + operations - recommended for most tasks
- full (~3-8K tokens): Complete node schema - use only when standard insufficient

**Operation Modes:**
- info (default): Node schema with configurable detail level
- docs: Readable markdown documentation with examples and patterns
- search_properties: Find specific properties within a node
- versions: List all available versions with breaking changes summary
- compare: Compare two versions with property-level changes
- breaking: Show only breaking changes between versions
- migrations: Show auto-migratable changes between versions`,
    parameters: {
      nodeType: { type: 'string', required: true, description: 'Full node type with prefix: "nodes-base.httpRequest" or "nodes-langchain.agent"' },
      detail: { type: 'string', required: false, description: 'Detail level for mode=info: "minimal", "standard" (default), "full"' },
      mode: { type: 'string', required: false, description: 'Operation mode: "info" (default), "docs", "search_properties", "versions", "compare", "breaking", "migrations"' },
      includeTypeInfo: { type: 'boolean', required: false, description: 'Include type structure metadata (validation rules, JS types). Adds ~80-120 tokens per property' },
      includeExamples: { type: 'boolean', required: false, description: 'Include real-world configuration examples from templates. Adds ~200-400 tokens per example' },
      propertyQuery: { type: 'string', required: false, description: 'For mode=search_properties: search term to find properties (e.g., "auth", "header", "body")' },
      maxPropertyResults: { type: 'number', required: false, description: 'For mode=search_properties: max results (default 20)' },
      fromVersion: { type: 'string', required: false, description: 'For compare/breaking/migrations modes: source version (e.g., "1.0")' },
      toVersion: { type: 'string', required: false, description: 'For compare mode: target version (e.g., "2.0"). Defaults to latest' }
    },
    returns: `Depends on mode:
- info: Node schema with properties based on detail level
- docs: Markdown documentation string
- search_properties: Array of matching property paths with descriptions
- versions: Version history with breaking changes flags
- compare/breaking/migrations: Version comparison details`,
    examples: [
      '// Standard detail (recommended for AI agents)\nget_node({nodeType: "nodes-base.httpRequest"})',
      '// Minimal for quick metadata check\nget_node({nodeType: "nodes-base.slack", detail: "minimal"})',
      '// Full detail with examples\nget_node({nodeType: "nodes-base.googleSheets", detail: "full", includeExamples: true})',
      '// Get readable documentation\nget_node({nodeType: "nodes-base.webhook", mode: "docs"})',
      '// Search for authentication properties\nget_node({nodeType: "nodes-base.httpRequest", mode: "search_properties", propertyQuery: "auth"})',
      '// Check version history\nget_node({nodeType: "nodes-base.executeWorkflow", mode: "versions"})',
      '// Compare specific versions\nget_node({nodeType: "nodes-base.httpRequest", mode: "compare", fromVersion: "3.0", toVersion: "4.1"})'
    ],
    useCases: [
      'Configure nodes for workflow building (use detail=standard)',
      'Find specific configuration options (use mode=search_properties)',
      'Get human-readable node documentation (use mode=docs)',
      'Check for breaking changes before version upgrades (use mode=breaking)',
      'Understand complex types with includeTypeInfo=true'
    ],
    performance: `Token costs by detail level:
- minimal: ~200 tokens
- standard: ~1000-2000 tokens (default)
- full: ~3000-8000 tokens
- includeTypeInfo: +80-120 tokens per property
- includeExamples: +200-400 tokens per example
- Version modes: ~400-1200 tokens`,
    bestPractices: [
      'Start with detail="standard" - it covers 95% of use cases',
      'Only use detail="full" if standard is missing required properties',
      'Use mode="docs" when explaining nodes to users',
      'Combine includeTypeInfo=true for complex nodes (filter, resourceMapper)',
      'Check version history before configuring versioned nodes'
    ],
    pitfalls: [
      'detail="full" returns large responses (~100KB) - use sparingly',
      'Node type must include prefix (nodes-base. or nodes-langchain.)',
      'includeExamples only works with mode=info and detail=standard',
      'Version modes require nodes with multiple versions in database'
    ],
    relatedTools: ['search_nodes', 'validate_node', 'validate_workflow']
  }
};
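For orientation, this is roughly how an MCP client would call the new unified tool over stdio. The sketch uses the TypeScript MCP SDK (@modelcontextprotocol/sdk); the server launch command ('npx n8n-mcp') is an assumption for illustration rather than something this diff specifies:

import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Spawn the MCP server as a child process speaking stdio.
const transport = new StdioClientTransport({ command: 'npx', args: ['n8n-mcp'] });
const client = new Client({ name: 'example-client', version: '1.0.0' });
await client.connect(transport);

// Default detail ('standard') returns essential properties and operations.
const standard = await client.callTool({
  name: 'get_node',
  arguments: { nodeType: 'nodes-base.httpRequest' },
});

// mode: 'docs' returns readable markdown documentation instead of schema.
const docs = await client.callTool({
  name: 'get_node',
  arguments: { nodeType: 'nodes-base.webhook', mode: 'docs' },
});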
@@ -1,79 +0,0 @@
import { ToolDocumentation } from '../types';

export const getPropertyDependenciesDoc: ToolDocumentation = {
  name: 'get_property_dependencies',
  category: 'configuration',
  essentials: {
    description: 'Shows property dependencies and visibility rules - which fields appear when.',
    keyParameters: ['nodeType', 'config?'],
    example: 'get_property_dependencies({nodeType: "nodes-base.httpRequest"})',
    performance: 'Fast - analyzes property conditions',
    tips: [
      'Shows which properties depend on other property values',
      'Test visibility impact with optional config parameter',
      'Helps understand complex conditional property displays'
    ]
  },
  full: {
    description: `Analyzes property dependencies and visibility conditions for a node. Shows which properties control the visibility of other properties (e.g., sendBody=true reveals body-related fields). Optionally test how a specific configuration affects property visibility.`,
    parameters: {
      nodeType: {
        type: 'string',
        required: true,
        description: 'The node type to analyze (e.g., "nodes-base.httpRequest")',
        examples: [
          'nodes-base.httpRequest',
          'nodes-base.slack',
          'nodes-base.if',
          'nodes-base.switch'
        ]
      },
      config: {
        type: 'object',
        required: false,
        description: 'Optional partial configuration to check visibility impact',
        examples: [
          '{ method: "POST", sendBody: true }',
          '{ operation: "create", resource: "contact" }',
          '{ mode: "rules" }'
        ]
      }
    },
    returns: `Object containing:
- nodeType: The analyzed node type
- displayName: Human-readable node name
- controllingProperties: Properties that control visibility of others
- dependentProperties: Properties whose visibility depends on others
- complexDependencies: Multi-condition dependencies
- currentConfig: If config provided, shows:
  - providedValues: The configuration you passed
  - visibilityImpact: Which properties are visible/hidden`,
    examples: [
      'get_property_dependencies({nodeType: "nodes-base.httpRequest"}) - Analyze HTTP Request dependencies',
      'get_property_dependencies({nodeType: "nodes-base.httpRequest", config: {sendBody: true}}) - Test visibility with sendBody enabled',
      'get_property_dependencies({nodeType: "nodes-base.if", config: {mode: "rules"}}) - Check If node in rules mode'
    ],
    useCases: [
      'Understanding which properties control others',
      'Debugging why certain fields are not visible',
      'Building dynamic UIs that match n8n behavior',
      'Testing configurations before applying them',
      'Understanding complex node property relationships'
    ],
    performance: 'Fast - analyzes property metadata without database queries',
    bestPractices: [
      'Use before configuring complex nodes with many conditional fields',
      'Test different config values to understand visibility rules',
      'Check dependencies when properties seem to be missing',
      'Use for nodes with multiple operation modes (Slack, Google Sheets)',
      'Combine with search_node_properties to find specific fields'
    ],
    pitfalls: [
      'Some properties have complex multi-condition dependencies',
      'Visibility rules can be nested (property A controls B which controls C)',
      'Not all hidden properties are due to dependencies (some are deprecated)',
      'Config parameter only tests visibility, does not validate values'
    ],
    relatedTools: ['search_node_properties', 'get_node_essentials', 'validate_node_operation']
  }
};
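Taken together, the hunks above retire five single-purpose node-inspection tool docs (get_node_as_tool_info, get_node_documentation, get_node_essentials, get_node_info, and get_property_dependencies) in favor of the single get_node tool, which covers the same ground through its detail levels and modes. On that reading, old call sites map roughly as follows (illustrative only, based on the mode descriptions in the new doc):

// get_node_essentials({nodeType: "nodes-base.slack"})
//   -> get_node({nodeType: "nodes-base.slack"})              // detail: "standard" is the default
// get_node_info({nodeType: "nodes-base.slack"})
//   -> get_node({nodeType: "nodes-base.slack", detail: "full"})
// get_node_documentation({nodeType: "nodes-base.slack"})
//   -> get_node({nodeType: "nodes-base.slack", mode: "docs"})
// Property lookups previously served by get_property_dependencies are closest to:
//   -> get_node({nodeType: "nodes-base.slack", mode: "search_properties", propertyQuery: "sendBody"})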
Some files were not shown because too many files have changed in this diff.