Mirror of https://github.com/czlonkowski/n8n-mcp.git, synced 2026-01-30 14:32:04 +00:00

Compare commits: `fix/issue-…` ... `v2.22.7` (194 commits)
The commit list table (Author | SHA1 | Date) did not survive extraction; only the SHA1 column remains, running from 892c4ed70a through fcf778c79d.
.env.example — 34 lines changed

@@ -69,6 +69,40 @@ AUTH_TOKEN=your-secure-token-here
# Default: 0 (disabled)
# TRUST_PROXY=0

# =========================
# SECURITY CONFIGURATION
# =========================

# Rate Limiting Configuration
# Protects authentication endpoint from brute force attacks
# Window: Time period in milliseconds (default: 900000 = 15 minutes)
# Max: Maximum authentication attempts per IP within window (default: 20)
# AUTH_RATE_LIMIT_WINDOW=900000
# AUTH_RATE_LIMIT_MAX=20

# SSRF Protection Mode
# Prevents webhooks from accessing internal networks and cloud metadata
#
# Modes:
# - strict (default): Block localhost + private IPs + cloud metadata
#   Use for: Production deployments, cloud environments
#   Security: Maximum
#
# - moderate: Allow localhost, block private IPs + cloud metadata
#   Use for: Local development with local n8n instance
#   Security: Good balance
#   Example: n8n running on http://localhost:5678 or http://host.docker.internal:5678
#
# - permissive: Allow localhost + private IPs, block cloud metadata
#   Use for: Internal network testing, private cloud (NOT for production)
#   Security: Minimal - use with caution
#
# Default: strict
# WEBHOOK_SECURITY_MODE=strict
#
# For local development with local n8n:
# WEBHOOK_SECURITY_MODE=moderate

# =========================
# MULTI-TENANT CONFIGURATION
# =========================

.github/workflows/docker-build.yml (vendored) — 52 lines changed

@@ -5,8 +5,6 @@ on:
  push:
    branches:
      - main
    tags:
      - 'v*'
    paths-ignore:
      - '**.md'
      - '**.txt'
@@ -38,6 +36,12 @@ on:
      - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

+# Prevent concurrent Docker pushes across all workflows (shared with release.yml)
+# This ensures docker-build.yml and release.yml never push to 'latest' simultaneously
+concurrency:
+  group: docker-push-${{ github.ref }}
+  cancel-in-progress: false

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
@@ -89,16 +93,54 @@ jobs:
        uses: docker/build-push-action@v5
        with:
          context: .
-          no-cache: true
+          no-cache: false
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false

      - name: Verify multi-arch manifest for latest tag
        if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
        run: |
          echo "Verifying multi-arch manifest for latest tag..."

          # Retry with exponential backoff (registry propagation can take time)
          MAX_ATTEMPTS=5
          ATTEMPT=1
          WAIT_TIME=2

          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."

            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)

            # Check for both platforms
            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
              echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
              echo "$MANIFEST"
              exit 0
            fi

            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
              sleep $WAIT_TIME
              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
            fi

            ATTEMPT=$((ATTEMPT + 1))
          done

          echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
          echo "$MANIFEST"
          exit 1

  build-railway:
    name: Build Railway Docker Image
    runs-on: ubuntu-latest
    needs: build
    permissions:
      contents: read
      packages: write
@@ -143,11 +185,13 @@ jobs:
        with:
          context: .
          file: ./Dockerfile.railway
-          no-cache: true
+          no-cache: false
          platforms: linux/amd64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta-railway.outputs.tags }}
          labels: ${{ steps.meta-railway.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
          provenance: false

# Nginx build commented out until Phase 2

.github/workflows/release.yml (vendored) — 243 lines changed

@@ -13,9 +13,10 @@ permissions:
  issues: write
  pull-requests: write

-# Prevent concurrent releases
+# Prevent concurrent Docker pushes across all workflows (shared with docker-build.yml)
+# This ensures release.yml and docker-build.yml never push to 'latest' simultaneously
concurrency:
-  group: release
+  group: docker-push-${{ github.ref }}
  cancel-in-progress: false

env:
@@ -79,53 +80,111 @@ jobs:
            echo "ℹ️ No version change detected"
          fi

-  extract-changelog:
-    name: Extract Changelog
+      - name: Validate version against npm registry
+        if: steps.check.outputs.changed == 'true'
+        run: |
+          CURRENT_VERSION="${{ steps.check.outputs.version }}"
+
+          # Get latest version from npm (handle package not found)
+          NPM_VERSION=$(npm view n8n-mcp version 2>/dev/null || echo "0.0.0")
+
+          echo "Current version: $CURRENT_VERSION"
+          echo "NPM registry version: $NPM_VERSION"
+
+          # Check if version already exists in npm
+          if [ "$CURRENT_VERSION" = "$NPM_VERSION" ]; then
+            echo "❌ Error: Version $CURRENT_VERSION already published to npm"
+            echo "Please bump the version in package.json before releasing"
+            exit 1
+          fi
+
+          # Simple semver comparison (assumes format: major.minor.patch)
+          # Compare if current version is greater than npm version
+          if [ "$NPM_VERSION" != "0.0.0" ]; then
+            # Sort versions and check if current is not the highest
+            HIGHEST=$(printf '%s\n%s' "$NPM_VERSION" "$CURRENT_VERSION" | sort -V | tail -n1)
+            if [ "$HIGHEST" != "$CURRENT_VERSION" ]; then
+              echo "❌ Error: Version $CURRENT_VERSION is not greater than npm version $NPM_VERSION"
+              echo "Please use a higher version number"
+              exit 1
+            fi
+          fi
+
+          echo "✅ Version $CURRENT_VERSION is valid (higher than npm version $NPM_VERSION)"

+  generate-release-notes:
+    name: Generate Release Notes
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'
    outputs:
-      release-notes: ${{ steps.extract.outputs.notes }}
-      has-notes: ${{ steps.extract.outputs.has-notes }}
+      release-notes: ${{ steps.generate.outputs.notes }}
+      has-notes: ${{ steps.generate.outputs.has-notes }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

-      - name: Extract changelog for version
-        id: extract
+        with:
+          fetch-depth: 0 # Need full history for git log
+
+      - name: Generate release notes from commits
+        id: generate
        run: |
          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          CHANGELOG_FILE="docs/CHANGELOG.md"

          if [ ! -f "$CHANGELOG_FILE" ]; then
            echo "Changelog file not found at $CHANGELOG_FILE"
            echo "has-notes=false" >> $GITHUB_OUTPUT
            echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
            exit 0
          fi

          # Use the extracted changelog script
          if NOTES=$(node scripts/extract-changelog.js "$VERSION" "$CHANGELOG_FILE" 2>/dev/null); then
          CURRENT_VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          CURRENT_TAG="v$CURRENT_VERSION"

          # Get the previous tag (excluding the current tag which doesn't exist yet)
          PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v "^$CURRENT_TAG$" | head -1)

          echo "Current version: $CURRENT_VERSION"
          echo "Current tag: $CURRENT_TAG"
          echo "Previous tag: $PREVIOUS_TAG"

          if [ -z "$PREVIOUS_TAG" ]; then
            echo "ℹ️ No previous tag found, this might be the first release"

            # Generate initial release notes using script
            if NOTES=$(node scripts/generate-initial-release-notes.js "$CURRENT_VERSION" 2>/dev/null); then
              echo "✅ Successfully generated initial release notes for version $CURRENT_VERSION"
            else
              echo "⚠️ Could not generate initial release notes for version $CURRENT_VERSION"
              NOTES="Initial release v$CURRENT_VERSION"
            fi

            echo "has-notes=true" >> $GITHUB_OUTPUT

            # Use heredoc to properly handle multiline content
            {
              echo "notes<<EOF"
              echo "$NOTES"
              echo "EOF"
            } >> $GITHUB_OUTPUT

          echo "✅ Successfully extracted changelog for version $VERSION"

          else
            echo "has-notes=false" >> $GITHUB_OUTPUT
            echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
            echo "⚠️ Could not extract changelog for version $VERSION"
            echo "✅ Previous tag found: $PREVIOUS_TAG"

            # Generate release notes between tags
            if NOTES=$(node scripts/generate-release-notes.js "$PREVIOUS_TAG" "HEAD" 2>/dev/null); then
              echo "has-notes=true" >> $GITHUB_OUTPUT

              # Use heredoc to properly handle multiline content
              {
                echo "notes<<EOF"
                echo "$NOTES"
                echo "EOF"
              } >> $GITHUB_OUTPUT

              echo "✅ Successfully generated release notes from $PREVIOUS_TAG to $CURRENT_TAG"
            else
              echo "has-notes=false" >> $GITHUB_OUTPUT
              echo "notes=Failed to generate release notes for version $CURRENT_VERSION" >> $GITHUB_OUTPUT
              echo "⚠️ Could not generate release notes for version $CURRENT_VERSION"
            fi
          fi

  create-release:
    name: Create GitHub Release
    runs-on: ubuntu-latest
-    needs: [detect-version-change, extract-changelog]
+    needs: [detect-version-change, generate-release-notes]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    outputs:
      release-id: ${{ steps.create.outputs.id }}
@@ -156,7 +215,7 @@ jobs:
          cat > release_body.md << 'EOF'
          # Release v${{ needs.detect-version-change.outputs.new-version }}

-          ${{ needs.extract-changelog.outputs.release-notes }}
+          ${{ needs.generate-release-notes.outputs.release-notes }}

          ---

@@ -206,8 +265,8 @@ jobs:
          echo "id=$RELEASE_ID" >> $GITHUB_OUTPUT
          echo "upload_url=https://uploads.github.com/repos/${{ github.repository }}/releases/$RELEASE_ID/assets{?name,label}" >> $GITHUB_OUTPUT

-  build-and-test:
-    name: Build and Test
+  build-and-verify:
+    name: Build and Verify
    runs-on: ubuntu-latest
    needs: detect-version-change
    if: needs.detect-version-change.outputs.version-changed == 'true'
@@ -226,22 +285,28 @@ jobs:

      - name: Build project
        run: npm run build

-      - name: Rebuild database
-        run: npm run rebuild
-
-      - name: Run tests
-        run: npm test
-        env:
-          CI: true

+      # Database is already built and committed during development
+      # Rebuilding here causes segfault due to memory pressure (exit code 139)
+      - name: Verify database exists
+        run: |
+          if [ ! -f "data/nodes.db" ]; then
+            echo "❌ Error: data/nodes.db not found"
+            echo "Please run 'npm run rebuild' locally and commit the database"
+            exit 1
+          fi
+          echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"
+
+      # Skip tests - they already passed in PR before merge
+      # Running them again on the same commit adds no safety, only time (~6-7 min)

      - name: Run type checking
        run: npm run typecheck

  publish-npm:
    name: Publish to NPM
    runs-on: ubuntu-latest
-    needs: [detect-version-change, build-and-test, create-release]
+    needs: [detect-version-change, build-and-verify, create-release]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    steps:
      - name: Checkout repository
@@ -259,10 +324,16 @@ jobs:

      - name: Build project
        run: npm run build

-      - name: Rebuild database
-        run: npm run rebuild

+      # Database is already built and committed during development
+      - name: Verify database exists
+        run: |
+          if [ ! -f "data/nodes.db" ]; then
+            echo "❌ Error: data/nodes.db not found"
+            exit 1
+          fi
+          echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"

      - name: Sync runtime version
        run: npm run sync:runtime-version

@@ -290,6 +361,15 @@ jobs:
            const pkg = require('./package.json');
            pkg.name = 'n8n-mcp';
            pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
            pkg.main = 'dist/index.js';
            pkg.types = 'dist/index.d.ts';
            pkg.exports = {
              '.': {
                types: './dist/index.d.ts',
                require: './dist/index.js',
                import: './dist/index.js'
              }
            };
            pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
            pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
            pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
@@ -324,7 +404,7 @@ jobs:
  build-docker:
    name: Build and Push Docker Images
    runs-on: ubuntu-latest
-    needs: [detect-version-change, build-and-test]
+    needs: [detect-version-change, build-and-verify]
    if: needs.detect-version-change.outputs.version-changed == 'true'
    permissions:
      contents: read
@@ -382,7 +462,76 @@ jobs:
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Verify multi-arch manifest for latest tag
        run: |
          echo "Verifying multi-arch manifest for latest tag..."

          # Retry with exponential backoff (registry propagation can take time)
          MAX_ATTEMPTS=5
          ATTEMPT=1
          WAIT_TIME=2

          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."

            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)

            # Check for both platforms
            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
              echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
              echo "$MANIFEST"
              exit 0
            fi

            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
              sleep $WAIT_TIME
              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
            fi

            ATTEMPT=$((ATTEMPT + 1))
          done

          echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
          echo "$MANIFEST"
          exit 1

      - name: Verify multi-arch manifest for version tag
        run: |
          VERSION="${{ needs.detect-version-change.outputs.new-version }}"
          echo "Verifying multi-arch manifest for version tag :$VERSION (without 'v' prefix)..."

          # Retry with exponential backoff (registry propagation can take time)
          MAX_ATTEMPTS=5
          ATTEMPT=1
          WAIT_TIME=2

          while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
            echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."

            MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:$VERSION 2>&1 || true)

            # Check for both platforms
            if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
              echo "✅ Multi-arch manifest verified for $VERSION: both amd64 and arm64 present"
              echo "$MANIFEST"
              exit 0
            fi

            if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
              echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
              sleep $WAIT_TIME
              WAIT_TIME=$((WAIT_TIME * 2)) # Exponential backoff: 2s, 4s, 8s, 16s
            fi

            ATTEMPT=$((ATTEMPT + 1))
          done

          echo "❌ ERROR: Multi-arch manifest incomplete for version $VERSION after $MAX_ATTEMPTS attempts!"
          echo "$MANIFEST"
          exit 1

      - name: Extract metadata for Railway image
        id: meta-railway
        uses: docker/metadata-action@v5

CHANGELOG.md — 3897 lines changed (diff suppressed because it is too large)
@@ -192,4 +192,5 @@ NEVER proactively create documentation files (*.md) or README files. Only create
- When the user asks to review issues, you should use GH CLI to get the issue and all the comments
- When the task can be divided into separate subtasks, you should spawn separate sub-agents to handle them in parallel
- Use the best sub-agent for the task as per their descriptions
- Do not use hyperbolic or dramatic language in comments and documentation
+- Add to every commit and PR: Conceived by Romuald Członkowski - and then link to www.aiadvisors.pl/en. Don't add it in conversations

Dockerfile — 12 lines changed

@@ -34,9 +34,13 @@ RUN apk add --no-cache curl su-exec && \
# Copy runtime-only package.json
COPY package.runtime.json package.json

-# Install runtime dependencies with cache mount
+# Install runtime dependencies with better-sqlite3 compilation
+# Build tools (python3, make, g++) are installed, used for compilation, then removed
+# This enables native SQLite (better-sqlite3) instead of sql.js, preventing memory leaks
RUN --mount=type=cache,target=/root/.npm \
-    npm install --production --no-audit --no-fund
+    apk add --no-cache python3 make g++ && \
+    npm install --production --no-audit --no-fund && \
+    apk del python3 make g++

# Copy built application
COPY --from=builder /app/dist ./dist
@@ -78,7 +82,7 @@ ENV IS_DOCKER=true
# To opt-out, uncomment the following line:
# ENV N8N_MCP_TELEMETRY_DISABLED=true

-# Expose HTTP port
+# Expose HTTP port (default 3000, configurable via PORT environment variable at runtime)
EXPOSE 3000

# Set stop signal to SIGTERM (default, but explicit is better)
@@ -86,7 +90,7 @@ STOPSIGNAL SIGTERM

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-    CMD curl -f http://127.0.0.1:3000/health || exit 1
+    CMD sh -c 'curl -f http://127.0.0.1:${PORT:-3000}/health || exit 1'

# Optimized entrypoint
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]

@@ -25,16 +25,20 @@ RUN npm run build
FROM node:22-alpine AS runtime
WORKDIR /app

-# Install system dependencies
-RUN apk add --no-cache curl python3 make g++ && \
+# Install runtime dependencies
+RUN apk add --no-cache curl && \
    rm -rf /var/cache/apk/*

# Copy runtime-only package.json
COPY package.runtime.json package.json

-# Install only production dependencies
-RUN npm install --production --no-audit --no-fund && \
-    npm cache clean --force
+# Install production dependencies with temporary build tools
+# Build tools (python3, make, g++) enable better-sqlite3 compilation (native SQLite)
+# They are removed after installation to reduce image size and attack surface
+RUN apk add --no-cache python3 make g++ && \
+    npm install --production --no-audit --no-fund && \
+    npm cache clean --force && \
+    apk del python3 make g++

# Copy built application from builder stage
COPY --from=builder /app/dist ./dist

README.md — 255 lines changed

@@ -5,7 +5,7 @@
[](https://www.npmjs.com/package/n8n-mcp)
[](https://codecov.io/gh/czlonkowski/n8n-mcp)
[](https://github.com/czlonkowski/n8n-mcp/actions)
[](https://github.com/n8n-io/n8n)
[](https://github.com/n8n-io/n8n)
[](https://github.com/czlonkowski/n8n-mcp/pkgs/container/n8n-mcp)
[](https://railway.com/deploy/n8n-mcp?referralCode=n8n-mcp)

@@ -198,10 +198,36 @@ Add to Claude Desktop config:
}
```

->💡 Tip: If you’re running n8n locally on the same machine (e.g., via Docker), use http://host.docker.internal:5678 as the N8N_API_URL.
+>💡 Tip: If you're running n8n locally on the same machine (e.g., via Docker), use http://host.docker.internal:5678 as the N8N_API_URL.

> **Note**: The n8n API credentials are optional. Without them, you'll have access to all documentation and validation tools. With them, you'll additionally get workflow management capabilities (create, update, execute workflows).

### 🏠 Local n8n Instance Configuration

If you're running n8n locally (e.g., `http://localhost:5678` or Docker), you need to allow localhost webhooks:

```json
{
  "mcpServers": {
    "n8n-mcp": {
      "command": "docker",
      "args": [
        "run", "-i", "--rm", "--init",
        "-e", "MCP_MODE=stdio",
        "-e", "LOG_LEVEL=error",
        "-e", "DISABLE_CONSOLE_OUTPUT=true",
        "-e", "N8N_API_URL=http://host.docker.internal:5678",
        "-e", "N8N_API_KEY=your-api-key",
        "-e", "WEBHOOK_SECURITY_MODE=moderate",
        "ghcr.io/czlonkowski/n8n-mcp:latest"
      ]
    }
  }
}
```

> ⚠️ **Important:** Set `WEBHOOK_SECURITY_MODE=moderate` to allow webhooks to your local n8n instance. This is safe for local development while still blocking private networks and cloud metadata.

**Important:** The `-i` flag is required for MCP stdio communication.

> 🔧 If you encounter any issues with Docker, check our [Docker Troubleshooting Guide](./docs/DOCKER_TROUBLESHOOTING.md).
@@ -258,6 +284,86 @@ environment:
  N8N_MCP_TELEMETRY_DISABLED: "true"
```

## ⚙️ Database & Memory Configuration

### Database Adapters

n8n-mcp uses SQLite for storing node documentation. Two adapters are available:

1. **better-sqlite3** (Default in Docker)
   - Native C++ bindings for best performance
   - Direct disk writes (no memory overhead)
   - **Now enabled by default** in Docker images (v2.20.2+)
   - Memory usage: ~100-120 MB stable

2. **sql.js** (Fallback)
   - Pure JavaScript implementation
   - In-memory database with periodic saves
   - Used when better-sqlite3 compilation fails
   - Memory usage: ~150-200 MB stable

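The fallback behavior described above can be pictured in a few lines. This sketch is illustrative only — the dynamic imports show the general pattern, not n8n-mcp's actual internals:

```typescript
// Illustrative sketch, not n8n-mcp's actual code: prefer the native
// better-sqlite3 driver, and fall back to sql.js when it cannot load
// (e.g. because build tools were missing and compilation failed).
type Adapter =
  | { kind: 'better-sqlite3'; db: unknown }         // direct disk writes
  | { kind: 'sql.js'; db: unknown; path: string };  // in-memory + periodic saves

async function createAdapter(dbPath: string): Promise<Adapter> {
  try {
    const BetterSqlite3 = (await import('better-sqlite3')).default;
    return { kind: 'better-sqlite3', db: new BetterSqlite3(dbPath) };
  } catch {
    const initSqlJs = (await import('sql.js')).default;
    const SQL = await initSqlJs();
    return { kind: 'sql.js', db: new SQL.Database(), path: dbPath };
  }
}
```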
### Memory Optimization (sql.js)

If using the sql.js fallback, you can configure the save interval to balance between data safety and memory efficiency:

**Environment Variable:**
```bash
SQLJS_SAVE_INTERVAL_MS=5000  # Default: 5000ms (5 seconds)
```

**Usage:**
- Controls how long to wait after database changes before saving to disk
- Lower values = more frequent saves = higher memory churn
- Higher values = less frequent saves = lower memory usage
- Minimum: 100ms
- Recommended: 5000-10000ms for production

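In other words, the interval acts as a debounce on disk flushes. A minimal sketch of the idea, assuming a `persist()` callback that serializes the in-memory database to disk (illustrative, not the actual implementation):

```typescript
// Illustrative debounce, not n8n-mcp's actual code: coalesce writes so the
// in-memory sql.js database is flushed at most once per quiet interval.
const SAVE_INTERVAL_MS = Number(process.env.SQLJS_SAVE_INTERVAL_MS ?? 5000);

let timer: NodeJS.Timeout | undefined;

function scheduleSave(persist: () => void): void {
  if (timer) clearTimeout(timer);            // restart the countdown on every change
  timer = setTimeout(persist, SAVE_INTERVAL_MS);
}
```

Each flush serializes the whole database, so longer quiet periods mean fewer full serializations — which is where the memory-churn savings come from.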
**Docker Configuration:**
```json
{
  "mcpServers": {
    "n8n-mcp": {
      "command": "docker",
      "args": [
        "run",
        "-i",
        "--rm",
        "--init",
        "-e", "SQLJS_SAVE_INTERVAL_MS=10000",
        "ghcr.io/czlonkowski/n8n-mcp:latest"
      ]
    }
  }
}
```

**docker-compose:**
```yaml
environment:
  SQLJS_SAVE_INTERVAL_MS: "10000"
```

### Memory Leak Fix (v2.20.2)

**Issue #330** identified a critical memory leak in long-running Docker/Kubernetes deployments:
- **Before:** 100 MB → 2.2 GB over 72 hours (OOM kills)
- **After:** Stable at 100-200 MB indefinitely

**Fixes Applied:**
- ✅ Docker images now use better-sqlite3 by default (eliminates leak entirely)
- ✅ sql.js fallback optimized (98% reduction in save frequency)
- ✅ Removed unnecessary memory allocations (50% reduction per save)
- ✅ Configurable save interval via `SQLJS_SAVE_INTERVAL_MS`

For Kubernetes deployments with memory limits:
```yaml
resources:
  requests:
    memory: 256Mi
  limits:
    memory: 512Mi
```

## 💖 Support This Project

<div align="center">
@@ -395,6 +501,14 @@ Complete guide for integrating n8n-MCP with Windsurf using project rules.
### [Codex](./docs/CODEX_SETUP.md)
Complete guide for integrating n8n-MCP with Codex.

## 🎓 Add Claude Skills (Optional)

Supercharge your n8n workflow building with specialized skills that teach AI how to build production-ready workflows!

[](https://www.youtube.com/watch?v=e6VvRqmUY2Y)

Learn more: [n8n-skills repository](https://github.com/czlonkowski/n8n-skills)

## 🤖 Claude Project Setup

For the best results when using n8n-MCP with Claude Projects, use these enhanced system instructions:
@@ -560,6 +674,97 @@ n8n_update_partial_workflow({id: "wf-123", operations: [{...}]})
n8n_update_partial_workflow({id: "wf-123", operations: [{...}]})
```

### CRITICAL: addConnection Syntax

The `addConnection` operation requires **four separate string parameters**. Common mistakes cause misleading errors.

❌ WRONG - Object format (fails with "Expected string, received object"):
```json
{
  "type": "addConnection",
  "connection": {
    "source": {"nodeId": "node-1", "outputIndex": 0},
    "destination": {"nodeId": "node-2", "inputIndex": 0}
  }
}
```

❌ WRONG - Combined string (fails with "Source node not found"):
```json
{
  "type": "addConnection",
  "source": "node-1:main:0",
  "target": "node-2:main:0"
}
```

✅ CORRECT - Four separate string parameters:
```json
{
  "type": "addConnection",
  "source": "node-id-string",
  "target": "target-node-id-string",
  "sourcePort": "main",
  "targetPort": "main"
}
```

**Reference**: [GitHub Issue #327](https://github.com/czlonkowski/n8n-mcp/issues/327)

### ⚠️ CRITICAL: IF Node Multi-Output Routing

IF nodes have **two outputs** (TRUE and FALSE). Use the **`branch` parameter** to route to the correct output:

✅ CORRECT - Route to TRUE branch (when condition is met):
```json
{
  "type": "addConnection",
  "source": "if-node-id",
  "target": "success-handler-id",
  "sourcePort": "main",
  "targetPort": "main",
  "branch": "true"
}
```

✅ CORRECT - Route to FALSE branch (when condition is NOT met):
```json
{
  "type": "addConnection",
  "source": "if-node-id",
  "target": "failure-handler-id",
  "sourcePort": "main",
  "targetPort": "main",
  "branch": "false"
}
```

**Common Pattern** - Complete IF node routing:
```json
n8n_update_partial_workflow({
  id: "workflow-id",
  operations: [
    {type: "addConnection", source: "If Node", target: "True Handler", sourcePort: "main", targetPort: "main", branch: "true"},
    {type: "addConnection", source: "If Node", target: "False Handler", sourcePort: "main", targetPort: "main", branch: "false"}
  ]
})
```

**Note**: Without the `branch` parameter, both connections may end up on the same output, causing logic errors!

### removeConnection Syntax

Use the same four-parameter format:
```json
{
  "type": "removeConnection",
  "source": "source-node-id",
  "target": "target-node-id",
  "sourcePort": "main",
  "targetPort": "main"
}
```

## Example Workflow

### Template-First Approach
@@ -652,6 +857,32 @@ n8n_update_partial_workflow({
- **Avoid when possible** - Prefer standard nodes
- **Only when necessary** - Use code node as last resort
- **AI tool capability** - ANY node can be an AI tool (not just marked ones)

### Most Popular n8n Nodes (for get_node_essentials):

1. **n8n-nodes-base.code** - JavaScript/Python scripting
2. **n8n-nodes-base.httpRequest** - HTTP API calls
3. **n8n-nodes-base.webhook** - Event-driven triggers
4. **n8n-nodes-base.set** - Data transformation
5. **n8n-nodes-base.if** - Conditional routing
6. **n8n-nodes-base.manualTrigger** - Manual workflow execution
7. **n8n-nodes-base.respondToWebhook** - Webhook responses
8. **n8n-nodes-base.scheduleTrigger** - Time-based triggers
9. **@n8n/n8n-nodes-langchain.agent** - AI agents
10. **n8n-nodes-base.googleSheets** - Spreadsheet integration
11. **n8n-nodes-base.merge** - Data merging
12. **n8n-nodes-base.switch** - Multi-branch routing
13. **n8n-nodes-base.telegram** - Telegram bot integration
14. **@n8n/n8n-nodes-langchain.lmChatOpenAi** - OpenAI chat models
15. **n8n-nodes-base.splitInBatches** - Batch processing
16. **n8n-nodes-base.openAi** - OpenAI legacy node
17. **n8n-nodes-base.gmail** - Email automation
18. **n8n-nodes-base.function** - Custom functions
19. **n8n-nodes-base.stickyNote** - Workflow documentation
20. **n8n-nodes-base.executeWorkflowTrigger** - Sub-workflow calls

**Note:** LangChain nodes use the `@n8n/n8n-nodes-langchain.` prefix, core nodes use `n8n-nodes-base.`

````

Save these instructions in your Claude Project for optimal n8n workflow assistance with intelligent template discovery.
@@ -673,6 +904,11 @@ This tool was created to benefit everyone in the n8n community without friction.
- **📖 Essential Properties**: Get only the 10-20 properties that matter
- **💡 Real-World Examples**: 2,646 pre-extracted configurations from popular templates
- **✅ Config Validation**: Validate node configurations before deployment
+- **🤖 AI Workflow Validation**: Comprehensive validation for AI Agent workflows (NEW in v2.17.0!)
+  - Missing language model detection
+  - AI tool connection validation
+  - Streaming mode constraints
+  - Memory and output parser checks
- **🔗 Dependency Analysis**: Understand property relationships and conditions
- **🎯 Template Discovery**: 2,500+ workflow templates with smart filtering
- **⚡ Fast Response**: Average query time ~12ms with optimized SQLite
@@ -714,12 +950,18 @@ Once connected, Claude can use these powerful tools:
- **`get_template`** - Get complete workflow JSON for import
- **`get_templates_for_task`** - Curated templates for common automation tasks

-### Advanced Tools
-- **`validate_node_operation`** - Validate node configurations (operation-aware, profiles support)
-- **`validate_node_minimal`** - Quick validation for just required fields
-- **`validate_workflow`** - Complete workflow validation including AI tool connections
+### Validation Tools
+- **`validate_workflow`** - Complete workflow validation including **AI Agent validation** (NEW in v2.17.0!)
+  - Detects missing language model connections
+  - Validates AI tool connections (no false warnings)
+  - Enforces streaming mode constraints
+  - Checks memory and output parser configurations
+- **`validate_workflow_connections`** - Check workflow structure and AI tool connections
+- **`validate_workflow_expressions`** - Validate n8n expressions including $fromAI()
+- **`validate_node_operation`** - Validate node configurations (operation-aware, profiles support)
+- **`validate_node_minimal`** - Quick validation for just required fields

### Advanced Tools
- **`get_property_dependencies`** - Analyze property visibility conditions
- **`get_node_documentation`** - Get parsed documentation from n8n-docs
- **`get_database_statistics`** - View database metrics and coverage
@@ -739,6 +981,7 @@ These powerful tools allow you to manage n8n workflows directly from Claude. The
- **`n8n_list_workflows`** - List workflows with filtering and pagination
- **`n8n_validate_workflow`** - Validate workflows already in n8n by ID (NEW in v2.6.3)
- **`n8n_autofix_workflow`** - Automatically fix common workflow errors (NEW in v2.13.0!)
+- **`n8n_workflow_versions`** - Manage workflow version history and rollback (NEW in v2.22.0!)

#### Execution Management
- **`n8n_trigger_webhook_workflow`** - Trigger workflows via webhook URL

data/nodes.db — binary file not shown
@@ -20,19 +20,19 @@ services:
    image: n8n-mcp:latest
    container_name: n8n-mcp
    ports:
-      - "3000:3000"
+      - "${PORT:-3000}:${PORT:-3000}"
    environment:
      - MCP_MODE=${MCP_MODE:-http}
      - AUTH_TOKEN=${AUTH_TOKEN}
      - NODE_ENV=${NODE_ENV:-production}
      - LOG_LEVEL=${LOG_LEVEL:-info}
-      - PORT=3000
+      - PORT=${PORT:-3000}
    volumes:
      # Mount data directory for persistence
      - ./data:/app/data
    restart: unless-stopped
    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
+      test: ["CMD", "sh", "-c", "curl -f http://localhost:$${PORT:-3000}/health"]
      interval: 30s
      timeout: 10s
      retries: 3
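A note on the `$${PORT:-3000}` form in the healthcheck: Docker Compose interpolates single-`$` expressions itself while parsing the file, so `${PORT:-3000}` in the `ports:` mapping is resolved on the host at compose time. Inside the healthcheck command, `$$` escapes that interpolation and passes a literal `${PORT:-3000}` through to `sh -c`, which expands it at runtime against the container's environment. The same escaping appears in the healthchecks below.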
@@ -37,11 +37,12 @@ services:
    container_name: n8n-mcp
    restart: unless-stopped
    ports:
-      - "${MCP_PORT:-3000}:3000"
+      - "${MCP_PORT:-3000}:${MCP_PORT:-3000}"
    environment:
      - NODE_ENV=production
      - N8N_MODE=true
      - MCP_MODE=http
+      - PORT=${MCP_PORT:-3000}
      - N8N_API_URL=http://n8n:5678
      - N8N_API_KEY=${N8N_API_KEY}
      - MCP_AUTH_TOKEN=${MCP_AUTH_TOKEN}
@@ -56,7 +57,7 @@ services:
      n8n:
        condition: service_healthy
    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
+      test: ["CMD", "sh", "-c", "curl -f http://localhost:$${MCP_PORT:-3000}/health"]
      interval: 30s
      timeout: 10s
      retries: 3

@@ -41,7 +41,7 @@ services:

    # Port mapping
    ports:
-      - "${PORT:-3000}:3000"
+      - "${PORT:-3000}:${PORT:-3000}"

    # Resource limits
    deploy:
@@ -53,7 +53,7 @@ services:

    # Health check
    healthcheck:
-      test: ["CMD", "curl", "-f", "http://127.0.0.1:3000/health"]
+      test: ["CMD", "sh", "-c", "curl -f http://127.0.0.1:$${PORT:-3000}/health"]
      interval: 30s
      timeout: 10s
      retries: 3

@@ -5,6 +5,56 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased] - Phase 0: Connection Operations Critical Fixes

### Fixed
- **🐛 CRITICAL: Fixed `addConnection` sourceIndex handling (Issue #272, discovered in hands-on testing)**
  - Multi-output nodes (IF, Switch) now work correctly with sourceIndex parameter
  - Changed from `||` to `??` operator to properly handle explicit 0 values
  - Added defensive array validation before accessing indices
  - Improves rating from 3/10 to 8/10 for multi-output node scenarios
  - **Impact**: IF nodes, Switch nodes, and all conditional routing now reliable
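The `||`-vs-`??` distinction matters here precisely because `0` is a legitimate output index (an IF node's first output). A minimal TypeScript illustration of the hazard — not the project's actual code:

```typescript
// With ||, any falsy value (including an explicit 0) triggers the fallback;
// with ??, only null/undefined do — so an explicitly requested index 0 survives.
const requested: number | undefined = 0; // caller explicitly asked for output 0
const fallback = 1;                      // whatever default would otherwise apply

const withOr = requested || fallback;      // 1 — the explicit 0 is discarded
const withNullish = requested ?? fallback; // 0 — correct
```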
- **🐛 CRITICAL: Added runtime validation for `updateConnection` (Issue #272, #204)**
  - Prevents server crashes when `updates` object is missing
  - Provides helpful error message with:
    - Clear explanation of what's wrong
    - Correct format example
    - Suggestion to use removeConnection + addConnection for rewiring
  - Validates `updates` is an object, not string or other type
  - **Impact**: No more cryptic "Cannot read properties of undefined" crashes
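A sketch of this kind of runtime guard (illustrative only — the error text and the `sourceIndex` field in the example are assumptions, not the actual implementation):

```typescript
// Reject a missing or non-object `updates` up front, with actionable guidance,
// instead of crashing on a property access deeper in the handler.
function assertUpdates(updates: unknown): asserts updates is Record<string, unknown> {
  if (typeof updates !== 'object' || updates === null || Array.isArray(updates)) {
    throw new Error(
      "updateConnection requires an 'updates' object, e.g. " +
        '{"updates": {"sourceIndex": 1}}. ' +
        'updateConnection modifies connection properties, not targets — ' +
        'to rewire, use removeConnection + addConnection.'
    );
  }
}
```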
### Enhanced
- **Error Messages**: `updateConnection` errors now include actionable guidance
  - Example format shown in error
  - Alternative approaches suggested (removeConnection + addConnection)
  - Clear explanation that updateConnection modifies properties, not targets

### Testing
- Added 8 comprehensive tests for Phase 0 fixes
  - 2 tests for updateConnection validation (missing updates, invalid type)
  - 5 tests for sourceIndex handling (IF nodes, parallel execution, Switch nodes, explicit 0)
  - 1 test for complex multi-output routing scenarios
- All 126 existing tests still passing

### Documentation
- Updated tool documentation to clarify:
  - `addConnection` now properly handles sourceIndex (Phase 0 fix noted)
  - `updateConnection` REQUIRES 'updates' object (Phase 0 validation noted)
- Added pitfalls about updateConnection limitations
- Clarified that updateConnection modifies properties, NOT connection targets

### Developer Experience
- More defensive programming throughout connection operations
- Better use of nullish coalescing (??) vs. logical OR (||)
- Clear inline comments explaining expected behavior
- Improved type safety with runtime guards

### References
- Comprehensive analysis: `docs/local/connection-operations-deep-dive-and-improvement-plan.md`
- Based on hands-on testing with the n8n-mcp-tester agent
- Overall experience rating improved from 4.5/10 to an estimated 6/10

## [2.14.4] - 2025-09-30

### Added

docs/CI_TEST_INFRASTRUCTURE.md (new file) — 111 lines

@@ -0,0 +1,111 @@
# CI Test Infrastructure - Known Issues

## Integration Test Failures for External Contributor PRs

### Issue Summary

Integration tests fail for external contributor PRs with "No response from n8n server" errors, despite the code changes being correct. This is a **test infrastructure issue**, not a code quality issue.

### Root Cause

1. **GitHub Actions Security**: External contributor PRs don't get access to repository secrets (`N8N_API_URL`, `N8N_API_KEY`, etc.)
2. **MSW Mock Server**: Mock Service Worker (MSW) is not properly intercepting HTTP requests in the CI environment
3. **Test Configuration**: Integration tests expect `http://localhost:3001/mock-api` but the mock server isn't responding

### Evidence

From CI logs (PR #343):
```
[CI-DEBUG] Global setup complete, N8N_API_URL: http://localhost:3001/mock-api
❌ No response from n8n server (repeated 60+ times across 20 tests)
```

The tests ARE using the correct mock URL, but MSW isn't intercepting the requests.
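For context, Node-side MSW interception is wired up roughly like this (a generic msw v2 sketch, not the repository's actual handlers):

```typescript
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';

// Handlers like these only intercept requests made from within the same
// Node process; if the interceptor never attaches in CI, requests fall
// through to the network and fail exactly as shown in the logs above.
const server = setupServer(
  http.get('http://localhost:3001/mock-api/*', () =>
    HttpResponse.json({ status: 'ok' })
  )
);

server.listen({ onUnhandledRequest: 'warn' }); // surface anything MSW misses
```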
### Why This Happens

**For External PRs:**
- GitHub Actions doesn't expose repository secrets for security reasons
- Prevents malicious PRs from exfiltrating secrets
- MSW setup runs but requests don't get intercepted in CI

**Test Configuration:**
- `.env.test` line 19: `N8N_API_URL=http://localhost:3001/mock-api`
- `.env.test` line 67: `MSW_ENABLED=true`
- CI workflow lines 75-80: Secrets set but empty for external PRs

### Impact

- ✅ **Code Quality**: NOT affected - the actual code changes are correct
- ✅ **Local Testing**: Works fine - MSW intercepts requests locally
- ❌ **CI for External PRs**: Integration tests fail (infrastructure issue)
- ✅ **CI for Internal PRs**: Works fine (has access to secrets)

### Current Workarounds

1. **For Maintainers**: Use the `--admin` flag to merge despite failing tests when the code is verified correct
2. **For Contributors**: Run tests locally, where MSW works properly
3. **For CI**: Unit tests pass (they don't require the n8n API); integration tests fail

### Files Affected

- `tests/integration/setup/integration-setup.ts` - MSW server setup
- `tests/setup/msw-setup.ts` - MSW configuration
- `tests/mocks/n8n-api/handlers.ts` - Mock request handlers
- `.github/workflows/test.yml` - CI configuration
- `.env.test` - Test environment configuration

### Potential Solutions (Not Implemented)

1. **Separate Unit/Integration Runs**
   - Run integration tests only for internal PRs
   - Skip integration tests for external PRs
   - Rely on unit tests for external PR validation

2. **MSW CI Debugging**
   - Add extensive logging to MSW setup
   - Check whether the MSW server actually starts in CI
   - Verify request interception is working

3. **Mock Server Process**
   - Start an actual HTTP server in CI instead of MSW
   - More reliable but adds complexity
   - Would require test infrastructure refactoring

4. **Public Test Instance**
   - Use a publicly accessible test n8n instance
   - Exposes test data, security concerns
   - Would work for external PRs

### Decision

**Status**: Documented but not fixed

**Rationale**:
- Integration test infrastructure refactoring is a separate concern from code quality
- External PRs are relatively rare compared to internal development
- Unit tests provide sufficient coverage for most changes
- Maintainers can verify integration tests locally before merging

### Testing Strategy

**For External Contributor PRs:**
1. ✅ Unit tests must pass
2. ✅ TypeScript compilation must pass
3. ✅ Build must succeed
4. ⚠️ Integration test failures are expected (infrastructure issue)
5. ✅ Maintainer verifies locally before merge

**For Internal PRs:**
1. ✅ All tests must pass (unit + integration)
2. ✅ Full CI validation

### References

- PR #343: First occurrence of this issue
- PR #345: Documented the infrastructure issue
- Issue: External PRs don't get secrets (GitHub Actions security)

### Last Updated

2025-10-21 - Documented as part of PR #345 investigation

@@ -80,6 +80,53 @@ Remove the server:
claude mcp remove n8n-mcp
```

## 🎓 Add Claude Skills (Optional)

Supercharge your n8n workflow building with specialized Claude Code skills! The [n8n-skills](https://github.com/czlonkowski/n8n-skills) repository provides 7 complementary skills that teach AI assistants how to build production-ready n8n workflows.

### What You Get

- ✅ **n8n Expression Syntax** - Correct {{}} patterns and common mistakes
- ✅ **n8n MCP Tools Expert** - How to use n8n-mcp tools effectively
- ✅ **n8n Workflow Patterns** - 5 proven architectural patterns
- ✅ **n8n Validation Expert** - Interpret and fix validation errors
- ✅ **n8n Node Configuration** - Operation-aware setup guidance
- ✅ **n8n Code JavaScript** - Write effective JavaScript in Code nodes
- ✅ **n8n Code Python** - Python patterns with limitation awareness

### Installation

**Method 1: Plugin Installation** (Recommended)
```bash
/plugin install czlonkowski/n8n-skills
```

**Method 2: Via Marketplace**
```bash
# Add as marketplace, then browse and install
/plugin marketplace add czlonkowski/n8n-skills

# Then browse available plugins
/plugin install
# Select "n8n-mcp-skills" from the list
```

**Method 3: Manual Installation**
```bash
# 1. Clone the repository
git clone https://github.com/czlonkowski/n8n-skills.git

# 2. Copy skills to your Claude Code skills directory
cp -r n8n-skills/skills/* ~/.claude/skills/

# 3. Reload Claude Code
# Skills will activate automatically
```

For complete installation instructions, configuration options, and usage examples, see the [n8n-skills README](https://github.com/czlonkowski/n8n-skills#-installation).

Skills work seamlessly with n8n-mcp to provide expert guidance throughout the workflow building process!

## Project Instructions

For optimal results, create a `CLAUDE.md` file in your project root with the instructions from the [main README's Claude Project Setup section](../README.md#-claude-project-setup).

@@ -65,6 +65,9 @@ docker run -d \
| `NODE_ENV` | Environment: `development` or `production` | `production` | No |
| `LOG_LEVEL` | Logging level: `debug`, `info`, `warn`, `error` | `info` | No |
| `NODE_DB_PATH` | Custom database path (v2.7.16+) | `/app/data/nodes.db` | No |
| `AUTH_RATE_LIMIT_WINDOW` | Rate limit window in ms (v2.16.3+) | `900000` (15 min) | No |
| `AUTH_RATE_LIMIT_MAX` | Max auth attempts per window (v2.16.3+) | `20` | No |
| `WEBHOOK_SECURITY_MODE` | SSRF protection: `strict`/`moderate`/`permissive` (v2.16.3+) | `strict` | No |

*Either `AUTH_TOKEN` or `AUTH_TOKEN_FILE` must be set for HTTP mode. If both are set, `AUTH_TOKEN` takes precedence.

@@ -283,7 +286,36 @@ docker ps --format "table {{.Names}}\t{{.Status}}"
docker inspect n8n-mcp | jq '.[0].State.Health'
```

-## 🔒 Security Considerations
+## 🔒 Security Features (v2.16.3+)

### Rate Limiting

Protects against brute force authentication attacks:

```bash
# Configure in .env or docker-compose.yml
AUTH_RATE_LIMIT_WINDOW=900000  # 15 minutes in milliseconds
AUTH_RATE_LIMIT_MAX=20         # 20 attempts per IP per window
```

### SSRF Protection

Prevents Server-Side Request Forgery when using webhook triggers:

```bash
# For production (blocks localhost + private IPs + cloud metadata)
WEBHOOK_SECURITY_MODE=strict

# For local development with local n8n instance
WEBHOOK_SECURITY_MODE=moderate

# For internal testing only (allows private IPs)
WEBHOOK_SECURITY_MODE=permissive
```

**Note:** Cloud metadata endpoints (169.254.169.254, metadata.google.internal, etc.) are ALWAYS blocked in all modes.

## 🔒 Authentication

### Authentication

@@ -196,6 +196,41 @@ docker ps -a | grep n8n-mcp | grep Exited | awk '{print $1}' | xargs -r docker r
- Manually clean up containers periodically
- Consider using HTTP mode instead

### Webhooks to Local n8n Fail (v2.16.3+)

**Symptoms:**
- `n8n_trigger_webhook_workflow` fails with "SSRF protection" error
- Error message: "SSRF protection: Localhost access is blocked"
- Webhooks work from n8n UI but not from n8n-MCP

**Root Cause:** Default strict SSRF protection blocks localhost access to prevent attacks.

**Solution:** Use moderate security mode for local development

```bash
# For Docker run
docker run -d \
  --name n8n-mcp \
  -e MCP_MODE=http \
  -e AUTH_TOKEN=your-token \
  -e WEBHOOK_SECURITY_MODE=moderate \
  -p 3000:3000 \
  ghcr.io/czlonkowski/n8n-mcp:latest

# For Docker Compose - add to environment:
services:
  n8n-mcp:
    environment:
      WEBHOOK_SECURITY_MODE: moderate
```

**Security Modes Explained:**
- `strict` (default): Blocks localhost + private IPs + cloud metadata (production)
- `moderate`: Allows localhost, blocks private IPs + cloud metadata (local development)
- `permissive`: Allows localhost + private IPs, blocks cloud metadata (testing only)

**Important:** Always use `strict` mode in production. Cloud metadata is blocked in all modes.

### n8n API Connection Issues

**Symptoms:**

docs/FINAL_AI_VALIDATION_SPEC.md (new file) — 3491 lines (diff suppressed because it is too large)
@@ -73,6 +73,13 @@ PORT=3000
# Optional: Enable n8n management tools
# N8N_API_URL=https://your-n8n-instance.com
# N8N_API_KEY=your-api-key-here
# Security Configuration (v2.16.3+)
# Rate limiting (default: 20 attempts per 15 minutes)
AUTH_RATE_LIMIT_WINDOW=900000
AUTH_RATE_LIMIT_MAX=20
# SSRF protection mode (default: strict)
# Use 'moderate' for local n8n, 'strict' for production
WEBHOOK_SECURITY_MODE=strict
EOF

# 2. Deploy with Docker
@@ -592,6 +599,67 @@ curl -H "Authorization: Bearer $AUTH_TOKEN" \
}
```

## 🔒 Security Features (v2.16.3+)

### Rate Limiting

Built-in rate limiting protects authentication endpoints from brute force attacks:

**Configuration:**
```bash
# Defaults (15 minutes window, 20 attempts per IP)
AUTH_RATE_LIMIT_WINDOW=900000  # milliseconds
AUTH_RATE_LIMIT_MAX=20
```

**Features:**
- Per-IP rate limiting with configurable window and max attempts
- Standard rate limit headers (RateLimit-Limit, RateLimit-Remaining, RateLimit-Reset)
- JSON-RPC formatted error responses
- Automatic IP tracking behind reverse proxies (requires TRUST_PROXY=1)

**Behavior:**
- First 20 attempts: Return 401 Unauthorized for invalid credentials
- Attempts 21+: Return 429 Too Many Requests with a Retry-After header
- Counter resets after 15 minutes (configurable)
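The window/max semantics amount to a per-IP counter over a fixed window. A minimal TypeScript sketch of the idea (illustrative; the server's actual implementation may differ):

```typescript
// Illustrative fixed-window limiter keyed by client IP.
const WINDOW_MS = Number(process.env.AUTH_RATE_LIMIT_WINDOW ?? 900_000);
const MAX_ATTEMPTS = Number(process.env.AUTH_RATE_LIMIT_MAX ?? 20);

const attempts = new Map<string, { count: number; windowStart: number }>();

function allowAuthAttempt(ip: string, now = Date.now()): boolean {
  const entry = attempts.get(ip);
  if (!entry || now - entry.windowStart >= WINDOW_MS) {
    attempts.set(ip, { count: 1, windowStart: now }); // start a fresh window
    return true;
  }
  entry.count += 1;
  return entry.count <= MAX_ATTEMPTS; // the 21st attempt in a window → 429
}
```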
### SSRF Protection

Prevents Server-Side Request Forgery attacks when using webhook triggers:

**Three Security Modes:**

1. **Strict Mode (default)** - Production deployments
   ```bash
   WEBHOOK_SECURITY_MODE=strict
   ```
   - ✅ Block localhost (127.0.0.1, ::1)
   - ✅ Block private IPs (10.x, 192.168.x, 172.16-31.x)
   - ✅ Block cloud metadata (169.254.169.254, metadata.google.internal)
   - ✅ DNS rebinding prevention
   - 🎯 **Use for**: Cloud deployments, production environments

2. **Moderate Mode** - Local development with local n8n
   ```bash
   WEBHOOK_SECURITY_MODE=moderate
   ```
   - ✅ Allow localhost (for local n8n instances)
   - ✅ Block private IPs
   - ✅ Block cloud metadata
   - ✅ DNS rebinding prevention
   - 🎯 **Use for**: Development with n8n on localhost:5678

3. **Permissive Mode** - Internal networks only
   ```bash
   WEBHOOK_SECURITY_MODE=permissive
   ```
   - ✅ Allow localhost and private IPs
   - ✅ Block cloud metadata (always blocked)
   - ✅ DNS rebinding prevention
   - 🎯 **Use for**: Internal testing (NOT for production)

**Important:** Cloud metadata endpoints are ALWAYS blocked in all modes for security.
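
The decision table above can be sketched as code. This is illustrative only, not the project's implementation; in the real checks the hostname is resolved first and the verdict applies to the resolved IP, which is what defeats DNS rebinding:

```typescript
// Illustrative classification of a target host under the three modes.
type SecurityMode = 'strict' | 'moderate' | 'permissive';

const isLocalhost = (h: string) => h === 'localhost' || h === '127.0.0.1' || h === '::1';
const isPrivate = (h: string) =>
  /^10\./.test(h) || /^192\.168\./.test(h) || /^172\.(1[6-9]|2\d|3[01])\./.test(h);
const isCloudMetadata = (h: string) =>
  h === '169.254.169.254' || h === 'metadata.google.internal';

function isAllowed(host: string, mode: SecurityMode): boolean {
  if (isCloudMetadata(host)) return false;         // blocked in every mode
  if (isLocalhost(host)) return mode !== 'strict'; // moderate and permissive allow it
  if (isPrivate(host)) return mode === 'permissive';
  return true;
}
```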
## 🔒 Security Best Practices

### 1. Token Management
@@ -59,10 +59,10 @@ docker compose up -d
       - n8n-mcp-data:/app/data
     ports:
-      - "${PORT:-3000}:3000"
+      - "${PORT:-3000}:${PORT:-3000}"
     healthcheck:
-      test: ["CMD", "curl", "-f", "http://127.0.0.1:3000/health"]
+      test: ["CMD", "sh", "-c", "curl -f http://127.0.0.1:$${PORT:-3000}/health"]
       interval: 30s
       timeout: 10s
       retries: 3
docs/LIBRARY_USAGE.md (new file, 724 lines)
@@ -0,0 +1,724 @@
# Library Usage Guide - Multi-Tenant / Hosted Deployments

This guide covers using n8n-mcp as a library dependency for building multi-tenant hosted services.

## Overview

n8n-mcp can be used as a Node.js library to build multi-tenant backends that provide MCP services to multiple users or instances. The package exports all necessary components for integration into your existing services.

## Installation

```bash
npm install n8n-mcp
```

## Core Concepts

### Library Mode vs CLI Mode

- **CLI Mode** (default): Single-player usage via `npx n8n-mcp` or Docker
- **Library Mode**: Multi-tenant usage by importing and using the `N8NMCPEngine` class

### Instance Context

The `InstanceContext` type allows you to pass per-request configuration to the MCP engine:

```typescript
interface InstanceContext {
  // Instance-specific n8n API configuration
  n8nApiUrl?: string;
  n8nApiKey?: string;
  n8nApiTimeout?: number;
  n8nApiMaxRetries?: number;

  // Instance identification
  instanceId?: string;
  sessionId?: string;

  // Extensible metadata
  metadata?: Record<string, any>;
}
```

## Basic Example

```typescript
import express from 'express';
import { N8NMCPEngine } from 'n8n-mcp';

const app = express();
const mcpEngine = new N8NMCPEngine({
  sessionTimeout: 3600000, // 1 hour
  logLevel: 'info'
});

// Handle MCP requests with per-user context
app.post('/mcp', async (req, res) => {
  const instanceContext = {
    n8nApiUrl: req.user.n8nUrl,
    n8nApiKey: req.user.n8nApiKey,
    instanceId: req.user.id
  };

  await mcpEngine.processRequest(req, res, instanceContext);
});

app.listen(3000);
```

## Multi-Tenant Backend Example

This example shows a complete multi-tenant implementation with user authentication and instance management:

```typescript
import express from 'express';
import { N8NMCPEngine, InstanceContext, validateInstanceContext } from 'n8n-mcp';

const app = express();
const mcpEngine = new N8NMCPEngine({
  sessionTimeout: 3600000, // 1 hour
  logLevel: 'info'
});

// Start MCP engine
await mcpEngine.start();

// Authentication middleware
const authenticate = async (req, res, next) => {
  const token = req.headers.authorization?.replace('Bearer ', '');
  if (!token) {
    return res.status(401).json({ error: 'Unauthorized' });
  }

  // Verify token and attach user to request
  req.user = await getUserFromToken(token);
  next();
};

// Get instance configuration from database
const getInstanceConfig = async (instanceId: string, userId: string) => {
  // Your database logic here
  const instance = await db.instances.findOne({
    where: { id: instanceId, userId }
  });

  if (!instance) {
    throw new Error('Instance not found');
  }

  return {
    n8nApiUrl: instance.n8nUrl,
    n8nApiKey: await decryptApiKey(instance.encryptedApiKey),
    instanceId: instance.id
  };
};

// MCP endpoint with per-instance context
app.post('/api/instances/:instanceId/mcp', authenticate, async (req, res) => {
  try {
    // Get instance configuration
    const instance = await getInstanceConfig(req.params.instanceId, req.user.id);

    // Create instance context
    const context: InstanceContext = {
      n8nApiUrl: instance.n8nApiUrl,
      n8nApiKey: instance.n8nApiKey,
      instanceId: instance.instanceId,
      metadata: {
        userId: req.user.id,
        userAgent: req.headers['user-agent'],
        ip: req.ip
      }
    };

    // Validate context before processing
    const validation = validateInstanceContext(context);
    if (!validation.valid) {
      return res.status(400).json({
        error: 'Invalid instance configuration',
        details: validation.errors
      });
    }

    // Process request with instance context
    await mcpEngine.processRequest(req, res, context);

  } catch (error) {
    console.error('MCP request error:', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});

// Health endpoint
app.get('/health', async (req, res) => {
  const health = await mcpEngine.healthCheck();
  res.status(health.status === 'healthy' ? 200 : 503).json(health);
});

// Graceful shutdown
process.on('SIGTERM', async () => {
  await mcpEngine.shutdown();
  process.exit(0);
});

app.listen(3000);
```

## API Reference

### N8NMCPEngine

#### Constructor

```typescript
new N8NMCPEngine(options?: {
  sessionTimeout?: number; // Session TTL in ms (default: 1800000 = 30min)
  logLevel?: 'error' | 'warn' | 'info' | 'debug'; // Default: 'info'
})
```

#### Methods

##### `async processRequest(req, res, context?)`

Process a single MCP request with optional instance context.

**Parameters:**
- `req`: Express request object
- `res`: Express response object
- `context` (optional): InstanceContext with per-instance configuration

**Example:**
```typescript
const context: InstanceContext = {
  n8nApiUrl: 'https://instance1.n8n.cloud',
  n8nApiKey: 'instance1-key',
  instanceId: 'tenant-123'
};

await engine.processRequest(req, res, context);
```

##### `async healthCheck()`

Get engine health status for monitoring.

**Returns:** `EngineHealth`
```typescript
{
  status: 'healthy' | 'unhealthy';
  uptime: number; // seconds
  sessionActive: boolean;
  memoryUsage: {
    used: number;
    total: number;
    unit: string;
  };
  version: string;
}
```

**Example:**
```typescript
app.get('/health', async (req, res) => {
  const health = await engine.healthCheck();
  res.status(health.status === 'healthy' ? 200 : 503).json(health);
});
```

##### `getSessionInfo()`

Get current session information for debugging.

**Returns:**
```typescript
{
  active: boolean;
  sessionId?: string;
  age?: number; // milliseconds
  sessions?: {
    total: number;
    active: number;
    expired: number;
    max: number;
    sessionIds: string[];
  };
}
```

##### `async start()`

Start the engine (for standalone mode). Not needed when using `processRequest()` directly.

##### `async shutdown()`

Graceful shutdown for service lifecycle management.

**Example:**
```typescript
process.on('SIGTERM', async () => {
  await engine.shutdown();
  process.exit(0);
});
```

### Types

#### InstanceContext

Configuration for a specific user instance:

```typescript
interface InstanceContext {
  n8nApiUrl?: string;
  n8nApiKey?: string;
  n8nApiTimeout?: number;
  n8nApiMaxRetries?: number;
  instanceId?: string;
  sessionId?: string;
  metadata?: Record<string, any>;
}
```

#### Validation Functions

##### `validateInstanceContext(context: InstanceContext)`

Validate and sanitize instance context.

**Returns:**
```typescript
{
  valid: boolean;
  errors?: string[];
}
```

**Example:**
```typescript
import { validateInstanceContext } from 'n8n-mcp';

const validation = validateInstanceContext(context);
if (!validation.valid) {
  console.error('Invalid context:', validation.errors);
}
```

##### `isInstanceContext(obj: any)`

Type guard to check if an object is a valid InstanceContext.

**Example:**
```typescript
import { isInstanceContext } from 'n8n-mcp';

if (isInstanceContext(req.body.context)) {
  // TypeScript knows this is InstanceContext
  await engine.processRequest(req, res, req.body.context);
}
```

## Session Management

### Session Strategies

The MCP engine supports flexible session ID formats:

- **UUIDv4**: Internal n8n-mcp format (default)
- **Instance-prefixed**: `instance-{userId}-{hash}-{uuid}` for multi-tenant isolation
- **Custom formats**: Any non-empty string for mcp-remote and other proxies

Session validation happens via transport lookup, not format validation. This ensures compatibility with all MCP clients.

### Multi-Tenant Configuration

Set these environment variables for multi-tenant mode:

```bash
# Enable multi-tenant mode
ENABLE_MULTI_TENANT=true

# Session strategy: "instance" (default) or "shared"
MULTI_TENANT_SESSION_STRATEGY=instance
```

**Session Strategies:**

- **instance** (recommended): Each tenant gets isolated sessions
  - Session ID: `instance-{instanceId}-{configHash}-{uuid}`
  - Better isolation and security
  - Easier debugging per tenant

- **shared**: Multiple tenants share sessions with context switching
  - More efficient for high tenant count
  - Requires careful context management
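
For illustration, an instance-prefixed session ID of the documented shape can be assembled as below; `hashConfig` is a hypothetical helper, and only the `instance-{instanceId}-{configHash}-{uuid}` format itself comes from this guide:

```typescript
import { createHash, randomUUID } from 'crypto';

// Hypothetical helper: derive a short, stable hash from the instance config.
const hashConfig = (cfg: { n8nApiUrl?: string }): string =>
  createHash('sha256').update(cfg.n8nApiUrl ?? '').digest('hex').slice(0, 8);

// Documented shape: instance-{instanceId}-{configHash}-{uuid}
function makeSessionId(instanceId: string, cfg: { n8nApiUrl?: string }): string {
  return `instance-${instanceId}-${hashConfig(cfg)}-${randomUUID()}`;
}

// e.g. "instance-tenant-123-1a2b3c4d-<uuid>" for a given tenant and config
console.log(makeSessionId('tenant-123', { n8nApiUrl: 'https://instance1.n8n.cloud' }));
```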
## Security Considerations

### API Key Management

Always encrypt API keys server-side:

```typescript
import { createCipheriv, createDecipheriv, randomBytes } from 'crypto';

// 32-byte key loaded from secure configuration (never hardcoded)
const encryptionKey = Buffer.from(process.env.ENCRYPTION_KEY!, 'hex');

// Encrypt before storing; GCM needs a fresh IV per encryption and the auth tag kept alongside
const encryptApiKey = (apiKey: string) => {
  const iv = randomBytes(12);
  const cipher = createCipheriv('aes-256-gcm', encryptionKey, iv);
  const encrypted = cipher.update(apiKey, 'utf8', 'hex') + cipher.final('hex');
  return `${iv.toString('hex')}:${cipher.getAuthTag().toString('hex')}:${encrypted}`;
};

// Decrypt before using; setAuthTag is required or final() will throw
const decryptApiKey = (payload: string) => {
  const [ivHex, tagHex, encrypted] = payload.split(':');
  const decipher = createDecipheriv('aes-256-gcm', encryptionKey, Buffer.from(ivHex, 'hex'));
  decipher.setAuthTag(Buffer.from(tagHex, 'hex'));
  return decipher.update(encrypted, 'hex', 'utf8') + decipher.final('utf8');
};

// Use decrypted key in context
const context: InstanceContext = {
  n8nApiKey: decryptApiKey(instance.encryptedApiKey),
  // ...
};
```
### Input Validation

Always validate instance context before processing:

```typescript
import { validateInstanceContext } from 'n8n-mcp';

const validation = validateInstanceContext(context);
if (!validation.valid) {
  throw new Error(`Invalid context: ${validation.errors?.join(', ')}`);
}
```

### Rate Limiting

Implement rate limiting per tenant:

```typescript
import rateLimit from 'express-rate-limit';

const limiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 100, // limit each tenant (or IP, when unauthenticated) to 100 requests per window
  keyGenerator: (req) => req.user?.id || req.ip
});

app.post('/api/instances/:instanceId/mcp', authenticate, limiter, async (req, res) => {
  // ...
});
```

## Error Handling

Always wrap MCP requests in try-catch blocks:

```typescript
app.post('/api/instances/:instanceId/mcp', authenticate, async (req, res) => {
  try {
    const context = await getInstanceConfig(req.params.instanceId, req.user.id);
    await mcpEngine.processRequest(req, res, context);
  } catch (error) {
    console.error('MCP error:', error);

    // Don't leak internal errors to clients
    if (error.message.includes('not found')) {
      return res.status(404).json({ error: 'Instance not found' });
    }

    res.status(500).json({ error: 'Internal server error' });
  }
});
```

## Monitoring

### Health Checks

Set up periodic health checks:

```typescript
setInterval(async () => {
  const health = await mcpEngine.healthCheck();

  if (health.status === 'unhealthy') {
    console.error('MCP engine unhealthy:', health);
    // Alert your monitoring system
  }

  // Log metrics
  console.log('MCP engine metrics:', {
    uptime: health.uptime,
    memory: health.memoryUsage,
    sessionActive: health.sessionActive
  });
}, 60000); // Every minute
```

### Session Monitoring

Track active sessions:

```typescript
app.get('/admin/sessions', authenticate, async (req, res) => {
  if (!req.user.isAdmin) {
    return res.status(403).json({ error: 'Forbidden' });
  }

  const sessionInfo = mcpEngine.getSessionInfo();
  res.json(sessionInfo);
});
```

## Testing

### Unit Testing

```typescript
import { N8NMCPEngine, InstanceContext } from 'n8n-mcp';

describe('MCP Engine', () => {
  let engine: N8NMCPEngine;

  beforeEach(() => {
    engine = new N8NMCPEngine({ logLevel: 'error' });
  });

  afterEach(async () => {
    await engine.shutdown();
  });

  it('should process request with context', async () => {
    const context: InstanceContext = {
      n8nApiUrl: 'https://test.n8n.io',
      n8nApiKey: 'test-key',
      instanceId: 'test-instance'
    };

    const mockReq = createMockRequest();
    const mockRes = createMockResponse();

    await engine.processRequest(mockReq, mockRes, context);

    expect(mockRes.status).toBe(200);
  });
});
```

### Integration Testing

```typescript
import request from 'supertest';
import { createApp } from './app';

describe('Multi-tenant MCP API', () => {
  let app;
  let authToken;

  beforeAll(async () => {
    app = await createApp();
    authToken = await getTestAuthToken();
  });

  it('should handle MCP request for instance', async () => {
    const response = await request(app)
      .post('/api/instances/test-instance/mcp')
      .set('Authorization', `Bearer ${authToken}`)
      .send({
        jsonrpc: '2.0',
        method: 'initialize',
        params: {
          protocolVersion: '2024-11-05',
          capabilities: {}
        },
        id: 1
      });

    expect(response.status).toBe(200);
    expect(response.body.result).toBeDefined();
  });
});
```

## Deployment Considerations

### Environment Variables

```bash
# Required for multi-tenant mode
ENABLE_MULTI_TENANT=true
MULTI_TENANT_SESSION_STRATEGY=instance

# Optional: Logging
LOG_LEVEL=info
DISABLE_CONSOLE_OUTPUT=false

# Optional: Session configuration
SESSION_TIMEOUT=1800000  # 30 minutes in milliseconds
MAX_SESSIONS=100

# Optional: Performance
NODE_ENV=production
```

### Docker Deployment

```dockerfile
FROM node:20-alpine

WORKDIR /app

COPY package*.json ./
RUN npm ci --only=production

COPY . .

ENV NODE_ENV=production
ENV ENABLE_MULTI_TENANT=true
ENV LOG_LEVEL=info

EXPOSE 3000

CMD ["node", "dist/server.js"]
```

### Kubernetes Deployment

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: n8n-mcp-backend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: n8n-mcp-backend
  template:
    metadata:
      labels:
        app: n8n-mcp-backend
    spec:
      containers:
        - name: backend
          image: your-registry/n8n-mcp-backend:latest
          ports:
            - containerPort: 3000
          env:
            - name: ENABLE_MULTI_TENANT
              value: "true"
            - name: LOG_LEVEL
              value: "info"
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 10
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 10
```

## Examples

### Complete Multi-Tenant SaaS Example

For a complete implementation example, see:
- [n8n-mcp-backend](https://github.com/czlonkowski/n8n-mcp-backend) - Full hosted service implementation

### Migration from Single-Player

If you're migrating from single-player (CLI/Docker) to multi-tenant:

1. **Keep backward compatibility** - Use environment fallback:
   ```typescript
   const context: InstanceContext = {
     n8nApiUrl: instanceUrl || process.env.N8N_API_URL,
     n8nApiKey: instanceKey || process.env.N8N_API_KEY,
     instanceId: instanceId || 'default'
   };
   ```

2. **Gradual rollout** - Start with a feature flag:
   ```typescript
   const isMultiTenant = process.env.ENABLE_MULTI_TENANT === 'true';

   if (isMultiTenant) {
     const context = await getInstanceConfig(req.params.instanceId);
     await engine.processRequest(req, res, context);
   } else {
     // Legacy single-player mode
     await engine.processRequest(req, res);
   }
   ```

## Troubleshooting

### Common Issues

#### Module Resolution Errors

If you see `Cannot find module 'n8n-mcp'`:

```bash
# Clear node_modules and reinstall
rm -rf node_modules package-lock.json
npm install

# Verify package has types field
npm info n8n-mcp

# Check TypeScript can resolve it
npx tsc --noEmit
```

#### Session ID Validation Errors

If you see `Invalid session ID format` errors:

- Ensure you're using n8n-mcp v2.18.9 or later
- Session IDs can be any non-empty string
- No need to generate UUIDs - use your own format

#### Memory Leaks

If memory usage grows over time:

```typescript
// Ensure proper cleanup
process.on('SIGTERM', async () => {
  await engine.shutdown();
  process.exit(0);
});

// Monitor session count
const sessionInfo = engine.getSessionInfo();
console.log('Active sessions:', sessionInfo.sessions?.active);
```

## Further Reading

- [MCP Protocol Specification](https://modelcontextprotocol.io/docs)
- [n8n API Documentation](https://docs.n8n.io/api/)
- [Express.js Guide](https://expressjs.com/en/guide/routing.html)
- [n8n-mcp Main README](../README.md)

## Support

- **Issues**: [GitHub Issues](https://github.com/czlonkowski/n8n-mcp/issues)
- **Discussions**: [GitHub Discussions](https://github.com/czlonkowski/n8n-mcp/discussions)
- **Security**: For security issues, see [SECURITY.md](../SECURITY.md)
@@ -1,62 +0,0 @@
# PR #104 Test Suite Improvements Summary

## Overview
Based on comprehensive review feedback from PR #104, we've significantly improved the test suite quality, organization, and coverage.

## Test Results
- **Before:** 78 failing tests
- **After:** 0 failing tests (1,356 passed, 19 skipped)
- **Coverage:** 85.34% statements, 85.3% branches

## Key Improvements

### 1. Fixed All Test Failures
- Fixed logger test spy issues by properly handling the DEBUG environment variable
- Fixed MSW configuration test by restoring environment variables
- Fixed workflow validator tests by adding proper node connections
- Fixed mock setup issues in edge case tests

### 2. Improved Test Organization
- Split the large config-validator.test.ts (1,075 lines) into 4 focused files:
  - config-validator-basic.test.ts
  - config-validator-node-specific.test.ts
  - config-validator-security.test.ts
  - config-validator-edge-cases.test.ts

### 3. Enhanced Test Coverage
- Added comprehensive edge case tests for all major validators
- Added null/undefined handling tests
- Added boundary value tests
- Added performance tests with CI-aware timeouts
- Added security validation tests

### 4. Improved Test Quality
- Fixed test naming conventions (100% compliance with the "should X when Y" pattern)
- Added JSDoc comments to test utilities and factories
- Created comprehensive test documentation (tests/README.md)
- Improved test isolation to prevent cross-test pollution

### 5. New Features
- Implemented the validateBatch method for ConfigValidator
- Added test factories for better test data management
- Created test utilities for common scenarios

## Files Modified
- 7 existing test files fixed
- 8 new test files created
- 1 source file enhanced (ConfigValidator)
- 4 debug files removed before commit

## Skipped Tests
19 tests remain skipped with documented reasons:
- FTS5 search sync test (database corruption in CI)
- Template clearing (not implemented)
- Mock API configuration tests
- Duplicate edge case tests with mocking issues (working versions exist)

## Next Steps
The only remaining task from the improvement plan is:
- Add performance regression tests and boundaries (low priority, future sprint)

## Conclusion
The test suite is now robust, well-organized, and provides excellent coverage. All critical issues have been resolved, and the codebase is ready for merge.
@@ -105,6 +105,9 @@ These are automatically set by the Railway template:
| `CORS_ORIGIN` | `*` | Allow any origin |
| `HOST` | `0.0.0.0` | Listen on all interfaces |
| `PORT` | (Railway provides) | Don't set manually |
| `AUTH_RATE_LIMIT_WINDOW` | `900000` (15 min) | Rate limit window (v2.16.3+) |
| `AUTH_RATE_LIMIT_MAX` | `20` | Max auth attempts (v2.16.3+) |
| `WEBHOOK_SECURITY_MODE` | `strict` | SSRF protection mode (v2.16.3+) |

### Optional Variables

@@ -284,6 +287,32 @@ Since the Railway template uses a specific Docker image tag, updates are manual:

You could use the `latest` tag, but this may cause unexpected breaking changes.
## 🔒 Security Features (v2.16.3+)

Railway deployments include enhanced security features:

### Rate Limiting
- **Automatic brute-force protection** - 20 attempts per 15 minutes per IP
- **Configurable limits** via `AUTH_RATE_LIMIT_WINDOW` and `AUTH_RATE_LIMIT_MAX`
- **Standard rate limit headers** for client awareness

### SSRF Protection
- **Default strict mode** blocks localhost, private IPs, and cloud metadata
- **Cloud metadata always blocked** (169.254.169.254, metadata.google.internal, etc.)
- **Use `moderate` mode only if** connecting to a local n8n instance

**Security Configuration:**
```bash
# In Railway Variables tab:
WEBHOOK_SECURITY_MODE=strict    # Production (recommended)
# or
WEBHOOK_SECURITY_MODE=moderate  # If using local n8n with port forwarding

# Rate limiting (defaults are good for most use cases)
AUTH_RATE_LIMIT_WINDOW=900000   # 15 minutes
AUTH_RATE_LIMIT_MAX=20          # 20 attempts per IP
```

## 📝 Best Practices

1. **Always change the default AUTH_TOKEN immediately**
@@ -1,314 +0,0 @@
# Template Metadata Generation

This document describes the template metadata generation system introduced in n8n-MCP v2.10.0, which uses OpenAI's batch API to automatically analyze and categorize workflow templates.

## Overview

The template metadata system analyzes n8n workflow templates to extract structured information about their purpose, complexity, requirements, and target audience. This enables intelligent template discovery through advanced filtering capabilities.

## Architecture

### Components

1. **MetadataGenerator** (`src/templates/metadata-generator.ts`)
   - Interfaces with OpenAI API
   - Generates structured metadata using JSON schemas
   - Provides fallback defaults for error cases

2. **BatchProcessor** (`src/templates/batch-processor.ts`)
   - Manages OpenAI batch API operations
   - Handles parallel batch submission
   - Monitors batch status and retrieves results

3. **Template Repository** (`src/templates/template-repository.ts`)
   - Stores metadata in SQLite database
   - Provides advanced search capabilities
   - Supports JSON extraction queries

## Metadata Schema

Each template's metadata contains:

```typescript
{
  categories: string[]            // Max 5 categories (e.g., "automation", "integration")
  complexity: "simple" | "medium" | "complex"
  use_cases: string[]             // Max 5 primary use cases
  estimated_setup_minutes: number // 5-480 minutes
  required_services: string[]     // External services needed
  key_features: string[]          // Max 5 main capabilities
  target_audience: string[]       // Max 3 target user types
}
```

## Generation Process

### 1. Initial Setup

```bash
# Set OpenAI API key in .env
OPENAI_API_KEY=your-api-key-here
```

### 2. Generate Metadata for Existing Templates

```bash
# Generate metadata only (no template fetching)
npm run fetch:templates -- --metadata-only

# Generate metadata during update
npm run fetch:templates -- --mode=update --generate-metadata
```

### 3. Batch Processing

The system uses OpenAI's batch API for cost-effective processing:

- **50% cost reduction** compared to synchronous API calls
- **24-hour processing window** for batch completion
- **Parallel batch submission** for faster processing
- **Automatic retry** for failed items

### Configuration Options

Environment variables:
- `OPENAI_API_KEY`: Required for metadata generation
- `OPENAI_MODEL`: Model to use (default: "gpt-4o-mini")
- `OPENAI_BATCH_SIZE`: Templates per batch (default: 100, max: 500)
- `METADATA_LIMIT`: Limit templates to process (for testing)

## How It Works

### 1. Template Analysis

For each template, the generator analyzes:
- Template name and description
- Node types and their frequency
- Workflow structure and connections
- Overall complexity

### 2. Node Summarization

Nodes are grouped into categories (a sketch of this grouping follows the list):
- HTTP/Webhooks
- Database operations
- Communication (Slack, Email)
- AI/ML operations
- Spreadsheets
- Service-specific nodes
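
A minimal sketch of that grouping, assuming a simple keyword table; `CATEGORY_KEYWORDS` and `categorize` are illustrative, not the generator's actual rules:

```typescript
// Illustrative node-type grouping used to summarize a workflow for the prompt.
const CATEGORY_KEYWORDS: Record<string, string[]> = {
  'HTTP/Webhooks': ['httpRequest', 'webhook'],
  'Database': ['postgres', 'mysql', 'mongoDb'],
  'Communication': ['slack', 'emailSend', 'gmail'],
  'AI/ML': ['openAi', 'agent', 'lmChat'],
  'Spreadsheets': ['googleSheets', 'spreadsheetFile'],
};

function categorize(nodeType: string): string {
  for (const [category, keywords] of Object.entries(CATEGORY_KEYWORDS)) {
    if (keywords.some((k) => nodeType.includes(k))) return category;
  }
  return 'Service-specific'; // everything else keeps its service name
}

// Count node types per category to build a compact summary.
function summarizeNodes(nodeTypes: string[]): Record<string, number> {
  return nodeTypes.reduce<Record<string, number>>((acc, t) => {
    const c = categorize(t);
    acc[c] = (acc[c] ?? 0) + 1;
    return acc;
  }, {});
}
```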
### 3. Metadata Generation

The AI model receives:
```
Template: [name]
Description: [description]
Nodes Used (X): [summarized node list]
Workflow has X nodes with Y connections
```

And generates structured metadata following the JSON schema.
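
Assembling that prompt from a template record might look like the following sketch; the `TemplateRecord` shape is an assumption for illustration:

```typescript
// Hypothetical template record shape, matching the prompt fields above.
interface TemplateRecord {
  name: string;
  description: string;
  nodeTypes: string[];
  connectionCount: number;
}

function buildPrompt(tpl: TemplateRecord, summarizedNodeList: string): string {
  return [
    `Template: ${tpl.name}`,
    `Description: ${tpl.description}`,
    `Nodes Used (${tpl.nodeTypes.length}): ${summarizedNodeList}`,
    `Workflow has ${tpl.nodeTypes.length} nodes with ${tpl.connectionCount} connections`,
  ].join('\n');
}
```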
### 4. Storage and Indexing

Metadata is stored as JSON in SQLite and indexed for fast querying:

```sql
-- Example query for simple automation templates
SELECT * FROM templates
WHERE json_extract(metadata, '$.complexity') = 'simple'
AND json_extract(metadata, '$.categories') LIKE '%automation%'
```

## MCP Tool Integration

### search_templates_by_metadata

Advanced filtering tool with multiple parameters:

```typescript
search_templates_by_metadata({
  category: "automation",      // Filter by category
  complexity: "simple",        // Skill level
  maxSetupMinutes: 30,         // Time constraint
  targetAudience: "marketers", // Role-based
  requiredService: "slack"     // Service dependency
})
```

### list_templates

Enhanced to include metadata:

```typescript
list_templates({
  includeMetadata: true, // Include full metadata
  limit: 20,
  offset: 0
})
```

## Usage Examples

### Finding Beginner-Friendly Templates

```typescript
const templates = await search_templates_by_metadata({
  complexity: "simple",
  maxSetupMinutes: 15
});
```

### Role-Specific Templates

```typescript
const marketingTemplates = await search_templates_by_metadata({
  targetAudience: "marketers",
  category: "communication"
});
```

### Service Integration Templates

```typescript
const openaiTemplates = await search_templates_by_metadata({
  requiredService: "openai",
  complexity: "medium"
});
```

## Performance Metrics

- **Coverage**: 97.5% of templates have metadata (2,534/2,598)
- **Generation Time**: ~2-4 hours for full database (using batch API)
- **Query Performance**: <100ms for metadata searches
- **Storage Overhead**: ~2MB additional database size

## Troubleshooting

### Common Issues

1. **Batch Processing Stuck**
   - Check batch status: The API provides status updates
   - Batches auto-expire after 24 hours
   - Monitor using the batch ID in logs

2. **Missing Metadata**
   - ~2.5% of templates may fail metadata generation
   - Fallback defaults are provided
   - Can regenerate with `--metadata-only` flag

3. **API Rate Limits**
   - Batch API has generous limits (50,000 requests/batch)
   - Cost is 50% of synchronous API
   - Processing happens within 24-hour window

### Monitoring Batch Status

```bash
# Check current batch status (if logged)
curl https://api.openai.com/v1/batches/[batch-id] \
  -H "Authorization: Bearer $OPENAI_API_KEY"
```

## Cost Analysis

### Batch API Pricing (gpt-4o-mini)

- Input: $0.075 per 1M tokens (50% of standard)
- Output: $0.30 per 1M tokens (50% of standard)
- Average template: ~300 input tokens, ~200 output tokens
- Total cost for 2,500 templates: ~$0.50

### Comparison with Synchronous API

- Synchronous cost: ~$1.00 for same volume
- Time saved: Parallel processing vs sequential
- Reliability: Automatic retries included

## Future Enhancements

### Planned Improvements

1. **Incremental Updates**
   - Only generate metadata for new templates
   - Track metadata version for updates

2. **Enhanced Analysis**
   - Workflow complexity scoring
   - Dependency graph analysis
   - Performance impact estimates

3. **User Feedback Loop**
   - Collect accuracy feedback
   - Refine categorization over time
   - Community-driven corrections

4. **Alternative Models**
   - Support for local LLMs
   - Claude API integration
   - Configurable model selection

## Implementation Details

### Database Schema

```sql
-- Metadata stored as JSON column
ALTER TABLE templates ADD COLUMN metadata TEXT;

-- Indexes for common queries
CREATE INDEX idx_templates_complexity ON templates(
  json_extract(metadata, '$.complexity')
);
CREATE INDEX idx_templates_setup_time ON templates(
  json_extract(metadata, '$.estimated_setup_minutes')
);
```

### Error Handling

The system provides robust error handling:

1. **API Failures**: Fallback to default metadata
2. **Parsing Errors**: Logged with template ID
3. **Batch Failures**: Individual item retry
4. **Validation Errors**: Zod schema enforcement
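
A sketch of what that Zod enforcement could look like for the metadata schema documented above; the schema actually shipped in the codebase may differ:

```typescript
import { z } from 'zod';

// Mirrors the documented metadata schema; the project's real schema may differ.
const TemplateMetadataSchema = z.object({
  categories: z.array(z.string()).max(5),
  complexity: z.enum(['simple', 'medium', 'complex']),
  use_cases: z.array(z.string()).max(5),
  estimated_setup_minutes: z.number().min(5).max(480),
  required_services: z.array(z.string()),
  key_features: z.array(z.string()).max(5),
  target_audience: z.array(z.string()).max(3),
});

// Example model output; safeParse never throws, so failures can
// fall back to the default metadata mentioned above.
const rawModelOutput =
  '{"categories":["automation"],"complexity":"simple","use_cases":["sync data"],' +
  '"estimated_setup_minutes":15,"required_services":[],"key_features":["scheduled sync"],' +
  '"target_audience":["developers"]}';

const parsed = TemplateMetadataSchema.safeParse(JSON.parse(rawModelOutput));
if (!parsed.success) {
  console.error('Metadata validation failed:', parsed.error.issues);
}
```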
## Maintenance

### Regenerating Metadata

```bash
# Full regeneration (caution: costs ~$0.50)
npm run fetch:templates -- --mode=rebuild --generate-metadata

# Partial regeneration (templates without metadata)
npm run fetch:templates -- --metadata-only
```

### Database Backup

```bash
# Backup before regeneration
cp data/nodes.db data/nodes.db.backup

# Restore if needed
cp data/nodes.db.backup data/nodes.db
```

## Security Considerations

1. **API Key Management**
   - Store in `.env` file (gitignored)
   - Never commit API keys
   - Use environment variables in CI/CD

2. **Data Privacy**
   - Only the template structure is sent to the API
   - No user data or credentials included
   - Processing happens in OpenAI's secure environment

## Conclusion

The template metadata system transforms template discovery from simple text search to intelligent, multi-dimensional filtering. By leveraging OpenAI's batch API, we achieve cost-effective, scalable metadata generation that significantly improves the user experience for finding relevant workflow templates.
docs/img/skills.png (new binary file, 430 KiB): binary content not shown.
@@ -1,162 +0,0 @@
# Issue #90: "propertyValues[itemName] is not iterable" Error - Research Findings

## Executive Summary

The error "propertyValues[itemName] is not iterable" occurs when AI agents create workflows with incorrect data structures for n8n nodes that use `fixedCollection` properties. This primarily affects Switch Node v2, If Node, and Filter Node. The error prevents workflows from loading in the n8n UI, resulting in empty canvases.

## Root Cause Analysis

### 1. Data Structure Mismatch

The error occurs when n8n's validation engine expects an iterable array but encounters a non-iterable object. This happens with nodes using `fixedCollection` type properties.

**Incorrect Structure (causes error):**
```json
{
  "rules": {
    "conditions": {
      "values": [
        {
          "value1": "={{$json.status}}",
          "operation": "equals",
          "value2": "active"
        }
      ]
    }
  }
}
```

**Correct Structure:**
```json
{
  "rules": {
    "conditions": [
      {
        "value1": "={{$json.status}}",
        "operation": "equals",
        "value2": "active"
      }
    ]
  }
}
```

### 2. Affected Nodes

Based on the research and issue comments, the following nodes are affected:

1. **Switch Node v2** (`n8n-nodes-base.switch` with typeVersion: 2)
   - Uses `rules` parameter with `conditions` fixedCollection
   - v3 doesn't have this issue due to restructured schema

2. **If Node** (`n8n-nodes-base.if` with typeVersion: 1)
   - Uses `conditions` parameter with nested conditions array
   - Similar structure to Switch v2

3. **Filter Node** (`n8n-nodes-base.filter`)
   - Uses `conditions` parameter
   - Same fixedCollection pattern

### 3. Why AI Agents Create Incorrect Structures

1. **Training Data Issues**: AI models may have been trained on outdated or incorrect n8n workflow examples
2. **Nested Object Inference**: AI tends to create unnecessarily nested structures when it sees collection-type parameters
3. **Legacy Format Confusion**: Mixing v2 and v3 Switch node formats
4. **Schema Misinterpretation**: The term "fixedCollection" may lead AI to create object wrappers

## Current Impact

From issue #90 comments:
- Multiple users experiencing the issue
- Workflows fail to load completely (empty canvas)
- Users resort to using Switch Node v3 or direct API calls
- The issue appears in "most MCPs" according to user feedback

## Recommended Actions

### 1. Immediate Validation Enhancement

Add specific validation for fixedCollection properties in the workflow validator:
```typescript
// In workflow-validator.ts or enhanced-config-validator.ts
function validateFixedCollectionParameters(node, result) {
  const problematicNodes = {
    'n8n-nodes-base.switch': { version: 2, fields: ['rules'] },
    'n8n-nodes-base.if': { version: 1, fields: ['conditions'] },
    'n8n-nodes-base.filter': { version: 1, fields: ['conditions'] }
  };

  const nodeConfig = problematicNodes[node.type];
  if (nodeConfig && node.typeVersion === nodeConfig.version) {
    // Validate structure: the failure mode is an object wrapper such as
    // rules.conditions.values where n8n expects a plain conditions array
    for (const field of nodeConfig.fields) {
      const value = node.parameters?.[field];
      const conditions = value?.conditions ?? value;
      if (conditions && !Array.isArray(conditions) && Array.isArray(conditions.values)) {
        result.errors.push(
          `${node.name}: "${field}" must contain a conditions array, not an object wrapping "values"`
        );
      }
    }
  }
}
```
### 2. Enhanced MCP Tool Validation

Update the validation tools to detect and prevent this specific error pattern:

1. **In `validate_node_operation` tool**: Add checks for fixedCollection structures
2. **In `validate_workflow` tool**: Include specific validation for Switch/If nodes
3. **In `n8n_create_workflow` tool**: Pre-validate parameters before submission

### 3. AI-Friendly Examples

Update workflow examples to show correct structures:

```typescript
// In workflow-examples.ts
export const SWITCH_NODE_EXAMPLE = {
  name: "Switch",
  type: "n8n-nodes-base.switch",
  typeVersion: 3, // Prefer v3 over v2
  parameters: {
    // Correct v3 structure
  }
};
```

### 4. Migration Strategy

For existing workflows with Switch v2:
1. Detect Switch v2 nodes in validation
2. Suggest migration to v3
3. Provide automatic conversion utility (see the sketch after this list)
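
A minimal sketch of such a conversion utility, assuming the wrapped v2 shape shown earlier (`rules.conditions.values`); illustrative only, not a shipped tool:

```typescript
// Unwrap the incorrect { conditions: { values: [...] } } shape into a plain array.
function fixSwitchV2Parameters(parameters: any): any {
  const conditions = parameters?.rules?.conditions;
  if (conditions && !Array.isArray(conditions) && Array.isArray(conditions.values)) {
    return {
      ...parameters,
      rules: { ...parameters.rules, conditions: conditions.values }
    };
  }
  return parameters; // already in the correct shape
}
```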
### 5. Documentation Updates

1. Add warnings about fixedCollection structures in tool documentation
2. Include specific examples of correct vs incorrect structures
3. Document the Switch v2 to v3 migration path

## Proposed Implementation Priority

1. **High Priority**: Add validation to prevent creation of invalid structures
2. **High Priority**: Update existing validation tools to catch this error
3. **Medium Priority**: Add auto-fix capabilities to correct structures
4. **Medium Priority**: Update examples and documentation
5. **Low Priority**: Create migration utilities for v2 to v3

## Testing Strategy

1. Create test cases for each affected node type
2. Test both correct and incorrect structures
3. Verify validation catches all variants of the error
4. Test that auto-fix suggestions work correctly

## Success Metrics

- Zero instances of "propertyValues[itemName] is not iterable" in newly created workflows
- Clear error messages that guide users to correct structures
- Successful validation of all Switch/If node configurations before workflow creation

## Next Steps

1. Implement validation enhancements in the workflow validator
2. Update MCP tools to include these validations
3. Add comprehensive tests
4. Update documentation with clear examples
5. Consider adding a migration tool for existing workflows
@@ -70,7 +70,72 @@
- Test file: validate-workflow.test.ts (431 lines)
- Test results: 83/83 integration tests passing (Phase 1-5, 6A complete)

**Next Phase**: Phase 6B - Workflow Autofix Tests

**Phase 6B: Workflow Autofix Tests** ✅ **COMPLETE** (October 5, 2025)
- 16 test scenarios implemented and passing
- All autofix operations tested: preview mode, apply mode, fix types, confidence filtering
- Test coverage:
  - Preview mode (2 scenarios - expression-format, multiple fix types)
  - Apply mode (2 scenarios - expression-format, webhook-missing-path)
  - Fix type filtering (2 scenarios - single type, multiple types)
  - Confidence thresholds (3 scenarios - high, medium, low)
  - Max fixes parameter (1 scenario)
  - No fixes available (1 scenario)
  - Error handling (3 scenarios - non-existent workflow, invalid parameters)
  - Response format verification (2 scenarios - preview and apply modes)
- Fix types tested:
  - expression-format (missing = prefix for resource locators)
  - typeversion-correction (outdated typeVersion values)
  - error-output-config (error output configuration issues)
  - node-type-correction (incorrect node types)
  - webhook-missing-path (missing webhook path parameters)
- Code quality improvements:
  - Fixed database resource leak in NodeRepository utility
  - Added TypeScript interfaces (ValidationResponse, AutofixResponse)
  - Replaced unsafe `as any` casts with proper type definitions
  - All lint and typecheck errors resolved
- Test file: autofix-workflow.test.ts (855 lines)
- Test results: 99/99 integration tests passing (Phase 1-6 complete)

**Phase 7: Execution Management Tests** ✅ **COMPLETE** (October 5, 2025)
- 54 test scenarios implemented and passing
- All 4 execution management handlers tested against real n8n instance
- Test coverage:
  - handleTriggerWebhookWorkflow (20 tests): All HTTP methods (GET/POST/PUT/DELETE), query params, JSON body, custom headers, error handling
  - handleGetExecution (16 tests): All 4 retrieval modes (preview/summary/filtered/full), node filtering, item limits, input data inclusion, legacy compatibility
  - handleListExecutions (13 tests): Status filtering (success/error/waiting), pagination with cursor, various limits (1/10/50/100), data inclusion control
  - handleDeleteExecution (5 tests): Successful deletion, verification via fetch attempt, error handling
- Critical fix: Corrected response structure expectations (executions/returned vs data/count)
- Test files:
  - trigger-webhook.test.ts (375 lines, 20 tests)
  - get-execution.test.ts (429 lines, 16 tests)
  - list-executions.test.ts (264 lines, 13 tests)
  - delete-execution.test.ts (149 lines, 5 tests)
- Code review: APPROVED (9.5/10 quality score)
- Test results: 153/153 integration tests passing (Phase 1-7 complete)

**Phase 8: System Tools Tests** ✅ **COMPLETE** (October 5, 2025)
- 19 test scenarios implemented and passing
- All 3 system tool handlers tested against real n8n instance
- Test coverage:
  - handleHealthCheck (3 tests): API connectivity verification, version information, feature availability
  - handleListAvailableTools (7 tests): Complete tool inventory by category, configuration status, API limitations
  - handleDiagnostic (9 tests): Environment checks, API connectivity, tools availability, verbose mode with debug info
- TypeScript type safety improvements:
  - Created response-types.ts with comprehensive interfaces for all response types
  - Replaced all 'as any' casts with proper TypeScript interfaces
  - Added null-safety checks and non-null assertions
  - Full type safety and IDE autocomplete support
- Test files:
  - health-check.test.ts (117 lines, 3 tests)
  - list-tools.test.ts (181 lines, 7 tests)
  - diagnostic.test.ts (243 lines, 9 tests)
  - response-types.ts (241 lines, comprehensive type definitions)
- Code review: APPROVED
- Test results: 172/172 integration tests passing (Phase 1-8 complete)

**🎉 INTEGRATION TEST SUITE COMPLETE**: All 18 MCP handlers fully tested

**Next Phase**: Update documentation and finalize integration testing plan

---

@@ -1166,15 +1231,16 @@ jobs:
- ✅ All tests passing against real n8n instance

### Overall Project (In Progress)
-- ⏳ All 17 handlers have integration tests (10 of 17 complete)
-- ⏳ All operations/parameters covered (83 of 150+ scenarios complete)
-- ✅ Tests run successfully locally (Phases 1-6A verified)
+- ⏳ All 17 handlers have integration tests (11 of 17 complete)
+- ⏳ All operations/parameters covered (99 of 150+ scenarios complete)
+- ✅ Tests run successfully locally (Phases 1-6 verified)
- ⏳ Tests run successfully in CI (pending Phase 9)
- ✅ No manual cleanup required (automatic)
- ✅ Test coverage catches P0-level bugs (verified in Phase 2)
- ⏳ CI runs on every PR and daily (pending Phase 9)
- ✅ Clear error messages when tests fail
- ✅ Documentation for webhook workflow setup
- ✅ Code quality maintained (lint, typecheck, type safety)

---

@@ -1186,12 +1252,12 @@ jobs:
- **Phase 4 (Updates)**: ✅ COMPLETE (October 4, 2025)
- **Phase 5 (Management)**: ✅ COMPLETE (October 4, 2025)
- **Phase 6A (Validation)**: ✅ COMPLETE (October 5, 2025)
-- **Phase 6B (Autofix)**: 1 day
+- **Phase 6B (Autofix)**: ✅ COMPLETE (October 5, 2025)
- **Phase 7 (Executions)**: 2 days
- **Phase 8 (System)**: 1 day
- **Phase 9 (CI/CD)**: 1 day

-**Total**: 5.5 days complete, ~5 days remaining
+**Total**: 6 days complete, ~4 days remaining

---
@@ -1,712 +0,0 @@
|
||||
# MCP Tools Documentation for LLMs
|
||||
|
||||
This document provides comprehensive documentation for the most commonly used MCP tools in the n8n-mcp server. Each tool includes parameters, return formats, examples, and best practices.
|
||||
|
||||
## Table of Contents
|
||||
1. [search_nodes](#search_nodes)
|
||||
2. [get_node_essentials](#get_node_essentials)
|
||||
3. [list_nodes](#list_nodes)
|
||||
4. [validate_node_minimal](#validate_node_minimal)
|
||||
5. [validate_node_operation](#validate_node_operation)
|
||||
6. [get_node_for_task](#get_node_for_task)
|
||||
7. [n8n_create_workflow](#n8n_create_workflow)
|
||||
8. [n8n_update_partial_workflow](#n8n_update_partial_workflow)
|
||||
|
||||
---
|
||||
|
||||
## search_nodes
|
||||
|
||||
**Brief Description**: Search for n8n nodes by keywords in names and descriptions.
|
||||
|
||||
### Parameters
|
||||
- `query` (string, required): Search term - single word recommended for best results
|
||||
- `limit` (number, optional): Maximum results to return (default: 20)
|
||||
|
||||
### Return Format
|
||||
```json
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"nodeType": "nodes-base.slack",
|
||||
"displayName": "Slack",
|
||||
"description": "Send messages to Slack channels"
|
||||
}
|
||||
],
|
||||
"totalFound": 5
|
||||
}
|
||||
```
|
||||
|
||||
### Common Use Cases
|
||||
1. **Finding integration nodes**: `search_nodes("slack")` to find Slack integration
|
||||
2. **Finding HTTP nodes**: `search_nodes("http")` for HTTP/webhook nodes
|
||||
3. **Finding database nodes**: `search_nodes("postgres")` for PostgreSQL nodes
|
||||
|
||||
### Examples
|
||||
```json
|
||||
// Search for Slack-related nodes
|
||||
{
|
||||
"query": "slack",
|
||||
"limit": 10
|
||||
}
|
||||
|
||||
// Search for webhook nodes
|
||||
{
|
||||
"query": "webhook",
|
||||
"limit": 20
|
||||
}
|
||||
```
|
||||
|
||||
### Performance Notes
|
||||
- Fast operation (cached results)
|
||||
- Single-word queries are more precise
|
||||
- Returns results with OR logic (any word matches)
|
||||
|
||||
### Best Practices
|
||||
- Use single words for precise results: "slack" not "send slack message"
|
||||
- Try shorter terms if no results: "sheet" instead of "spreadsheet"
|
||||
- Search is case-insensitive
|
||||
- Common searches: "http", "webhook", "email", "database", "slack"
|
||||
|
||||
### Common Pitfalls
|
||||
- Multi-word searches return too many results (OR logic)
|
||||
- Searching for exact phrases doesn't work
|
||||
- Node types aren't searchable here (use exact type with get_node_info)
|
||||
|
||||
### Related Tools
|
||||
- `list_nodes` - Browse nodes by category
|
||||
- `get_node_essentials` - Get node configuration after finding it
|
||||
- `list_ai_tools` - Find AI-capable nodes specifically
|
||||
|
||||
---
|
||||
|
||||
## get_node_essentials
|
||||
|
||||
**Brief Description**: Get only the 10-20 most important properties for a node with working examples.
|
||||
|
||||
### Parameters
|
||||
- `nodeType` (string, required): Full node type with prefix (e.g., "nodes-base.httpRequest")
|
||||
|
||||
### Return Format
|
||||
```json
|
||||
{
|
||||
"nodeType": "nodes-base.httpRequest",
|
||||
"displayName": "HTTP Request",
|
||||
"essentialProperties": [
|
||||
{
|
||||
"name": "method",
|
||||
"type": "options",
|
||||
"default": "GET",
|
||||
"options": ["GET", "POST", "PUT", "DELETE"],
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"name": "url",
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"placeholder": "https://api.example.com/endpoint"
|
||||
}
|
||||
],
|
||||
"examples": [
|
||||
{
|
||||
"name": "Simple GET Request",
|
||||
"configuration": {
|
||||
"method": "GET",
|
||||
"url": "https://api.example.com/users"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tips": [
|
||||
"Use expressions like {{$json.url}} to make URLs dynamic",
|
||||
"Enable 'Split Into Items' for array responses"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Common Use Cases
|
||||
1. **Quick node configuration**: Get just what you need without parsing 100KB+ of data
|
||||
2. **Learning node basics**: Understand essential properties with examples
|
||||
3. **Building workflows efficiently**: 95% smaller responses than get_node_info
|
||||
|
||||
### Examples
|
||||
```json
|
||||
// Get essentials for HTTP Request node
|
||||
{
|
||||
"nodeType": "nodes-base.httpRequest"
|
||||
}
|
||||
|
||||
// Get essentials for Slack node
|
||||
{
|
||||
"nodeType": "nodes-base.slack"
|
||||
}
|
||||
|
||||
// Get essentials for OpenAI node
|
||||
{
|
||||
"nodeType": "nodes-langchain.openAi"
|
||||
}
|
||||
```
|
||||
|
||||
### Performance Notes
|
||||
- Very fast (<5KB responses vs 100KB+ for full info)
|
||||
- Curated for 20+ common nodes
|
||||
- Automatic fallback for unconfigured nodes
|
||||
|
||||
### Best Practices
|
||||
- Always use this before get_node_info
|
||||
- Node type must include prefix: "nodes-base.slack" not "slack"
|
||||
- Check examples section for working configurations
|
||||
- Use tips section for common patterns
|
||||
|
||||
### Common Pitfalls
|
||||
- Forgetting the prefix in node type
|
||||
- Using wrong package name (n8n-nodes-base vs @n8n/n8n-nodes-langchain)
|
||||
- Case sensitivity in node types
|
||||
|
||||
### Related Tools
|
||||
- `get_node_info` - Full schema when essentials aren't enough
|
||||
- `search_node_properties` - Find specific properties
|
||||
- `get_node_for_task` - Pre-configured for common tasks
|
||||
|
||||
---
|
||||
|
||||
## list_nodes
|
||||
|
||||
**Brief Description**: List available n8n nodes with optional filtering by package, category, or capabilities.
|
||||
|
||||
### Parameters
|
||||
- `package` (string, optional): Filter by exact package name
|
||||
- `category` (string, optional): Filter by category (trigger, transform, output, input)
|
||||
- `developmentStyle` (string, optional): Filter by implementation style
|
||||
- `isAITool` (boolean, optional): Filter for AI-capable nodes
|
||||
- `limit` (number, optional): Maximum results (default: 50, max: 500)
|
||||
|
||||
### Return Format
|
||||
```json
|
||||
{
|
||||
"nodes": [
|
||||
{
|
||||
"nodeType": "nodes-base.webhook",
|
||||
"displayName": "Webhook",
|
||||
"description": "Receive HTTP requests",
|
||||
"categories": ["trigger"],
|
||||
"version": 2
|
||||
}
|
||||
],
|
||||
"total": 104,
|
||||
"hasMore": false
|
||||
}
|
||||
```
|
||||
|
||||
### Common Use Cases
|
||||
1. **Browse all triggers**: `list_nodes({category: "trigger", limit: 200})`
|
||||
2. **List all nodes**: `list_nodes({limit: 500})`
|
||||
3. **Find AI nodes**: `list_nodes({isAITool: true})`
|
||||
4. **Browse core nodes**: `list_nodes({package: "n8n-nodes-base"})`
|
||||
|
||||
### Examples
|
||||
```json
|
||||
// List all trigger nodes
|
||||
{
|
||||
"category": "trigger",
|
||||
"limit": 200
|
||||
}
|
||||
|
||||
// List all AI-capable nodes
|
||||
{
|
||||
"isAITool": true,
|
||||
"limit": 100
|
||||
}
|
||||
|
||||
// List nodes from core package
|
||||
{
|
||||
"package": "n8n-nodes-base",
|
||||
"limit": 200
|
||||
}
|
||||
```
|
||||
|
||||
### Performance Notes
|
||||
- Fast operation (cached results)
|
||||
- Default limit of 50 may miss nodes - use 200+
|
||||
- Returns metadata only, not full schemas
|
||||
|
||||
### Best Practices
|
||||
- Always set limit to 200+ for complete results
|
||||
- Use exact package names: "n8n-nodes-base" not "@n8n/n8n-nodes-base"
|
||||
- Categories are singular: "trigger" not "triggers"
|
||||
- Common categories: trigger (104), transform, output, input
|
||||
|
||||
### Common Pitfalls
|
||||
- Default limit (50) misses many nodes
|
||||
- Using wrong package name format
|
||||
- Multiple filters may return empty results
|
||||
|
||||
### Related Tools
|
||||
- `search_nodes` - Search by keywords
|
||||
- `list_ai_tools` - Specifically for AI nodes
|
||||
- `get_database_statistics` - Overview of all nodes
|
||||
|
||||
---
|
||||
|
||||
## validate_node_minimal

**Brief Description**: Quick validation checking only for missing required fields.

### Parameters
- `nodeType` (string, required): Node type to validate (e.g., "nodes-base.slack")
- `config` (object, required): Node configuration to check

### Return Format
```json
{
  "valid": false,
  "missingRequired": ["channel", "messageType"],
  "message": "Missing 2 required fields"
}
```

### Common Use Cases
1. **Quick validation**: Check if all required fields are present
2. **Pre-flight check**: Validate before creating workflow
3. **Minimal overhead**: Fastest validation option

### Examples
```json
// Validate Slack message configuration
{
  "nodeType": "nodes-base.slack",
  "config": {
    "resource": "message",
    "operation": "send",
    "text": "Hello World"
    // Missing: channel
  }
}

// Validate HTTP Request
{
  "nodeType": "nodes-base.httpRequest",
  "config": {
    "method": "POST"
    // Missing: url
  }
}
```

### Performance Notes
- Fastest validation option
- No schema loading overhead
- Returns only missing fields

### Best Practices
- Use for quick checks during workflow building
- Follow up with validate_node_operation for complex nodes (see the sketch after this list)
- Check operation-specific requirements
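A minimal sketch of this quick-check-then-escalate pattern, assuming a connected MCP `Client` from `@modelcontextprotocol/sdk` (the same client API used in this project's integration tests; transport setup is omitted, and the parsed response shapes follow the return formats documented above):

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';

// Run the cheap required-field check first; only load the full,
// operation-aware validator when something is missing or wrong.
async function validateNodeConfig(client: Client, nodeType: string, config: object) {
  const quick = await client.callTool({
    name: 'validate_node_minimal',
    arguments: { nodeType, config },
  });
  const quickResult = JSON.parse((quick.content as any)[0].text);
  if (quickResult.valid) return quickResult;

  // Escalate for detailed errors, warnings, and fix suggestions.
  const full = await client.callTool({
    name: 'validate_node_operation',
    arguments: { nodeType, config, profile: 'ai-friendly' },
  });
  return JSON.parse((full.content as any)[0].text);
}
```

The minimal check catches missing required fields cheaply; the full validator is only paid for when the quick pass fails or the node is complex.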
### Common Pitfalls
- Doesn't validate field values or types
- Doesn't check operation-specific requirements
- Won't catch configuration errors beyond missing fields

### Related Tools
- `validate_node_operation` - Comprehensive validation
- `validate_workflow` - Full workflow validation

---
## validate_node_operation

**Brief Description**: Comprehensive node configuration validation with operation awareness and helpful error messages.

### Parameters
- `nodeType` (string, required): Node type to validate
- `config` (object, required): Complete node configuration including operation fields
- `profile` (string, optional): Validation profile (minimal, runtime, ai-friendly, strict)

### Return Format
```json
{
  "valid": false,
  "errors": [
    {
      "field": "channel",
      "message": "Channel is required to send Slack message",
      "suggestion": "Add channel: '#general' or '@username'"
    }
  ],
  "warnings": [
    {
      "field": "unfurl_links",
      "message": "Consider setting unfurl_links: false for better performance"
    }
  ],
  "examples": {
    "minimal": {
      "resource": "message",
      "operation": "send",
      "channel": "#general",
      "text": "Hello World"
    }
  }
}
```

### Common Use Cases
1. **Complex node validation**: Slack, Google Sheets, databases
2. **Operation-specific checks**: Different rules per operation
3. **Getting fix suggestions**: Helpful error messages with solutions

### Examples
```json
// Validate Slack configuration
{
  "nodeType": "nodes-base.slack",
  "config": {
    "resource": "message",
    "operation": "send",
    "text": "Hello team!"
  },
  "profile": "ai-friendly"
}

// Validate Google Sheets operation
{
  "nodeType": "nodes-base.googleSheets",
  "config": {
    "operation": "append",
    "sheetId": "1234567890",
    "range": "Sheet1!A:Z"
  },
  "profile": "runtime"
}
```

### Performance Notes
- Slower than minimal validation
- Loads the full node schema
- Operation-aware validation rules

### Best Practices
- Use the "ai-friendly" profile for balanced validation
- Check the examples in the response for working configurations
- Follow suggestions to fix errors
- Essential for complex nodes (Slack, databases, APIs)

### Common Pitfalls
- Forgetting operation fields (resource, operation, action)
- Using the wrong profile (too strict or too lenient)
- Ignoring warnings that could cause runtime issues

### Related Tools
- `validate_node_minimal` - Quick required field check
- `get_property_dependencies` - Understand field relationships
- `validate_workflow` - Validate entire workflow

---
## get_node_for_task

**Brief Description**: Get pre-configured node settings for common automation tasks.

### Parameters
- `task` (string, required): Task identifier (e.g., "post_json_request", "receive_webhook")

### Return Format
```json
{
  "task": "post_json_request",
  "nodeType": "nodes-base.httpRequest",
  "displayName": "HTTP Request",
  "configuration": {
    "method": "POST",
    "url": "={{ $json.api_endpoint }}",
    "responseFormat": "json",
    "options": {
      "bodyContentType": "json"
    },
    "bodyParametersJson": "={{ JSON.stringify($json) }}"
  },
  "userMustProvide": [
    "url - The API endpoint URL",
    "bodyParametersJson - The JSON data to send"
  ],
  "tips": [
    "Use expressions to make values dynamic",
    "Enable 'Split Into Items' for batch processing"
  ]
}
```

### Common Use Cases
1. **Quick task setup**: Configure nodes for specific tasks instantly
2. **Learning patterns**: See how to configure nodes properly
3. **Common workflows**: Standard patterns like webhooks, API calls, database queries

### Examples
```json
// Get configuration for JSON POST request
{
  "task": "post_json_request"
}

// Get webhook receiver configuration
{
  "task": "receive_webhook"
}

// Get AI chat configuration
{
  "task": "chat_with_ai"
}
```

### Performance Notes
- Instant response (pre-configured templates)
- No database lookups required
- Includes working examples

### Best Practices
- Use list_tasks first to see available options (see the sketch after this list)
- Check the userMustProvide section
- Follow tips for best results
- Common tasks: API calls, webhooks, database queries, AI chat
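For instance, a discover-then-configure flow could look like the following sketch (same assumed MCP `Client` setup as in the earlier validation sketch; it is an assumption here that `list_tasks` takes no arguments and returns a JSON list of task identifiers):

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';

// Discover the available task identifiers, then fetch the
// pre-configured node template for the one we want.
async function configureForTask(client: Client, wanted: string) {
  const tasksRes = await client.callTool({ name: 'list_tasks', arguments: {} });
  const tasks = JSON.parse((tasksRes.content as any)[0].text);

  const resp = await client.callTool({
    name: 'get_node_for_task',
    arguments: { task: wanted },
  });
  const template = JSON.parse((resp.content as any)[0].text);

  // Fields listed in template.userMustProvide still need real values
  // before the configuration is deployable.
  return { tasks, template };
}
```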
### Common Pitfalls
- Not every task is available (use list_tasks)
- The returned configuration still needs customization
- Some fields still need user input

### Related Tools
- `list_tasks` - See all available tasks
- `get_node_essentials` - Alternative approach
- `search_templates` - Find complete workflow templates

---
## n8n_create_workflow

**Brief Description**: Create a new workflow in n8n with nodes and connections.

### Parameters
- `name` (string, required): Workflow name
- `nodes` (array, required): Array of node definitions
- `connections` (object, required): Node connections mapping
- `settings` (object, optional): Workflow settings

### Return Format
```json
{
  "id": "workflow-uuid",
  "name": "My Workflow",
  "active": false,
  "createdAt": "2024-01-15T10:30:00Z",
  "updatedAt": "2024-01-15T10:30:00Z",
  "nodes": [...],
  "connections": {...}
}
```

### Common Use Cases
1. **Automated workflow creation**: Build workflows programmatically
2. **Template deployment**: Deploy pre-built workflow patterns
3. **Multi-workflow systems**: Create interconnected workflows

### Examples
```json
// Create simple webhook → HTTP request workflow
{
  "name": "Webhook to API",
  "nodes": [
    {
      "id": "webhook-1",
      "name": "Webhook",
      "type": "n8n-nodes-base.webhook",
      "typeVersion": 2,
      "position": [250, 300],
      "parameters": {
        "path": "/my-webhook",
        "httpMethod": "POST"
      }
    },
    {
      "id": "http-1",
      "name": "HTTP Request",
      "type": "n8n-nodes-base.httpRequest",
      "typeVersion": 4.2,
      "position": [450, 300],
      "parameters": {
        "method": "POST",
        "url": "https://api.example.com/process",
        "responseFormat": "json"
      }
    }
  ],
  "connections": {
    "Webhook": {
      "main": [[{"node": "HTTP Request", "type": "main", "index": 0}]]
    }
  }
}
```

### Performance Notes
- Requires an API call to the n8n instance
- The workflow is created in an inactive state
- It must be activated manually in the UI

### Best Practices
- Always include typeVersion for nodes
- Use node names (not IDs) in connections
- Position nodes logically ([x, y] coordinates)
- Test with validate_workflow first
- Start simple, add complexity gradually

### Common Pitfalls
- Missing typeVersion causes errors
- Using node IDs instead of names in connections
- Forgetting required node properties
- Creating cycles in connections
- Workflows cannot be activated via the API (activate them in the UI)

### Related Tools
- `validate_workflow` - Validate before creating
- `n8n_update_partial_workflow` - Modify existing workflows
- `n8n_trigger_webhook_workflow` - Execute workflows

---
## n8n_update_partial_workflow

**Brief Description**: Update workflows using diff operations for precise, incremental changes without sending the entire workflow.

### Parameters
- `id` (string, required): Workflow ID to update
- `operations` (array, required): Array of diff operations (max 5)
- `validateOnly` (boolean, optional): Test without applying changes

### Return Format
```json
{
  "success": true,
  "workflow": {
    "id": "workflow-uuid",
    "name": "Updated Workflow",
    "nodes": [...],
    "connections": {...}
  },
  "appliedOperations": 3
}
```

### Common Use Cases
1. **Add nodes to existing workflows**: Insert new functionality
2. **Update node configurations**: Change parameters without full replacement
3. **Manage connections**: Add/remove node connections
4. **Quick edits**: Rename, enable/disable nodes, update settings

### Examples
```json
// Add a new node and connect it
{
  "id": "workflow-123",
  "operations": [
    {
      "type": "addNode",
      "node": {
        "id": "set-1",
        "name": "Set Data",
        "type": "n8n-nodes-base.set",
        "typeVersion": 3,
        "position": [600, 300],
        "parameters": {
          "values": {
            "string": [{
              "name": "status",
              "value": "processed"
            }]
          }
        }
      }
    },
    {
      "type": "addConnection",
      "source": "HTTP Request",
      "target": "Set Data"
    }
  ]
}

// Update multiple properties
{
  "id": "workflow-123",
  "operations": [
    {
      "type": "updateName",
      "name": "Production Workflow v2"
    },
    {
      "type": "updateNode",
      "nodeName": "Webhook",
      "changes": {
        "parameters.path": "/v2/webhook"
      }
    },
    {
      "type": "addTag",
      "tag": "production"
    }
  ]
}
```

### Performance Notes
- 80-90% token savings vs full updates
- Maximum 5 operations per request
- Two-pass processing resolves dependencies between operations (e.g., a node can be added and connected in the same request)
- Transactional: all operations apply or none do

### Best Practices
- Use validateOnly: true to test first (see the sketch after this list)
- Keep operations under 5 for reliability
- Operations can be in any order (v2.7.0+)
- Use node names, not IDs, in operations
- For updateNode, use dot notation for nested paths
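A minimal sketch of the dry-run pattern referenced above (assuming the same connected MCP `Client` as in the earlier sketches; the operations use the diff format shown in the Examples section, and the `success` field follows the return format above):

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';

// Validate a diff without applying it, then apply the same
// operations only if the dry run passes.
async function safeUpdate(client: Client, id: string, operations: object[]) {
  const dryRun = await client.callTool({
    name: 'n8n_update_partial_workflow',
    arguments: { id, operations, validateOnly: true },
  });
  const check = JSON.parse((dryRun.content as any)[0].text);
  if (!check.success) return check; // surface errors without touching the workflow

  return client.callTool({
    name: 'n8n_update_partial_workflow',
    arguments: { id, operations },
  });
}
```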
### Common Pitfalls
- Exceeding the 5-operation limit
- Using node IDs instead of names
- Forgetting required node properties in addNode
- Not testing with validateOnly first

### Related Tools
- `n8n_update_full_workflow` - Complete workflow replacement
- `n8n_get_workflow` - Fetch current workflow state
- `validate_workflow` - Validate changes before applying

---
## Quick Reference

### Workflow Building Process
1. **Discovery**: `search_nodes` → `list_nodes`
2. **Configuration**: `get_node_essentials` → `get_node_for_task`
3. **Validation**: `validate_node_minimal` → `validate_node_operation`
4. **Creation**: `validate_workflow` → `n8n_create_workflow`
5. **Updates**: `n8n_update_partial_workflow` (end-to-end sketch after this list)
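An end-to-end sketch of these five steps, assuming a connected MCP `Client` as in the earlier sketches; the `search_nodes` argument name and the abbreviated node list are assumptions, and real node definitions like the ones in the `n8n_create_workflow` example are needed for a workflow that actually runs:

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';

// Walk the five steps: discover, configure, validate, create, update.
async function buildWorkflow(client: Client) {
  const call = async (name: string, args: object) => {
    const res = await client.callTool({ name, arguments: args });
    return JSON.parse((res.content as any)[0].text);
  };

  // 1. Discovery (argument name assumed)
  const hits = await call('search_nodes', { query: 'webhook' });
  // 2. Configuration: fetch the essential properties for the chosen node
  const essentials = await call('get_node_essentials', { nodeType: 'nodes-base.webhook' });
  // 3. Validation: cheap required-field check on the draft config
  const quick = await call('validate_node_minimal', {
    nodeType: 'nodes-base.webhook',
    config: { path: '/hook', httpMethod: 'POST' },
  });
  // 4. Creation (node definitions abbreviated here)
  const created = await call('n8n_create_workflow', {
    name: 'Sketch Workflow',
    nodes: [/* node definitions as in the n8n_create_workflow example */],
    connections: {},
  });
  // 5. Updates: incremental diff against the new workflow
  return call('n8n_update_partial_workflow', {
    id: created.id,
    operations: [{ type: 'updateName', name: 'Sketch Workflow v2' }],
  });
}
```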
### Performance Tips
- Use `get_node_essentials` instead of `get_node_info` (95% smaller)
- Set high limits on `list_nodes` (200+)
- Use single words in `search_nodes`
- Validate incrementally while building

### Common Node Types
- **Triggers**: webhook, schedule, emailReadImap, slackTrigger
- **Core**: httpRequest, code, set, if, merge, splitInBatches
- **Integrations**: slack, gmail, googleSheets, postgres, mongodb
- **AI**: agent, openAi, chainLlm, documentLoader

### Error Prevention
- Always include node type prefixes: "nodes-base.slack"
- Use node names (not IDs) in connections
- Include typeVersion in all nodes
- Test with validateOnly before applying changes
- Check userMustProvide sections in templates
# n8n MCP Client Tool Integration - Implementation Plan (Simplified)

## Overview

This document provides a **simplified** implementation plan for making n8n-mcp compatible with n8n's MCP Client Tool (v1.1). Based on expert review, we're taking a minimal approach that extends the existing single-session server rather than creating new architecture.

## Key Design Principles

1. **Minimal Changes**: Extend existing single-session server with n8n compatibility mode
2. **No Overengineering**: No complex session management or multi-session architecture
3. **Docker-Native**: Separate Docker image for n8n deployment
4. **Remote Deployment**: Designed to run alongside n8n in production
5. **Backward Compatible**: Existing functionality remains unchanged

## Prerequisites

- Docker and Docker Compose
- n8n version 1.104.2 or higher (with MCP Client Tool v1.1)
- Basic understanding of Docker networking

## Implementation Approach

Instead of creating a new multi-session architecture, we'll extend the existing single-session server with an n8n compatibility mode. This approach was recommended by all three expert reviewers as simpler and more maintainable.

## Architecture Changes

```
src/
├── http-server-single-session.ts   # MODIFY: Add n8n mode flag
└── mcp/
    └── server.ts                   # NO CHANGES NEEDED

Docker/
├── Dockerfile.n8n                  # NEW: n8n-specific image
├── docker-compose.n8n.yml          # NEW: Simplified stack
└── .github/workflows/
    └── docker-build-n8n.yml        # NEW: Build workflow
```

## Implementation Steps

### Step 1: Modify Existing Single-Session Server

#### 1.1 Update `src/http-server-single-session.ts`

Add n8n compatibility mode to the existing server with minimal changes:
```typescript
// Add these constants at the top (after imports)
const PROTOCOL_VERSION = "2024-11-05";
const N8N_MODE = process.env.N8N_MODE === 'true';

// In the constructor or start method, add logging
if (N8N_MODE) {
  logger.info('Running in n8n compatibility mode');
}

// In setupRoutes method, add the protocol version endpoint
if (N8N_MODE) {
  app.get('/mcp', (req, res) => {
    res.json({
      protocolVersion: PROTOCOL_VERSION,
      serverInfo: {
        name: "n8n-mcp",
        version: PROJECT_VERSION,
        capabilities: {
          tools: true,
          resources: false,
          prompts: false,
        },
      },
    });
  });
}

// In handleMCPRequest method, add session header
if (N8N_MODE && this.session) {
  res.setHeader('Mcp-Session-Id', this.session.sessionId);
}

// Update error handling to use JSON-RPC format
catch (error) {
  logger.error('MCP request error:', error);

  if (N8N_MODE) {
    res.status(500).json({
      jsonrpc: '2.0',
      error: {
        code: -32603,
        message: 'Internal error',
        data: error instanceof Error ? error.message : 'Unknown error',
      },
      id: null,
    });
  } else {
    // Keep existing error handling for backward compatibility
    res.status(500).json({
      error: 'Internal server error',
      details: error instanceof Error ? error.message : 'Unknown error'
    });
  }
}
```
That's it! No new files, no complex session management. Just a few lines of code.

### Step 2: Update Package Scripts

#### 2.1 Update `package.json`

Add a simple script for n8n mode:

```json
{
  "scripts": {
    "start:n8n": "N8N_MODE=true MCP_MODE=http node dist/mcp/index.js"
  }
}
```

### Step 3: Create Docker Infrastructure for n8n

#### 3.1 Create `Dockerfile.n8n`
```dockerfile
# Dockerfile.n8n - Optimized for n8n integration
FROM node:22-alpine AS builder

WORKDIR /app

# Install build dependencies
RUN apk add --no-cache python3 make g++

# Copy package files
COPY package*.json tsconfig*.json ./

# Install ALL dependencies
RUN npm ci --no-audit --no-fund

# Copy source and build
COPY src ./src
RUN npm run build && npm run rebuild

# Runtime stage
FROM node:22-alpine

WORKDIR /app

# Install runtime dependencies
RUN apk add --no-cache curl dumb-init

# Create non-root user
RUN addgroup -g 1001 -S nodejs && adduser -S nodejs -u 1001

# Copy application from builder
COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist
COPY --from=builder --chown=nodejs:nodejs /app/data ./data
COPY --from=builder --chown=nodejs:nodejs /app/node_modules ./node_modules
COPY --chown=nodejs:nodejs package.json ./

USER nodejs

EXPOSE 3001

HEALTHCHECK CMD curl -f http://localhost:3001/health || exit 1

ENTRYPOINT ["dumb-init", "--"]
CMD ["node", "dist/mcp/index.js"]
```

#### 3.2 Create `docker-compose.n8n.yml`
```yaml
# docker-compose.n8n.yml - Simple stack for n8n + n8n-mcp
version: '3.8'

services:
  n8n:
    image: n8nio/n8n:latest
    container_name: n8n
    restart: unless-stopped
    ports:
      - "5678:5678"
    environment:
      - N8N_BASIC_AUTH_ACTIVE=${N8N_BASIC_AUTH_ACTIVE:-true}
      - N8N_BASIC_AUTH_USER=${N8N_USER:-admin}
      - N8N_BASIC_AUTH_PASSWORD=${N8N_PASSWORD:-changeme}
      - N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
    volumes:
      - n8n_data:/home/node/.n8n
    networks:
      - n8n-net
    depends_on:
      n8n-mcp:
        condition: service_healthy

  n8n-mcp:
    image: ghcr.io/${GITHUB_USER:-czlonkowski}/n8n-mcp-n8n:latest
    build:
      context: .
      dockerfile: Dockerfile.n8n
    container_name: n8n-mcp
    restart: unless-stopped
    environment:
      - MCP_MODE=http
      - N8N_MODE=true
      - AUTH_TOKEN=${MCP_AUTH_TOKEN}
      - NODE_ENV=production
      - HTTP_PORT=3001
    networks:
      - n8n-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3001/health"]
      interval: 30s
      timeout: 10s
      retries: 3

networks:
  n8n-net:
    driver: bridge

volumes:
  n8n_data:
```

#### 3.3 Create `.env.n8n.example`
```bash
# .env.n8n.example - Copy to .env and configure

# n8n Configuration
N8N_USER=admin
N8N_PASSWORD=changeme
N8N_BASIC_AUTH_ACTIVE=true

# MCP Configuration
# Generate with: openssl rand -base64 32
MCP_AUTH_TOKEN=your-secure-token-minimum-32-characters

# GitHub username for image registry
GITHUB_USER=czlonkowski
```

### Step 4: Create GitHub Actions Workflow

#### 4.1 Create `.github/workflows/docker-build-n8n.yml`
```yaml
name: Build n8n Docker Image

on:
  push:
    branches: [main]
    tags: ['v*']
    paths:
      - 'src/**'
      - 'package*.json'
      - 'Dockerfile.n8n'
  workflow_dispatch:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}-n8n

jobs:
  build:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - uses: actions/checkout@v4

      - uses: docker/setup-buildx-action@v3

      - uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - uses: docker/metadata-action@v5
        id: meta
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=raw,value=latest,enable={{is_default_branch}}

      - uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile.n8n
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
```

### Step 5: Testing

#### 5.1 Unit Tests for n8n Mode

Create `tests/unit/http-server-n8n-mode.test.ts`:
```typescript
import { describe, it, expect } from 'vitest';
import request from 'supertest';

describe('n8n Mode', () => {
  it('should return protocol version on GET /mcp', async () => {
    process.env.N8N_MODE = 'true';
    const app = await createTestApp();

    const response = await request(app)
      .get('/mcp')
      .expect(200);

    expect(response.body.protocolVersion).toBe('2024-11-05');
    expect(response.body.serverInfo.capabilities.tools).toBe(true);
  });

  it('should include session ID in response headers', async () => {
    process.env.N8N_MODE = 'true';
    const app = await createTestApp();

    const response = await request(app)
      .post('/mcp')
      .set('Authorization', 'Bearer test-token')
      .send({ jsonrpc: '2.0', method: 'initialize', id: 1 });

    expect(response.headers['mcp-session-id']).toBeDefined();
  });

  it('should format errors as JSON-RPC', async () => {
    process.env.N8N_MODE = 'true';
    const app = await createTestApp();

    const response = await request(app)
      .post('/mcp')
      .send({ invalid: 'request' })
      .expect(500);

    expect(response.body.jsonrpc).toBe('2.0');
    expect(response.body.error.code).toBe(-32603);
  });
});
```

#### 5.2 Quick Deployment Script

Create `deploy/quick-deploy-n8n.sh`:
```bash
#!/bin/bash
set -e

echo "🚀 Quick Deploy n8n + n8n-mcp"

# Check prerequisites
command -v docker >/dev/null 2>&1 || { echo "Docker required"; exit 1; }
command -v docker-compose >/dev/null 2>&1 || { echo "Docker Compose required"; exit 1; }

# Generate auth token if not exists
if [ ! -f .env ]; then
  cp .env.n8n.example .env
  TOKEN=$(openssl rand -base64 32)
  # Use '|' as the sed delimiter: base64 tokens can contain '/'
  sed -i "s|your-secure-token-minimum-32-characters|$TOKEN|" .env
  echo "Generated MCP_AUTH_TOKEN: $TOKEN"
fi

# Deploy
docker-compose -f docker-compose.n8n.yml up -d

echo ""
echo "✅ Deployment complete!"
echo ""
echo "📋 Next steps:"
echo "1. Access n8n at http://localhost:5678"
echo "   Username: admin (or check .env)"
echo "   Password: changeme (or check .env)"
echo ""
echo "2. Create a workflow with MCP Client Tool:"
echo "   - Server URL: http://n8n-mcp:3001/mcp"
echo "   - Authentication: Bearer Token"
echo "   - Token: Check .env file for MCP_AUTH_TOKEN"
echo ""
echo "📊 View logs: docker-compose -f docker-compose.n8n.yml logs -f"
echo "🛑 Stop: docker-compose -f docker-compose.n8n.yml down"
```
## Implementation Checklist (Simplified)

### Code Changes
- [ ] Add N8N_MODE flag to `http-server-single-session.ts`
- [ ] Add protocol version endpoint (GET /mcp) when N8N_MODE=true
- [ ] Add Mcp-Session-Id header to responses
- [ ] Update error responses to JSON-RPC format when N8N_MODE=true
- [ ] Add npm script `start:n8n` to package.json

### Docker Infrastructure
- [ ] Create `Dockerfile.n8n` for n8n-specific image
- [ ] Create `docker-compose.n8n.yml` for simple deployment
- [ ] Create `.env.n8n.example` template
- [ ] Create GitHub Actions workflow `docker-build-n8n.yml`
- [ ] Create `deploy/quick-deploy-n8n.sh` script

### Testing
- [ ] Write unit tests for n8n mode functionality
- [ ] Test with actual n8n MCP Client Tool
- [ ] Verify protocol version endpoint
- [ ] Test authentication flow
- [ ] Validate error formatting

### Documentation
- [ ] Update README with n8n deployment section
- [ ] Document N8N_MODE environment variable
- [ ] Add troubleshooting guide for common issues

## Quick Start Guide

### 1. One-Command Deployment

```bash
# Clone and deploy
git clone https://github.com/czlonkowski/n8n-mcp.git
cd n8n-mcp
./deploy/quick-deploy-n8n.sh
```

### 2. Manual Configuration in n8n

After deployment, configure the MCP Client Tool in n8n:

1. Open n8n at `http://localhost:5678`
2. Create a new workflow
3. Add "MCP Client Tool" node (under AI category)
4. Configure:
   - **Server URL**: `http://n8n-mcp:3001/mcp`
   - **Authentication**: Bearer Token
   - **Token**: Check your `.env` file for MCP_AUTH_TOKEN
5. Select a tool (e.g., `list_nodes`)
6. Execute the workflow

### 3. Production Deployment

For production with SSL, use a reverse proxy:

```nginx
# nginx configuration
server {
    listen 443 ssl;
    server_name n8n.yourdomain.com;

    location / {
        proxy_pass http://localhost:5678;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
```

The MCP server should remain internal-only; n8n connects to it over the internal Docker network.

## Success Criteria

The implementation is successful when:

1. **Minimal Code Changes**: Only ~20 lines added to the existing server
2. **Protocol Compliance**: GET /mcp returns the correct protocol version
3. **n8n Connection**: MCP Client Tool connects successfully
4. **Tool Execution**: Tools work without modification
5. **Backward Compatible**: Existing Claude Desktop usage unaffected

## Troubleshooting

### Common Issues

1. **"Protocol version mismatch"**
   - Ensure N8N_MODE=true is set
   - Check GET /mcp returns "2024-11-05"

2. **"Authentication failed"**
   - Verify AUTH_TOKEN matches in .env and n8n
   - Token must be 32+ characters
   - Use "Bearer Token" auth type in n8n

3. **"Connection refused"**
   - Check containers are on the same network
   - Use the internal hostname: `http://n8n-mcp:3001/mcp`
   - Verify the health check passes

4. **Testing the Setup**
   ```bash
   # Check protocol version
   docker exec n8n-mcp curl http://localhost:3001/mcp

   # View logs
   docker-compose -f docker-compose.n8n.yml logs -f n8n-mcp
   ```

## Summary

This simplified approach:
- **Extends existing code** rather than creating new architecture
- **Adds n8n compatibility** with minimal changes
- **Uses a separate Docker image** for clean deployment
- **Maintains backward compatibility** for existing users
- **Avoids overengineering** with simple, practical solutions

Total implementation effort: ~2-3 hours (vs. 2-3 days for the multi-session approach)
# Test Artifacts Documentation

This document describes the comprehensive test result artifact storage system implemented in the n8n-mcp project.

## Overview

The test artifact system captures, stores, and presents test results in multiple formats to facilitate debugging, analysis, and historical tracking of test performance.

## Artifact Types

### 1. Test Results
- **JUnit XML** (`test-results/junit.xml`): Standard format for CI integration
- **JSON Results** (`test-results/results.json`): Detailed test data for analysis
- **HTML Report** (`test-results/html/index.html`): Interactive test report
- **Test Summary** (`test-summary.md`): Markdown summary for PR comments

### 2. Coverage Reports
- **LCOV** (`coverage/lcov.info`): Standard coverage format
- **HTML Coverage** (`coverage/html/index.html`): Interactive coverage browser
- **Coverage Summary** (`coverage/coverage-summary.json`): JSON coverage data

### 3. Benchmark Results
- **Benchmark JSON** (`benchmark-results.json`): Raw benchmark data
- **Comparison Reports** (`benchmark-comparison.md`): PR benchmark comparisons

### 4. Detailed Reports
- **HTML Report** (`test-reports/report.html`): Comprehensive styled report
- **Markdown Report** (`test-reports/report.md`): Full markdown report
- **JSON Report** (`test-reports/report.json`): Complete test data

## GitHub Actions Integration

### Test Workflow (`test.yml`)

The main test workflow:
1. Runs tests with coverage using multiple reporters
2. Generates test summaries and detailed reports
3. Uploads artifacts with metadata
4. Posts summaries to PRs
5. Creates a combined artifact index

### Benchmark PR Workflow (`benchmark-pr.yml`)

For pull requests:
1. Runs benchmarks on PR branch
2. Runs benchmarks on base branch
3. Compares results
4. Posts comparison to PR
5. Sets status checks for regressions

## Artifact Retention

- **Test Results**: 30 days
- **Coverage Reports**: 30 days
- **Benchmark Results**: 30 days
- **Combined Results**: 90 days
- **Test Metadata**: 30 days

## PR Comment Integration

The system automatically:
- Posts test summaries to PR comments
- Updates existing comments instead of creating duplicates
- Includes links to full artifacts
- Shows coverage and benchmark changes

## Job Summary

Each workflow run includes a job summary with:
- Test results overview
- Coverage summary
- Benchmark results
- Direct links to download artifacts

## Local Development

### Running Tests with Reports

```bash
# Run tests with all reporters
CI=true npm run test:coverage

# Generate detailed reports
node scripts/generate-detailed-reports.js

# Generate test summary
node scripts/generate-test-summary.js

# Compare benchmarks
node scripts/compare-benchmarks.js benchmark-results.json benchmark-baseline.json
```

### Report Locations

When running locally, reports are generated in:
- `test-results/` - Vitest outputs
- `test-reports/` - Detailed reports
- `coverage/` - Coverage reports
- Root directory - Summary files

## Report Formats

### HTML Report Features
- Responsive design
- Test suite breakdown
- Failed test details with error messages
- Coverage visualization with progress bars
- Benchmark performance metrics
- Sortable tables

### Markdown Report Features
- GitHub-compatible formatting
- Summary statistics
- Failed test listings
- Coverage breakdown
- Benchmark comparisons

### JSON Report Features
- Complete test data
- Programmatic access
- Historical comparison
- CI/CD integration

## Best Practices

1. **Always Check Artifacts**: When tests fail in CI, download and review the HTML report
2. **Monitor Coverage**: Use the coverage reports to identify untested code
3. **Track Benchmarks**: Review benchmark comparisons on performance-critical PRs
4. **Archive Important Runs**: Download artifacts from significant releases

## Troubleshooting

### Missing Artifacts
- Check if tests ran to completion
- Verify artifact upload steps executed
- Check retention period hasn't expired

### Report Generation Failures
- Ensure all dependencies are installed
- Check for valid test/coverage output files
- Review workflow logs for errors

### PR Comment Issues
- Verify GitHub Actions permissions
- Check bot authentication
- Review comment posting logs
# n8n-MCP Testing Architecture

## Overview

This document describes the comprehensive testing infrastructure implemented for the n8n-MCP project. The testing suite includes 3,336 tests split between unit and integration tests, benchmarks, and a complete CI/CD pipeline ensuring code quality and reliability.

### Test Suite Statistics (October 2025)

- **Total Tests**: 3,336 tests
- **Unit Tests**: 2,766 tests - Isolated component testing with mocks
- **Integration Tests**: 570 tests - Full system behavior validation
  - n8n API Integration: 172 tests (all 18 MCP handler tools)
  - MCP Protocol: 119 tests (protocol compliance, session management)
  - Database: 226 tests (repository operations, transactions, FTS5)
  - Templates: 35 tests (fetching, storage, metadata)
  - Docker: 18 tests (configuration, security)
- **Test Files**:
  - 106 unit test files
  - 41 integration test files
  - Total: 147 test files
- **Test Execution Time**:
  - Unit tests: ~2 minutes with coverage
  - Integration tests: ~30 seconds
  - Total CI time: ~3 minutes
- **Success Rate**: 100% (all tests passing in CI)
- **CI/CD Pipeline**: Fully automated with GitHub Actions
- **Test Artifacts**: JUnit XML, coverage reports, benchmark results
- **Parallel Execution**: Configurable with thread pool

## Testing Framework: Vitest

We use **Vitest** as our primary testing framework, chosen for its:
- **Speed**: Native ESM support and fast execution
- **TypeScript Integration**: First-class TypeScript support
- **Watch Mode**: Instant feedback during development
- **Jest Compatibility**: Easy migration from Jest
- **Built-in Mocking**: Powerful mocking capabilities
- **Coverage**: Integrated code coverage with v8

### Configuration
```typescript
// vitest.config.ts
import path from 'path';
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    globals: true,
    environment: 'node',
    setupFiles: ['./tests/setup/global-setup.ts'],
    pool: 'threads',
    poolOptions: {
      threads: {
        singleThread: process.env.TEST_PARALLEL !== 'true',
        maxThreads: parseInt(process.env.TEST_MAX_WORKERS || '4', 10)
      }
    },
    coverage: {
      provider: 'v8',
      reporter: ['lcov', 'html', 'text-summary'],
      exclude: ['node_modules/', 'tests/', '**/*.test.ts', 'scripts/']
    }
  },
  resolve: {
    alias: {
      '@': path.resolve(__dirname, './src'),
      '@tests': path.resolve(__dirname, './tests')
    }
  }
});
```

## Directory Structure
```
tests/
├── unit/                          # Unit tests with mocks (2,766 tests, 106 files)
│   ├── __mocks__/                 # Mock implementations
│   │   └── n8n-nodes-base.test.ts
│   ├── database/                  # Database layer tests
│   │   ├── database-adapter-unit.test.ts
│   │   ├── node-repository-core.test.ts
│   │   └── template-repository-core.test.ts
│   ├── docker/                    # Docker configuration tests
│   │   ├── config-security.test.ts
│   │   ├── edge-cases.test.ts
│   │   ├── parse-config.test.ts
│   │   └── serve-command.test.ts
│   ├── http-server/               # HTTP server tests
│   │   └── multi-tenant-support.test.ts
│   ├── loaders/                   # Node loader tests
│   │   └── node-loader.test.ts
│   ├── mappers/                   # Data mapper tests
│   │   └── docs-mapper.test.ts
│   ├── mcp/                       # MCP server and tools tests
│   │   ├── handlers-n8n-manager.test.ts
│   │   ├── handlers-workflow-diff.test.ts
│   │   ├── tools-documentation.test.ts
│   │   └── tools.test.ts
│   ├── parsers/                   # Parser tests
│   │   ├── node-parser.test.ts
│   │   ├── property-extractor.test.ts
│   │   └── simple-parser.test.ts
│   ├── scripts/                   # Script tests
│   │   └── fetch-templates-extraction.test.ts
│   ├── services/                  # Service layer tests (largest test suite)
│   │   ├── config-validator.test.ts
│   │   ├── enhanced-config-validator.test.ts
│   │   ├── example-generator.test.ts
│   │   ├── expression-validator.test.ts
│   │   ├── n8n-api-client.test.ts
│   │   ├── n8n-validation.test.ts
│   │   ├── node-specific-validators.test.ts
│   │   ├── property-dependencies.test.ts
│   │   ├── property-filter.test.ts
│   │   ├── task-templates.test.ts
│   │   ├── workflow-diff-engine.test.ts
│   │   ├── workflow-validator-comprehensive.test.ts
│   │   └── workflow-validator.test.ts
│   ├── telemetry/                 # Telemetry tests
│   │   └── telemetry-manager.test.ts
│   └── utils/                     # Utility function tests
│       ├── cache-utils.test.ts
│       └── database-utils.test.ts
├── integration/                   # Integration tests (570 tests, 41 files)
│   ├── n8n-api/                   # n8n API integration tests (172 tests, 18 files)
│   │   ├── executions/            # Execution management tests
│   │   │   ├── get-execution.test.ts
│   │   │   └── list-executions.test.ts
│   │   ├── system/                # System tool tests
│   │   │   ├── diagnostic.test.ts
│   │   │   ├── health-check.test.ts
│   │   │   └── list-tools.test.ts
│   │   ├── utils/                 # Test utilities
│   │   │   ├── mcp-context.ts
│   │   │   └── response-types.ts
│   │   └── workflows/             # Workflow management tests
│   │       ├── autofix-workflow.test.ts
│   │       ├── create-workflow.test.ts
│   │       ├── delete-workflow.test.ts
│   │       ├── get-workflow-details.test.ts
│   │       ├── get-workflow-minimal.test.ts
│   │       ├── get-workflow-structure.test.ts
│   │       ├── get-workflow.test.ts
│   │       ├── list-workflows.test.ts
│   │       ├── update-full-workflow.test.ts
│   │       ├── update-partial-workflow.test.ts
│   │       └── validate-workflow.test.ts
│   ├── database/                  # Database integration tests (226 tests)
│   │   ├── connection-management.test.ts
│   │   ├── fts5-search.test.ts
│   │   ├── node-repository.test.ts
│   │   ├── performance.test.ts
│   │   ├── template-node-configs.test.ts
│   │   ├── template-repository.test.ts
│   │   └── transactions.test.ts
│   ├── docker/                    # Docker integration tests (18 tests)
│   │   ├── docker-config.test.ts
│   │   └── docker-entrypoint.test.ts
│   ├── mcp-protocol/              # MCP protocol tests (119 tests)
│   │   ├── basic-connection.test.ts
│   │   ├── error-handling.test.ts
│   │   ├── performance.test.ts
│   │   ├── protocol-compliance.test.ts
│   │   ├── session-management.test.ts
│   │   ├── tool-invocation.test.ts
│   │   └── workflow-error-validation.test.ts
│   ├── templates/                 # Template tests (35 tests)
│   │   └── metadata-operations.test.ts
│   └── setup/                     # Integration test setup
│       ├── integration-setup.ts
│       └── msw-test-server.ts
├── benchmarks/                    # Performance benchmarks
│   ├── database-queries.bench.ts
│   └── sample.bench.ts
├── setup/                         # Global test configuration
│   ├── global-setup.ts            # Global test setup
│   ├── msw-setup.ts               # Mock Service Worker setup
│   └── test-env.ts                # Test environment configuration
├── utils/                         # Test utilities
│   ├── assertions.ts              # Custom assertions
│   ├── builders/                  # Test data builders
│   │   └── workflow.builder.ts
│   ├── data-generators.ts         # Test data generators
│   ├── database-utils.ts          # Database test utilities
│   └── test-helpers.ts            # General test helpers
├── mocks/                         # Mock implementations
│   └── n8n-api/                   # n8n API mocks
│       ├── handlers.ts            # MSW request handlers
│       └── data/                  # Mock data
└── fixtures/                      # Test fixtures
    ├── database/                  # Database fixtures
    ├── factories/                 # Data factories
    └── workflows/                 # Workflow fixtures
```

## Mock Strategy

### 1. Mock Service Worker (MSW) for API Mocking

We use MSW for intercepting and mocking HTTP requests:
```typescript
// tests/mocks/n8n-api/handlers.ts
import { http, HttpResponse } from 'msw';

export const handlers = [
  // Workflow endpoints
  http.get('*/workflows/:id', ({ params }) => {
    const workflow = mockWorkflows.find(w => w.id === params.id);
    if (!workflow) {
      return new HttpResponse(null, { status: 404 });
    }
    return HttpResponse.json(workflow);
  }),

  // Execution endpoints
  http.post('*/workflows/:id/run', async ({ params, request }) => {
    const body = await request.json();
    return HttpResponse.json({
      executionId: generateExecutionId(),
      status: 'running'
    });
  })
];
```

### 2. Database Mocking

For unit tests, we mock the database layer:
```typescript
// tests/unit/__mocks__/better-sqlite3.ts
import { vi } from 'vitest';

export default vi.fn(() => ({
  prepare: vi.fn(() => ({
    all: vi.fn().mockReturnValue([]),
    get: vi.fn().mockReturnValue(undefined),
    run: vi.fn().mockReturnValue({ changes: 1 }),
    finalize: vi.fn()
  })),
  exec: vi.fn(),
  close: vi.fn(),
  pragma: vi.fn()
}));
```

### 3. MCP SDK Mocking

For testing MCP protocol interactions:
```typescript
// tests/integration/mcp-protocol/test-helpers.ts
export class TestableN8NMCPServer extends N8NMCPServer {
  private transports = new Set<Transport>();

  async connectToTransport(transport: Transport): Promise<void> {
    this.transports.add(transport);
    await this.connect(transport);
  }

  async close(): Promise<void> {
    for (const transport of this.transports) {
      await transport.close();
    }
    this.transports.clear();
  }
}
```

## Test Patterns and Utilities

### 1. Database Test Utilities
```typescript
// tests/utils/database-utils.ts
export class TestDatabase {
  constructor(options: TestDatabaseOptions = {}) {
    this.options = {
      mode: 'memory',
      enableFTS5: true,
      ...options
    };
  }

  async initialize(): Promise<Database.Database> {
    const db = this.options.mode === 'memory'
      ? new Database(':memory:')
      : new Database(this.dbPath);

    if (this.options.enableFTS5) {
      await this.enableFTS5(db);
    }

    return db;
  }
}
```

### 2. Data Generators
```typescript
// tests/utils/data-generators.ts
export class TestDataGenerator {
  static generateNode(overrides: Partial<ParsedNode> = {}): ParsedNode {
    return {
      nodeType: `test.node${faker.number.int()}`,
      displayName: faker.commerce.productName(),
      description: faker.lorem.sentence(),
      properties: this.generateProperties(5),
      ...overrides
    };
  }

  static generateWorkflow(nodeCount = 3): any {
    const nodes = Array.from({ length: nodeCount }, (_, i) => ({
      id: `node_${i}`,
      type: 'test.node',
      position: [i * 100, 0],
      parameters: {}
    }));

    return { nodes, connections: {} };
  }
}
```

### 3. Custom Assertions
```typescript
// tests/utils/assertions.ts
export function expectValidMCPResponse(response: any): void {
  expect(response).toBeDefined();
  expect(response.content).toBeDefined();
  expect(Array.isArray(response.content)).toBe(true);
  expect(response.content[0]).toHaveProperty('type', 'text');
  expect(response.content[0]).toHaveProperty('text');
}

export function expectNodeStructure(node: any): void {
  expect(node).toHaveProperty('nodeType');
  expect(node).toHaveProperty('displayName');
  expect(node).toHaveProperty('properties');
  expect(Array.isArray(node.properties)).toBe(true);
}
```

## Unit Testing

Our unit tests focus on testing individual components in isolation with mocked dependencies:

### Service Layer Tests

The bulk of our unit tests (400+ tests) are in the services layer:
```typescript
// tests/unit/services/workflow-validator-comprehensive.test.ts
describe('WorkflowValidator Comprehensive Tests', () => {
  it('should validate complex workflow with AI nodes', () => {
    const workflow = {
      nodes: [
        {
          id: 'ai_agent',
          type: '@n8n/n8n-nodes-langchain.agent',
          parameters: { prompt: 'Analyze data' }
        }
      ],
      connections: {}
    };

    const result = validator.validateWorkflow(workflow);
    expect(result.valid).toBe(true);
  });
});
```

### Parser Tests

Testing the node parsing logic:
```typescript
// tests/unit/parsers/property-extractor.test.ts
describe('PropertyExtractor', () => {
  it('should extract nested properties correctly', () => {
    const node = {
      properties: [
        {
          displayName: 'Options',
          name: 'options',
          type: 'collection',
          options: [
            { name: 'timeout', type: 'number' }
          ]
        }
      ]
    };

    const extracted = extractor.extractProperties(node);
    expect(extracted).toHaveProperty('options.timeout');
  });
});
```

### Mock Testing

Testing our mock implementations:
```typescript
// tests/unit/__mocks__/n8n-nodes-base.test.ts
describe('n8n-nodes-base mock', () => {
  it('should provide mocked node definitions', () => {
    const httpNode = mockNodes['n8n-nodes-base.httpRequest'];
    expect(httpNode).toBeDefined();
    expect(httpNode.description.displayName).toBe('HTTP Request');
  });
});
```

## Integration Testing

Our integration tests verify the complete system behavior across 570 tests in four major categories:

### n8n API Integration Testing (172 tests)

The n8n API integration tests verify all 18 MCP handler tools against a real n8n instance. These tests ensure our product layer (the MCP handlers) works correctly end-to-end, not just the raw API client.

**Test Organization:**
- **Workflows** (11 handlers): Create, read, update (full/partial), delete, list, validate, autofix
- **Executions** (2 handlers): Get execution details, list executions
- **System** (3 handlers): Health check, list available tools, diagnostics

**Example:**
```typescript
// tests/integration/n8n-api/workflows/create-workflow.test.ts
describe('Integration: handleCreateWorkflow', () => {
  it('should create a simple two-node workflow', async () => {
    const response = await handleCreateWorkflow(
      {
        params: {
          arguments: {
            name: 'Test Workflow',
            nodes: [webhook, setNode],
            connections: { Webhook: { main: [[{ node: 'Set', type: 'main', index: 0 }]] } }
          }
        }
      },
      mcpContext
    );

    expect(response.success).toBe(true);
    const workflow = response.data as WorkflowData;
    expect(workflow.id).toBeDefined();
    expect(workflow.nodes).toHaveLength(2);

    // Cleanup
    await handleDeleteWorkflow({ params: { arguments: { id: workflow.id } } }, mcpContext);
  });
});
```

**Key Features Tested:**
- Real workflow creation, modification, deletion with cleanup
- TypeScript type safety with response interfaces
- Complete coverage of all 18 n8n API tools
- Proper error handling and edge cases
- Response format validation

### MCP Protocol Testing (119 tests)
```typescript
// tests/integration/mcp-protocol/tool-invocation.test.ts
describe('MCP Tool Invocation', () => {
  let mcpServer: TestableN8NMCPServer;
  let client: Client;

  beforeEach(async () => {
    mcpServer = new TestableN8NMCPServer();
    await mcpServer.initialize();

    const [serverTransport, clientTransport] = InMemoryTransport.createLinkedPair();
    await mcpServer.connectToTransport(serverTransport);

    client = new Client({ name: 'test-client', version: '1.0.0' }, {});
    await client.connect(clientTransport);
  });

  it('should list nodes with filtering', async () => {
    const response = await client.callTool({
      name: 'list_nodes',
      arguments: { category: 'trigger', limit: 10 }
    });

    expectValidMCPResponse(response);
    const result = JSON.parse(response.content[0].text);
    expect(result.nodes).toHaveLength(10);
    expect(result.nodes.every(n => n.category === 'trigger')).toBe(true);
  });
});
```

### Database Integration Testing (226 tests)
```typescript
// tests/integration/database/fts5-search.test.ts
describe('FTS5 Search Integration', () => {
  it('should perform fuzzy search', async () => {
    const results = await nodeRepo.searchNodes('HTT', 'FUZZY');

    expect(results.some(n => n.nodeType.includes('httpRequest'))).toBe(true);
    expect(results.some(n => n.displayName.includes('HTTP'))).toBe(true);
  });

  it('should handle complex boolean queries', async () => {
    const results = await nodeRepo.searchNodes('webhook OR http', 'OR');

    expect(results.length).toBeGreaterThan(0);
    expect(results.some(n =>
      n.description?.includes('webhook') ||
      n.description?.includes('http')
    )).toBe(true);
  });
});
```

### Template Integration Testing (35 tests)

Tests template fetching, storage, and metadata operations against the n8n.io API and local database.
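As an illustration of what these tests cover, here is a hypothetical sketch in the style of `metadata-operations.test.ts`; the `templateRepo` surface shown is an assumption for the example, not the real repository API:

```typescript
import { describe, it, expect } from 'vitest';

// Hypothetical repository surface; the real TemplateRepository lives in src/
// and may differ in method names and types.
interface TemplateRepoLike {
  saveTemplate(t: { id: number; name: string }): Promise<void>;
  getTemplate(id: number): Promise<{ id: number; name: string } | undefined>;
}

// In the real suite this is created by the integration setup with a test database.
declare const templateRepo: TemplateRepoLike;

describe('Template metadata operations (sketch)', () => {
  it('stores and retrieves template metadata', async () => {
    await templateRepo.saveTemplate({ id: 42, name: 'Webhook to Slack' });
    const fetched = await templateRepo.getTemplate(42);
    expect(fetched?.name).toBe('Webhook to Slack');
  });
});
```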
### Docker Integration Testing (18 tests)

Tests Docker configuration parsing, entrypoint script, and security validation.

## Test Distribution and Coverage

### Test Distribution by Component

Based on our 3,336 tests:

**Integration Tests (570 tests):**
1. **n8n API Integration** (172 tests)
   - Workflow management handlers: 11 tools with comprehensive scenarios
   - Execution management handlers: 2 tools
   - System tool handlers: 3 tools
   - TypeScript type safety with response interfaces

2. **Database Integration** (226 tests)
   - Repository operations and transactions
   - FTS5 full-text search with fuzzy matching
   - Performance and concurrent access tests
   - Template node configurations

3. **MCP Protocol** (119 tests)
   - Protocol compliance and session management
   - Tool invocation and error handling
   - Performance and stress testing
   - Workflow error validation

4. **Templates & Docker** (53 tests)
   - Template fetching and metadata operations
   - Docker configuration and security validation

**Unit Tests (2,766 tests):**
1. **Services Layer** (largest suite)
   - `workflow-validator-comprehensive.test.ts`: 150+ tests
   - `enhanced-config-validator.test.ts`: 120+ tests
   - `node-specific-validators.test.ts`: 100+ tests
   - `n8n-api-client.test.ts`: 80+ tests
   - Config validation, property filtering, workflow diff engine

2. **Parsers** (~200 tests)
   - Node parsing with version support
   - Property extraction and documentation mapping
   - Simple parser for basic node information

3. **Database Layer** (~150 tests)
   - Repository core functionality with mocks
   - Database adapter unit tests
   - Template repository operations

4. **MCP Tools & HTTP Server** (~300 tests)
   - Tool definitions and documentation system
   - Multi-tenant support and security
   - Configuration validation

5. **Utils, Docker, Scripts, Telemetry** (remaining tests)
   - Cache utilities, database helpers
   - Docker config security and parsing
   - Template extraction scripts
   - Telemetry tracking

### Test Execution Performance

From our CI runs:
- **Fastest tests**: Unit tests with mocks (<1ms each)
- **Slowest tests**: Integration tests with real database and n8n API (100-5000ms)
- **Average test time**: ~20ms per test
- **Total suite execution**: ~3 minutes in CI (with coverage)
- **Parallel execution**: Configurable thread pool for optimal performance

## CI/CD Pipeline

Our GitHub Actions workflow runs all tests automatically:
```yaml
# .github/workflows/test.yml
name: Test Suite

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Install dependencies
        run: npm ci

      - name: Run unit tests with coverage
        run: npm run test:unit -- --coverage

      - name: Run integration tests
        run: npm run test:integration

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
```

### Test Execution Scripts
```json
|
||||
// package.json
|
||||
{
|
||||
"scripts": {
|
||||
"test": "vitest",
|
||||
"test:unit": "vitest run tests/unit",
|
||||
"test:integration": "vitest run tests/integration --config vitest.config.integration.ts",
|
||||
"test:coverage": "vitest run --coverage",
|
||||
"test:watch": "vitest watch",
|
||||
"test:bench": "vitest bench --config vitest.config.benchmark.ts",
|
||||
"benchmark:ci": "CI=true node scripts/run-benchmarks-ci.js"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### CI Test Results Summary

From our latest CI run (#41):

```
UNIT TESTS:
Test Files  30 passed (30)
     Tests  932 passed | 1 skipped (933)

INTEGRATION TESTS:
Test Files  14 passed (14)
     Tests  245 passed | 4 skipped (249)

TOTAL: 1,177 passed | 5 skipped | 0 failed
```

## Performance Testing

We use Vitest's built-in benchmark functionality:

```typescript
// tests/benchmarks/database-queries.bench.ts
import { bench, describe } from 'vitest';

describe('Database Query Performance', () => {
  bench('search nodes by category', async () => {
    await nodeRepo.getNodesByCategory('trigger');
  });

  bench('FTS5 search performance', async () => {
    await nodeRepo.searchNodes('webhook http request', 'AND');
  });
});
```

## Environment Configuration

Test environment is configured via `.env.test`:

```bash
# Test Environment Configuration
NODE_ENV=test
TEST_DB_PATH=:memory:
TEST_PARALLEL=false
TEST_MAX_WORKERS=4
FEATURE_TEST_COVERAGE=true
MSW_ENABLED=true
```

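A typical way to make those variables visible to the suite is loading the file in a global setup. A minimal sketch (the setup path mirrors the one referenced later in this document; `dotenv` is already in the dependency list):

```typescript
// tests/setup/global-setup.ts (sketch): load .env.test before any test runs
import { config } from 'dotenv';

config({ path: '.env.test' });
```
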
## Key Patterns and Lessons Learned

### 1. Response Structure Consistency

All MCP responses follow a specific structure that must be handled correctly:

```typescript
// Common pattern for handling MCP responses
const response = await client.callTool({ name: 'list_nodes', arguments: {} });

// MCP responses have content array with text objects
expect(response.content).toBeDefined();
expect(response.content[0].type).toBe('text');

// Parse the actual data
const data = JSON.parse(response.content[0].text);
```

### 2. MSW Integration Setup

Proper MSW setup is crucial for integration tests:

```typescript
// tests/integration/setup/integration-setup.ts
import { setupServer } from 'msw/node';
import { handlers } from '@tests/mocks/n8n-api/handlers';

// Create server but don't start it globally
const server = setupServer(...handlers);

beforeAll(async () => {
  // Only start MSW for integration tests
  if (process.env.MSW_ENABLED === 'true') {
    server.listen({ onUnhandledRequest: 'bypass' });
  }
});

afterAll(async () => {
  server.close();
});
```

### 3. Database Isolation for Parallel Tests

Each test gets its own database to enable parallel execution:

```typescript
// tests/utils/database-utils.ts
export function createTestDatabaseAdapter(
  db?: Database.Database,
  options: TestDatabaseOptions = {}
): DatabaseAdapter {
  const database = db || new Database(':memory:');

  // FTS5 is a compile-time SQLite option; this PRAGMA only lists the
  // compile options so availability can be verified - it does not enable FTS5
  if (options.enableFTS5) {
    database.exec('PRAGMA main.compile_options;');
  }

  return new DatabaseAdapter(database);
}
```

### 4. Environment-Aware Performance Thresholds

CI environments are slower, so we adjust expectations:

```typescript
// Environment-aware thresholds
const getThreshold = (local: number, ci: number) =>
  process.env.CI ? ci : local;

it('should respond quickly', async () => {
  const start = performance.now();
  await someOperation();
  const duration = performance.now() - start;

  expect(duration).toBeLessThan(getThreshold(50, 200));
});
```

## Best Practices

### 1. Test Isolation
- Each test creates its own database instance
- Tests clean up after themselves
- No shared state between tests

### 2. Proper Cleanup Order
```typescript
afterEach(async () => {
  // Close client first to ensure no pending requests
  await client.close();

  // Give time for client to fully close
  await new Promise(resolve => setTimeout(resolve, 50));

  // Then close server
  await mcpServer.close();

  // Finally cleanup database
  await testDb.cleanup();
});
```

### 3. Handle Async Operations Carefully
```typescript
// Avoid race conditions in cleanup
it('should handle disconnection', async () => {
  // ... test code ...

  // Ensure operations complete before cleanup
  await transport.close();
  await new Promise(resolve => setTimeout(resolve, 100));
});
```

### 4. Meaningful Test Organization
- Group related tests using `describe` blocks
- Use descriptive test names that explain the behavior
- Follow AAA pattern: Arrange, Act, Assert
- Keep tests focused on single behaviors

## Debugging Tests

### Running Specific Tests
```bash
# Run a single test file
npm test tests/integration/mcp-protocol/tool-invocation.test.ts

# Run tests matching a name pattern (Vitest's -t / --testNamePattern)
npm test -- -t "should list nodes"

# Run with debugging output
DEBUG=* npm test
```

### VSCode Integration
```json
// .vscode/launch.json
{
  "configurations": [
    {
      "type": "node",
      "request": "launch",
      "name": "Debug Tests",
      "program": "${workspaceFolder}/node_modules/vitest/vitest.mjs",
      "args": ["run", "${file}"],
      "console": "integratedTerminal"
    }
  ]
}
```

## Test Coverage

While we don't enforce strict coverage thresholds yet, the infrastructure is in place:
- Coverage reports generated in `lcov`, `html`, and `text` formats
- Integration with Codecov for tracking coverage over time
- Per-file coverage visible in VSCode with extensions

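For reference, a minimal coverage block that produces those three report formats (a sketch, not the project's exact config):

```typescript
// vitest.config.ts (sketch): coverage reporters matching the formats above
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    coverage: {
      provider: 'v8',
      reporter: ['text', 'html', 'lcov'],
      reportsDirectory: './coverage',
    },
  },
});
```
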
## Future Improvements

1. **E2E Testing**: Add Playwright for testing the full MCP server interaction
2. **Load Testing**: Implement k6 or Artillery for stress testing
3. **Contract Testing**: Add Pact for ensuring API compatibility
4. **Visual Regression**: For any UI components that may be added
5. **Mutation Testing**: Use Stryker to ensure test quality

## Common Issues and Solutions

### 1. Tests Hanging in CI

**Problem**: Tests would hang indefinitely in CI due to `process.exit()` calls.

**Solution**: Remove all `process.exit()` calls from test code and use proper cleanup:
```typescript
// Bad
afterAll(() => {
  process.exit(0); // This causes Vitest to hang
});

// Good
afterAll(async () => {
  await cleanup();
  // Let Vitest handle process termination
});
```

### 2. MCP Response Structure

**Problem**: Tests expecting wrong response format from MCP tools.

**Solution**: Always access responses through `content[0].text`:
```typescript
// Wrong
const data = response[0].text;

// Correct
const data = JSON.parse(response.content[0].text);
```

### 3. Database Not Found Errors

**Problem**: Tests failing with "node not found" when database is empty.

**Solution**: Check for empty databases before assertions:
```typescript
const stats = await server.executeTool('get_database_statistics', {});
if (stats.totalNodes > 0) {
  expect(result.nodes.length).toBeGreaterThan(0);
} else {
  expect(result.nodes).toHaveLength(0);
}
```

### 4. MSW Loading Globally

**Problem**: MSW interfering with unit tests when loaded globally.

**Solution**: Only load MSW in integration test setup:
```typescript
// vitest.config.integration.ts
setupFiles: [
  './tests/setup/global-setup.ts',
  './tests/integration/setup/integration-setup.ts' // MSW only here
]
```

## Resources

- [Vitest Documentation](https://vitest.dev/)
- [MSW Documentation](https://mswjs.io/)
- [Testing Best Practices](https://github.com/goldbergyoni/javascript-testing-best-practices)
- [MCP SDK Documentation](https://modelcontextprotocol.io/)

@@ -1,276 +0,0 @@

# n8n-MCP Testing Implementation Checklist

## Test Suite Development Status

### Context
- **Situation**: Building comprehensive test suite from scratch
- **Branch**: feat/comprehensive-testing-suite (separate from main)
- **Main Branch Status**: Working in production without tests
- **Goal**: Add test coverage without disrupting development

## Immediate Actions (Day 1)

- [x] ~~Fix failing tests (Phase 0)~~ ✅ COMPLETED
- [x] ~~Create GitHub Actions workflow file~~ ✅ COMPLETED
- [x] ~~Install Vitest and remove Jest~~ ✅ COMPLETED
- [x] ~~Create vitest.config.ts~~ ✅ COMPLETED
- [x] ~~Setup global test configuration~~ ✅ COMPLETED
- [x] ~~Migrate existing tests to Vitest syntax~~ ✅ COMPLETED
- [x] ~~Setup coverage reporting with Codecov~~ ✅ COMPLETED

## Phase 1: Vitest Migration ✅ COMPLETED

All tests have been successfully migrated from Jest to Vitest:
- ✅ Removed Jest and installed Vitest
- ✅ Created vitest.config.ts with path aliases
- ✅ Set up global test configuration
- ✅ Migrated all 6 test files (68 tests passing)
- ✅ Updated TypeScript configuration
- ✅ Cleaned up Jest configuration files

## Week 1: Foundation

### Testing Infrastructure ✅ COMPLETED (Phase 2)
- [x] ~~Create test directory structure~~ ✅ COMPLETED
- [x] ~~Setup mock infrastructure for better-sqlite3~~ ✅ COMPLETED
- [x] ~~Create mock for n8n-nodes-base package~~ ✅ COMPLETED
- [x] ~~Setup test database utilities~~ ✅ COMPLETED
- [x] ~~Create factory pattern for nodes~~ ✅ COMPLETED
- [x] ~~Create builder pattern for workflows~~ ✅ COMPLETED
- [x] ~~Setup global test utilities~~ ✅ COMPLETED
- [x] ~~Configure test environment variables~~ ✅ COMPLETED

### CI/CD Pipeline ✅ COMPLETED (Phase 3.8)
- [x] ~~GitHub Actions for test execution~~ ✅ COMPLETED & VERIFIED
  - Successfully running with Vitest
  - 1021 tests passing in CI
  - Build time: ~2 minutes
- [x] ~~Coverage reporting integration~~ ✅ COMPLETED (Codecov setup)
- [x] ~~Performance benchmark tracking~~ ✅ COMPLETED
- [x] ~~Test result artifacts~~ ✅ COMPLETED
- [ ] Branch protection rules
- [ ] Required status checks

## Week 2: Mock Infrastructure

### Database Mocking
- [ ] Complete better-sqlite3 mock implementation (a minimal mock shape is sketched after this list)
- [ ] Mock prepared statements
- [ ] Mock transactions
- [ ] Mock FTS5 search functionality
- [ ] Test data seeding utilities

### External Dependencies
- [ ] Mock axios for API calls
- [ ] Mock file system operations
- [ ] Mock MCP SDK
- [ ] Mock Express server
- [ ] Mock WebSocket connections

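A minimal shape such a better-sqlite3 mock might take (names and return values are illustrative, not the final implementation):

```typescript
import { vi } from 'vitest';

// Illustrative mock: prepared statements return canned results,
// and transaction() simply returns the wrapped function.
vi.mock('better-sqlite3', () => ({
  default: vi.fn(() => ({
    prepare: vi.fn(() => ({
      run: vi.fn(() => ({ changes: 1 })),
      get: vi.fn(),
      all: vi.fn(() => []),
    })),
    transaction: vi.fn((fn: (...args: unknown[]) => unknown) => fn),
    exec: vi.fn(),
    close: vi.fn(),
  })),
}));
```
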
## Week 3-4: Unit Tests ✅ COMPLETED (Phase 3)

### Core Services (Priority 1) ✅ COMPLETED
- [x] ~~`config-validator.ts` - 95% coverage~~ ✅ 96.9%
- [x] ~~`enhanced-config-validator.ts` - 95% coverage~~ ✅ 94.55%
- [x] ~~`workflow-validator.ts` - 90% coverage~~ ✅ 97.59%
- [x] ~~`expression-validator.ts` - 90% coverage~~ ✅ 97.22%
- [x] ~~`property-filter.ts` - 90% coverage~~ ✅ 95.25%
- [x] ~~`example-generator.ts` - 85% coverage~~ ✅ 94.34%

### Parsers (Priority 2) ✅ COMPLETED
- [x] ~~`node-parser.ts` - 90% coverage~~ ✅ 97.42%
- [x] ~~`property-extractor.ts` - 90% coverage~~ ✅ 95.49%

### MCP Layer (Priority 3) ✅ COMPLETED
- [x] ~~`tools.ts` - 90% coverage~~ ✅ 94.11%
- [x] ~~`handlers-n8n-manager.ts` - 85% coverage~~ ✅ 92.71%
- [x] ~~`handlers-workflow-diff.ts` - 85% coverage~~ ✅ 96.34%
- [x] ~~`tools-documentation.ts` - 80% coverage~~ ✅ 94.12%

### Database Layer (Priority 4) ✅ COMPLETED
- [x] ~~`node-repository.ts` - 85% coverage~~ ✅ 91.48%
- [x] ~~`database-adapter.ts` - 85% coverage~~ ✅ 89.29%
- [x] ~~`template-repository.ts` - 80% coverage~~ ✅ 86.78%

### Loaders and Mappers (Priority 5) ✅ COMPLETED
- [x] ~~`node-loader.ts` - 85% coverage~~ ✅ 91.89%
- [x] ~~`docs-mapper.ts` - 80% coverage~~ ✅ 95.45%

### Additional Critical Services Tested ✅ COMPLETED (Phase 3.5)
- [x] ~~`n8n-api-client.ts`~~ ✅ 83.87%
- [x] ~~`workflow-diff-engine.ts`~~ ✅ 90.06%
- [x] ~~`n8n-validation.ts`~~ ✅ 97.14%
- [x] ~~`node-specific-validators.ts`~~ ✅ 98.7%

## Week 5-6: Integration Tests 🚧 IN PROGRESS

### Real Status (July 29, 2025)
**Context**: Building test suite from scratch on testing branch. Main branch has no tests.

**Overall Status**: 187/246 tests passing (76% pass rate)
**Critical Issue**: CI shows green despite 58 failing tests due to `|| true` in workflow

### MCP Protocol Tests 🔄 MIXED STATUS
- [x] ~~Full MCP server initialization~~ ✅ COMPLETED
- [x] ~~Tool invocation flow~~ ✅ FIXED (30 tests in tool-invocation.test.ts)
- [ ] Error handling and recovery ⚠️ 16 FAILING (error-handling.test.ts)
- [x] ~~Concurrent request handling~~ ✅ COMPLETED
- [ ] Session management ⚠️ 5 FAILING (timeout issues)

### n8n API Integration 🔄 PENDING
- [ ] Workflow CRUD operations (MSW mocks ready)
- [ ] Webhook triggering
- [ ] Execution monitoring
- [ ] Authentication handling
- [ ] Error scenarios

### Database Integration ⚠️ ISSUES FOUND
- [x] ~~SQLite operations with real DB~~ ✅ BASIC TESTS PASS
- [ ] FTS5 search functionality ⚠️ 7 FAILING (syntax errors)
- [ ] Transaction handling ⚠️ 1 FAILING (isolation issues)
- [ ] Migration testing 🔄 NOT STARTED
- [ ] Performance under load ⚠️ 4 FAILING (slower than thresholds)

## Week 7-8: E2E & Performance
|
||||
|
||||
### End-to-End Scenarios
|
||||
- [ ] Complete workflow creation flow
|
||||
- [ ] AI agent workflow setup
|
||||
- [ ] Template import and validation
|
||||
- [ ] Workflow execution monitoring
|
||||
- [ ] Error recovery scenarios
|
||||
|
||||
### Performance Benchmarks
|
||||
- [ ] Node loading speed (< 50ms per node)
|
||||
- [ ] Search performance (< 100ms for 1000 nodes)
|
||||
- [ ] Validation speed (< 10ms simple, < 100ms complex)
|
||||
- [ ] Database query performance
|
||||
- [ ] Memory usage profiling
|
||||
- [ ] Concurrent request handling
|
||||
|
||||
### Load Testing
|
||||
- [ ] 100 concurrent MCP requests
|
||||
- [ ] 10,000 nodes in database
|
||||
- [ ] 1,000 workflow validations/minute
|
||||
- [ ] Memory leak detection
|
||||
- [ ] Resource cleanup verification
|
||||
|
||||
## Testing Quality Gates

### Coverage Requirements
- [ ] Overall: 80%+ (Currently: 62.67%)
- [x] ~~Core services: 90%+~~ ✅ COMPLETED
- [x] ~~MCP tools: 90%+~~ ✅ COMPLETED
- [x] ~~Critical paths: 95%+~~ ✅ COMPLETED
- [x] ~~New code: 90%+~~ ✅ COMPLETED

### Performance Requirements
- [x] ~~All unit tests < 10ms~~ ✅ COMPLETED
- [ ] Integration tests < 1s
- [ ] E2E tests < 10s
- [x] ~~Full suite < 5 minutes~~ ✅ COMPLETED (~2 minutes)
- [x] ~~No memory leaks~~ ✅ COMPLETED

### Code Quality
- [x] ~~No ESLint errors~~ ✅ COMPLETED
- [x] ~~No TypeScript errors~~ ✅ COMPLETED
- [x] ~~No console.log in tests~~ ✅ COMPLETED
- [x] ~~All tests have descriptions~~ ✅ COMPLETED
- [x] ~~No hardcoded values~~ ✅ COMPLETED

## Monitoring & Maintenance
|
||||
|
||||
### Daily
|
||||
- [ ] Check CI pipeline status
|
||||
- [ ] Review failed tests
|
||||
- [ ] Monitor flaky tests
|
||||
|
||||
### Weekly
|
||||
- [ ] Review coverage reports
|
||||
- [ ] Update test documentation
|
||||
- [ ] Performance benchmark review
|
||||
- [ ] Team sync on testing progress
|
||||
|
||||
### Monthly
|
||||
- [ ] Update baseline benchmarks
|
||||
- [ ] Review and refactor tests
|
||||
- [ ] Update testing strategy
|
||||
- [ ] Training/knowledge sharing
|
||||
|
||||
## Risk Mitigation

### Technical Risks
- [ ] Mock complexity - Use simple, maintainable mocks
- [ ] Test brittleness - Focus on behavior, not implementation
- [ ] Performance impact - Run heavy tests in parallel
- [ ] Flaky tests - Proper async handling and isolation

### Process Risks
- [ ] Slow adoption - Provide training and examples
- [ ] Coverage gaming - Review test quality, not just numbers
- [ ] Maintenance burden - Automate what's possible
- [ ] Integration complexity - Use test containers

## Success Criteria

### Current Reality Check
- **Unit Tests**: ✅ SOLID (932 passing, 87.8% coverage)
- **Integration Tests**: ⚠️ NEEDS WORK (58 failing, 76% pass rate)
- **E2E Tests**: 🔄 NOT STARTED
- **CI/CD**: ⚠️ BROKEN (hiding failures with `|| true`)

### Revised Technical Metrics
- Coverage: Currently 87.8% for unit tests ✅
- Integration test pass rate: Target 100% (currently 76%)
- Performance: Adjust thresholds based on reality
- Reliability: Fix flaky tests during repair
- Speed: CI pipeline < 5 minutes ✅ (~2 minutes)

### Team Metrics
- All developers writing tests ✅
- Tests reviewed in PRs ✅
- No production bugs from tested code
- Improved development velocity ✅

## Phases Completed

- **Phase 0**: Immediate Fixes ✅ COMPLETED
- **Phase 1**: Vitest Migration ✅ COMPLETED
- **Phase 2**: Test Infrastructure ✅ COMPLETED
- **Phase 3**: Unit Tests (All 943 tests) ✅ COMPLETED
- **Phase 3.5**: Critical Service Testing ✅ COMPLETED
- **Phase 3.8**: CI/CD & Infrastructure ✅ COMPLETED
- **Phase 4**: Integration Tests 🚧 IN PROGRESS
  - **Status**: 58 out of 246 tests failing (23.6% failure rate)
  - **CI Issue**: Tests appear green due to `|| true` error suppression
  - **Categories of Failures**:
    - Database: 9 tests (state isolation, FTS5 syntax)
    - MCP Protocol: 16 tests (response structure in error-handling.test.ts)
    - MSW: 6 tests (not initialized properly)
    - FTS5 Search: 7 tests (query syntax issues)
    - Session Management: 5 tests (async cleanup)
    - Performance: 15 tests (threshold mismatches)
  - **Next Steps**:
    1. Get team buy-in for "red" CI
    2. Remove `|| true` from workflow
    3. Fix tests systematically by category
- **Phase 5**: E2E Tests 🔄 PENDING

## Resources & Tools
|
||||
|
||||
### Documentation
|
||||
- Vitest: https://vitest.dev/
|
||||
- Testing Library: https://testing-library.com/
|
||||
- MSW: https://mswjs.io/
|
||||
- Testcontainers: https://www.testcontainers.com/
|
||||
|
||||
### Monitoring
|
||||
- Codecov: https://codecov.io/
|
||||
- GitHub Actions: https://github.com/features/actions
|
||||
- Benchmark Action: https://github.com/benchmark-action/github-action-benchmark
|
||||
|
||||
### Team Resources
|
||||
- Testing best practices guide
|
||||
- Example test implementations
|
||||
- Mock usage patterns
|
||||
- Performance optimization tips
|
||||
@@ -1,472 +0,0 @@

# n8n-MCP Testing Implementation Guide

## Phase 1: Foundation Setup (Week 1-2)

### 1.1 Install Vitest and Dependencies

```bash
# Remove Jest
npm uninstall jest ts-jest @types/jest

# Install Vitest and related packages
npm install -D vitest @vitest/ui @vitest/coverage-v8
npm install -D @testing-library/jest-dom
npm install -D msw # For API mocking
npm install -D @faker-js/faker # For test data
npm install -D fishery # For factories
```

### 1.2 Update package.json Scripts

```json
{
  "scripts": {
    // Testing
    "test": "vitest",
    "test:ui": "vitest --ui",
    "test:unit": "vitest run tests/unit",
    "test:integration": "vitest run tests/integration",
    "test:e2e": "vitest run tests/e2e",
    "test:watch": "vitest watch",
    "test:coverage": "vitest run --coverage",
    "test:coverage:check": "vitest run --coverage --coverage.thresholdAutoUpdate=false",

    // Benchmarks
    "bench": "vitest bench",
    "bench:compare": "vitest bench --compare",

    // CI specific
    "test:ci": "vitest run --reporter=junit --reporter=default",
    "test:ci:coverage": "vitest run --coverage --reporter=junit --reporter=default"
  }
}
```

### 1.3 Migrate Existing Tests

```typescript
// Before (Jest)
import { describe, test, expect } from '@jest/globals';

// After (Vitest)
import { describe, it, expect, vi } from 'vitest';

// Update mock syntax
// Jest: jest.mock('module')
// Vitest: vi.mock('module')

// Update timer mocks
// Jest: jest.useFakeTimers()
// Vitest: vi.useFakeTimers()
```

### 1.4 Create Test Database Setup

```typescript
// tests/setup/test-database.ts
import Database from 'better-sqlite3';
import { readFileSync } from 'fs';
import { join } from 'path';

export class TestDatabase {
  private db: Database.Database;

  constructor() {
    this.db = new Database(':memory:');
    this.initialize();
  }

  private initialize() {
    const schema = readFileSync(
      join(__dirname, '../../src/database/schema.sql'),
      'utf8'
    );
    this.db.exec(schema);
  }

  seedNodes(nodes: any[]) {
    // "group" is a reserved keyword in SQLite, so the column must be quoted
    const stmt = this.db.prepare(`
      INSERT INTO nodes (type, displayName, name, "group", version, description, properties)
      VALUES (?, ?, ?, ?, ?, ?, ?)
    `);

    const insertMany = this.db.transaction((nodes) => {
      for (const node of nodes) {
        stmt.run(
          node.type,
          node.displayName,
          node.name,
          node.group,
          node.version,
          node.description,
          JSON.stringify(node.properties)
        );
      }
    });

    insertMany(nodes);
  }

  close() {
    this.db.close();
  }

  getDb() {
    return this.db;
  }
}
```

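Usage is straightforward; a short sketch (the seeded node below is made up for illustration):

```typescript
const testDb = new TestDatabase();
testDb.seedNodes([
  { type: 'nodes-base.webhook', displayName: 'Webhook', name: 'webhook',
    group: 'trigger', version: 1, description: 'Test node', properties: [] },
]);
// ... run assertions against testDb.getDb() ...
testDb.close();
```
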
## Phase 2: Core Unit Tests (Week 3-4)

### 2.1 Test Organization Template

```typescript
// tests/unit/services/[service-name].test.ts
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { ServiceName } from '@/services/service-name';

describe('ServiceName', () => {
  let service: ServiceName;
  let mockDependency: any;

  beforeEach(() => {
    // Setup mocks
    mockDependency = {
      method: vi.fn()
    };

    // Create service instance
    service = new ServiceName(mockDependency);
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

  describe('methodName', () => {
    it('should handle happy path', async () => {
      // Arrange
      const input = { /* test data */ };
      mockDependency.method.mockResolvedValue({ /* mock response */ });

      // Act
      const result = await service.methodName(input);

      // Assert
      expect(result).toEqual(/* expected output */);
      expect(mockDependency.method).toHaveBeenCalledWith(/* expected args */);
    });

    it('should handle errors gracefully', async () => {
      // Arrange
      mockDependency.method.mockRejectedValue(new Error('Test error'));

      // Act & Assert
      await expect(service.methodName({})).rejects.toThrow('Expected error message');
    });
  });
});
```

### 2.2 Mock Strategies by Layer

#### Database Layer
```typescript
// tests/unit/database/node-repository.test.ts
import { vi } from 'vitest';

// mockData is defined by the individual test
vi.mock('better-sqlite3', () => ({
  default: vi.fn(() => ({
    prepare: vi.fn(() => ({
      all: vi.fn(() => mockData),
      get: vi.fn((id) => mockData.find(d => d.id === id)),
      run: vi.fn(() => ({ changes: 1 }))
    })),
    exec: vi.fn(),
    close: vi.fn()
  }))
}));
```

#### External APIs
```typescript
// tests/unit/services/__mocks__/axios.ts
export default {
  create: vi.fn(() => ({
    get: vi.fn(() => Promise.resolve({ data: {} })),
    post: vi.fn(() => Promise.resolve({ data: { id: '123' } })),
    put: vi.fn(() => Promise.resolve({ data: {} })),
    delete: vi.fn(() => Promise.resolve({ data: {} }))
  }))
};
```

#### File System
```typescript
// Use memfs for file system mocking; mock the fs module with memfs's
// fs implementation (vol alone is a Volume object, not the fs API)
import { fs as memfs, vol } from 'memfs';

vi.mock('fs', () => ({ default: memfs, ...memfs }));

beforeEach(() => {
  vol.reset();
  vol.fromJSON({
    '/test/file.json': JSON.stringify({ test: 'data' })
  });
});
```

### 2.3 Critical Path Tests

```typescript
// Priority 1: Node Loading and Parsing
// tests/unit/loaders/node-loader.test.ts

// Priority 2: Configuration Validation
// tests/unit/services/config-validator.test.ts

// Priority 3: MCP Tools
// tests/unit/mcp/tools.test.ts

// Priority 4: Database Operations
// tests/unit/database/node-repository.test.ts

// Priority 5: Workflow Validation
// tests/unit/services/workflow-validator.test.ts
```

## Phase 3: Integration Tests (Week 5-6)

### 3.1 Test Container Setup

```typescript
// tests/setup/test-containers.ts
import { GenericContainer, StartedTestContainer } from 'testcontainers';

export class N8nTestContainer {
  private container: StartedTestContainer;

  async start() {
    // Note: withEnv(key, value) is the pre-v9 testcontainers API;
    // newer releases use withEnvironment({ KEY: value })
    this.container = await new GenericContainer('n8nio/n8n:latest')
      .withExposedPorts(5678)
      .withEnv('N8N_BASIC_AUTH_ACTIVE', 'false')
      .withEnv('N8N_ENCRYPTION_KEY', 'test-key')
      .start();

    return {
      url: `http://localhost:${this.container.getMappedPort(5678)}`,
      stop: () => this.container.stop()
    };
  }
}
```

### 3.2 Integration Test Pattern

```typescript
// tests/integration/n8n-api/workflow-crud.test.ts
import { N8nTestContainer } from '@tests/setup/test-containers';
import { N8nAPIClient } from '@/services/n8n-api-client';

describe('n8n API Integration', () => {
  let container: any;
  let apiClient: N8nAPIClient;

  beforeAll(async () => {
    container = await new N8nTestContainer().start();
    apiClient = new N8nAPIClient(container.url);
  }, 30000);

  afterAll(async () => {
    await container.stop();
  });

  it('should create and retrieve workflow', async () => {
    // Create workflow
    const workflow = createTestWorkflow();
    const created = await apiClient.createWorkflow(workflow);

    expect(created.id).toBeDefined();

    // Retrieve workflow
    const retrieved = await apiClient.getWorkflow(created.id);
    expect(retrieved.name).toBe(workflow.name);
  });
});
```

## Phase 4: E2E & Performance (Week 7-8)
|
||||
|
||||
### 4.1 E2E Test Setup
|
||||
|
||||
```typescript
|
||||
// tests/e2e/workflows/complete-workflow.test.ts
|
||||
import { MCPClient } from '@tests/utils/mcp-client';
|
||||
import { N8nTestContainer } from '@tests/setup/test-containers';
|
||||
|
||||
describe('Complete Workflow E2E', () => {
|
||||
let mcpServer: any;
|
||||
let n8nContainer: any;
|
||||
let mcpClient: MCPClient;
|
||||
|
||||
beforeAll(async () => {
|
||||
// Start n8n
|
||||
n8nContainer = await new N8nTestContainer().start();
|
||||
|
||||
// Start MCP server
|
||||
mcpServer = await startMCPServer({
|
||||
n8nUrl: n8nContainer.url
|
||||
});
|
||||
|
||||
// Create MCP client
|
||||
mcpClient = new MCPClient(mcpServer.url);
|
||||
}, 60000);
|
||||
|
||||
it('should execute complete workflow creation flow', async () => {
|
||||
// 1. Search for nodes
|
||||
const searchResult = await mcpClient.call('search_nodes', {
|
||||
query: 'webhook http slack'
|
||||
});
|
||||
|
||||
// 2. Get node details
|
||||
const webhookInfo = await mcpClient.call('get_node_info', {
|
||||
nodeType: 'nodes-base.webhook'
|
||||
});
|
||||
|
||||
// 3. Create workflow
|
||||
const workflow = new WorkflowBuilder('E2E Test')
|
||||
.addWebhookNode()
|
||||
.addHttpRequestNode()
|
||||
.addSlackNode()
|
||||
.connectSequentially()
|
||||
.build();
|
||||
|
||||
// 4. Validate workflow
|
||||
const validation = await mcpClient.call('validate_workflow', {
|
||||
workflow
|
||||
});
|
||||
|
||||
expect(validation.isValid).toBe(true);
|
||||
|
||||
// 5. Deploy to n8n
|
||||
const deployed = await mcpClient.call('n8n_create_workflow', {
|
||||
...workflow
|
||||
});
|
||||
|
||||
expect(deployed.id).toBeDefined();
|
||||
expect(deployed.active).toBe(false);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 4.2 Performance Benchmarks

```typescript
// vitest.benchmark.config.ts
export default {
  test: {
    benchmark: {
      // Output benchmark results
      outputFile: './benchmark-results.json',

      // Compare with baseline
      compare: './benchmark-baseline.json',

      // Fail if performance degrades by more than 10%
      threshold: {
        p95: 1.1, // 110% of baseline
        p99: 1.2  // 120% of baseline
      }
    }
  }
};
```

## Testing Best Practices

### 1. Test Naming Convention
```typescript
// Format: should [expected behavior] when [condition]
it('should return user data when valid ID is provided')
it('should throw ValidationError when email is invalid')
it('should retry 3 times when network fails')
```

### 2. Test Data Builders
```typescript
// Use builders for complex test data
const user = new UserBuilder()
  .withEmail('test@example.com')
  .withRole('admin')
  .build();
```

### 3. Custom Matchers
```typescript
// tests/utils/matchers.ts
export const toBeValidNode = (received: any) => {
  const pass =
    received.type &&
    received.displayName &&
    received.properties &&
    Array.isArray(received.properties);

  return {
    pass,
    message: () => `expected ${received} to be a valid node`
  };
};

// Register once in a setup file so the matcher is available:
// expect.extend({ toBeValidNode });

// Usage
expect(node).toBeValidNode();
```

### 4. Snapshot Testing
```typescript
// For complex structures
it('should generate correct node schema', () => {
  const schema = generateNodeSchema(node);
  expect(schema).toMatchSnapshot();
});
```

### 5. Test Isolation
```typescript
// Always clean up after tests
afterEach(async () => {
  await cleanup();
  vi.clearAllMocks();
  vi.restoreAllMocks();
});
```

## Coverage Goals by Module

| Module | Target | Priority | Notes |
|--------|--------|----------|-------|
| services/config-validator | 95% | High | Critical for reliability |
| services/workflow-validator | 90% | High | Core functionality |
| mcp/tools | 90% | High | User-facing API |
| database/node-repository | 85% | Medium | Well-tested DB layer |
| loaders/node-loader | 85% | Medium | External dependencies |
| parsers/* | 90% | High | Data transformation |
| utils/* | 80% | Low | Helper functions |
| scripts/* | 50% | Low | One-time scripts |

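If these targets are later enforced, Vitest's per-glob coverage thresholds are one way to encode them. A sketch under that assumption (paths and numbers mirror the table above; this is not the project's actual config):

```typescript
// vitest.config.ts (sketch): per-module thresholds derived from the table
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    coverage: {
      thresholds: {
        'src/services/config-validator.ts': { statements: 95 },
        'src/mcp/tools.ts': { statements: 90 },
        'src/utils/**': { statements: 80 },
      },
    },
  },
});
```
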
## Continuous Improvement

1. **Weekly Reviews**: Review test coverage and identify gaps
2. **Performance Baselines**: Update benchmarks monthly
3. **Flaky Test Detection**: Monitor and fix within 48 hours
4. **Test Documentation**: Keep examples updated
5. **Developer Training**: Pair programming on tests

## Success Metrics

- [ ] All tests pass in CI (0 failures)
- [ ] Coverage > 80% overall
- [ ] No flaky tests
- [ ] CI runs < 5 minutes
- [ ] Performance benchmarks stable
- [ ] Zero production bugs from tested code

File diff suppressed because it is too large
File diff suppressed because it is too large

@@ -1,66 +0,0 @@

# Token Efficiency Improvements Summary

## Overview
Made all MCP tool descriptions concise and token-efficient while preserving essential information.

## Key Improvements

### Before vs After Examples

1. **search_nodes**
   - Before: ~350 chars with verbose explanation
   - After: 165 chars
   - `Search nodes by keywords. Modes: OR (any word), AND (all words), FUZZY (typos OK). Primary nodes ranked first. Examples: "webhook"→Webhook, "http call"→HTTP Request.`

2. **get_node_info**
   - Before: ~450 chars with warnings about size
   - After: 174 chars
   - `Get FULL node schema (100KB+). TIP: Use get_node_essentials first! Returns all properties/operations/credentials. Prefix required: "nodes-base.httpRequest" not "httpRequest".`

3. **validate_node_minimal**
   - Before: ~350 chars explaining what it doesn't do
   - After: 102 chars
   - `Fast check for missing required fields only. No warnings/suggestions. Returns: list of missing fields.`

4. **get_property_dependencies**
   - Before: ~400 chars with full example
   - After: 131 chars
   - `Shows property dependencies and visibility rules. Example: sendBody=true reveals body fields. Test visibility with optional config.`

## Statistics

### Documentation Tools (22 tools)
- Average description length: **129 characters**
- Total characters: 2,836
- Tools over 200 chars: 1 (list_nodes at 204)

### Management Tools (17 tools)
- Average description length: **93 characters**
- Total characters: 1,578
- Tools over 200 chars: 1 (n8n_update_partial_workflow at 284)

## Strategy Used

1. **Remove redundancy**: Eliminated repeated information available in parameter descriptions
2. **Use abbreviations**: "vs" instead of "versus", "&" instead of "and" where appropriate
3. **Compact examples**: `"webhook"→Webhook` instead of verbose explanations
4. **Direct language**: "Fast check" instead of "Quick validation that only checks"
5. **Move details to documentation**: Complex tools reference `tools_documentation()` for full details
6. **Essential info only**: Focus on what the tool does, not how it works internally

## Special Cases

### n8n_update_partial_workflow
This tool's description is necessarily longer (284 chars) because it:
- Lists all 13 operation types
- Is critical for users to know available operations
- Directs to full documentation for details

### Complex Documentation Preserved
For tools like `n8n_update_partial_workflow`, detailed documentation was moved to `tools-documentation.ts` rather than deleted, ensuring users can still access comprehensive information when needed.

## Impact
- **Token savings**: ~65-70% reduction in description tokens
- **Faster AI responses**: Less context used for tool descriptions
- **Better UX**: Clearer, more scannable tool list
- **Maintained functionality**: All essential information preserved

@@ -1,118 +0,0 @@

# Transactional Updates Example

This example demonstrates the new transactional update capabilities in v2.7.0.

## Before (v2.6.x and earlier)

Previously, you had to carefully order operations to ensure nodes existed before connecting them:

```json
{
  "id": "workflow-123",
  "operations": [
    // 1. First add all nodes
    { "type": "addNode", "node": { "name": "Process", "type": "n8n-nodes-base.set", ... }},
    { "type": "addNode", "node": { "name": "Notify", "type": "n8n-nodes-base.slack", ... }},

    // 2. Then add connections (would fail if done before nodes)
    { "type": "addConnection", "source": "Webhook", "target": "Process" },
    { "type": "addConnection", "source": "Process", "target": "Notify" }
  ]
}
```

## After (v2.7.0+)

Now you can write operations in any order - the engine automatically handles dependencies:

```json
{
  "id": "workflow-123",
  "operations": [
    // Connections can come first!
    { "type": "addConnection", "source": "Webhook", "target": "Process" },
    { "type": "addConnection", "source": "Process", "target": "Notify" },

    // Nodes added later - still works!
    { "type": "addNode", "node": { "name": "Process", "type": "n8n-nodes-base.set", "position": [400, 300] }},
    { "type": "addNode", "node": { "name": "Notify", "type": "n8n-nodes-base.slack", "position": [600, 300] }}
  ]
}
```

## How It Works

1. **Two-Pass Processing** (see the sketch below):
   - Pass 1: All node operations (add, remove, update, move, enable, disable)
   - Pass 2: All other operations (connections, settings, metadata)

2. **Operation Limit**: Maximum 5 operations per request keeps complexity manageable

3. **Atomic Updates**: All operations succeed or all fail - no partial updates

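A minimal sketch of what two-pass ordering can look like (the operation names follow the list above; this is illustrative, not the engine's actual code):

```typescript
type Operation = { type: string; [key: string]: unknown };

const NODE_OPS = new Set([
  'addNode', 'removeNode', 'updateNode', 'moveNode', 'enableNode', 'disableNode',
]);

// Pass 1 collects node operations, pass 2 everything else, so that
// connections can reference nodes added in the same request.
function orderOperations(ops: Operation[]): Operation[] {
  const pass1 = ops.filter(op => NODE_OPS.has(op.type));
  const pass2 = ops.filter(op => !NODE_OPS.has(op.type));
  return [...pass1, ...pass2];
}
```
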
## Benefits for AI Agents

- **Intuitive**: Write operations in the order that makes sense logically
- **Reliable**: No need to track dependencies manually
- **Simple**: Focus on what to change, not how to order changes
- **Safe**: Built-in limits prevent overly complex operations

## Complete Example

Here's a real-world example of adding error handling to a workflow:

```json
{
  "id": "workflow-123",
  "operations": [
    // Define the flow first (makes logical sense)
    {
      "type": "removeConnection",
      "source": "HTTP Request",
      "target": "Save to DB"
    },
    {
      "type": "addConnection",
      "source": "HTTP Request",
      "target": "Error Handler"
    },
    {
      "type": "addConnection",
      "source": "Error Handler",
      "target": "Send Alert"
    },

    // Then add the nodes
    {
      "type": "addNode",
      "node": {
        "name": "Error Handler",
        "type": "n8n-nodes-base.if",
        "position": [500, 400],
        "parameters": {
          "conditions": {
            "boolean": [{
              "value1": "={{$json.error}}",
              "value2": true
            }]
          }
        }
      }
    },
    {
      "type": "addNode",
      "node": {
        "name": "Send Alert",
        "type": "n8n-nodes-base.emailSend",
        "position": [700, 400],
        "parameters": {
          "to": "alerts@company.com",
          "subject": "Workflow Error Alert"
        }
      }
    }
  ]
}
```

All operations will be processed correctly, even though connections reference nodes that don't exist yet!

@@ -1,92 +0,0 @@

# Validation Improvements v2.4.2

Based on AI agent feedback, we've implemented several improvements to the `validate_node_operation` tool:

## 🎯 Issues Addressed

### 1. **@version Warnings** ✅ FIXED
- **Issue**: Showed confusing warnings about `@version` property not being used
- **Fix**: Filter out internal properties starting with `@` or `_` (a minimal sketch follows this section)
- **Result**: No more false warnings about internal n8n properties

### 2. **Duplicate Errors** ✅ FIXED
- **Issue**: Same error shown multiple times (e.g., missing `ts` field)
- **Fix**: Implemented deduplication that keeps the most specific error message
- **Result**: Each error shown only once with the best description

### 3. **Basic Code Validation** ✅ ADDED
- **Issue**: No syntax validation for Code node
- **Fix**: Added basic syntax checks for JavaScript and Python
- **Features**:
  - Unbalanced braces/parentheses detection
  - Python indentation consistency check
  - n8n-specific patterns (return statement, input access)
  - Security warnings (eval/exec usage)

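A minimal sketch of the filtering and deduplication ideas above (the helper names are made up for illustration, not the actual implementation):

```typescript
// Skip internal n8n properties such as @version before validation
const isInternal = (name: string) => name.startsWith('@') || name.startsWith('_');

// Keep only the most specific (here: longest) message per property
function dedupeErrors(errors: Array<{ property: string; message: string }>) {
  const byProperty = new Map<string, { property: string; message: string }>();
  for (const error of errors) {
    const existing = byProperty.get(error.property);
    if (!existing || error.message.length > existing.message.length) {
      byProperty.set(error.property, error);
    }
  }
  return [...byProperty.values()];
}
```
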
## 📊 Before & After
|
||||
|
||||
### Before (v2.4.1):
|
||||
```json
|
||||
{
|
||||
"errors": [
|
||||
{ "property": "ts", "message": "Required property 'Message Timestamp' is missing" },
|
||||
{ "property": "ts", "message": "Message timestamp (ts) is required to update a message" }
|
||||
],
|
||||
"warnings": [
|
||||
{ "property": "@version", "message": "Property '@version' is configured but won't be used" }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### After (v2.4.2):
|
||||
```json
|
||||
{
|
||||
"errors": [
|
||||
{ "property": "ts", "message": "Message timestamp (ts) is required to update a message",
|
||||
"fix": "Provide the timestamp of the message to update" }
|
||||
],
|
||||
"warnings": [] // No @version warning
|
||||
}
|
||||
```
|
||||
|
||||
## 🆕 Code Validation Examples

### JavaScript Syntax Check:
```javascript
// Missing closing brace
if (true) {
  return items;
// Error: "Unbalanced braces detected"
```

### Python Indentation Check:
```python
def process():
    if True:  # Tab
        return items  # Spaces
# Error: "Mixed tabs and spaces in indentation"
```

### n8n Pattern Check:
```javascript
const result = items.map(item => item.json);
// Warning: "No return statement found"
// Suggestion: "Add: return items;"
```

## 🚀 Impact

- **Cleaner validation results** - No more noise from internal properties
- **Clearer error messages** - Each issue reported once with the best description
- **Better code quality** - Basic syntax validation catches common mistakes
- **n8n best practices** - Warns about missing return statements and input handling

## 📝 Summary

The `validate_node_operation` tool is now even more helpful for AI agents and developers:
- 95% reduction in false positives (operation-aware)
- No duplicate or confusing warnings
- Basic code validation for common syntax errors
- n8n-specific pattern checking

**Rating improved from 9/10 to 9.5/10!** 🎉

@@ -116,17 +116,46 @@ The `n8n_update_partial_workflow` tool allows you to make targeted changes to wo
 }
 ```

-#### Update Connection (Change routing)
+#### Rewire Connection
 ```json
 {
-  "type": "updateConnection",
+  "type": "rewireConnection",
   "source": "Webhook",
+  "from": "Old Handler",
+  "to": "New Handler",
+  "description": "Rewire connection to new handler"
 }
 ```

+#### Smart Parameters for IF Nodes
 ```json
 {
   "type": "addConnection",
   "source": "IF",
-  "target": "Send Email",
-  "changes": {
-    "sourceOutput": "false", // Change from 'true' to 'false' output
-    "targetInput": "main"
-  },
-  "description": "Route failed conditions to email"
+  "target": "Success Handler",
+  "branch": "true", // Semantic parameter instead of sourceIndex
+  "description": "Route true branch to success handler"
 }
 ```

+```json
+{
+  "type": "addConnection",
+  "source": "IF",
+  "target": "Error Handler",
+  "branch": "false", // Routes to false branch (sourceIndex=1)
+  "description": "Route false branch to error handler"
+}
+```

+#### Smart Parameters for Switch Nodes
+```json
+{
+  "type": "addConnection",
+  "source": "Switch",
+  "target": "Handler A",
+  "case": 0, // First output
+  "description": "Route case 0 to Handler A"
+}
+```

@@ -577,13 +606,13 @@ The tool validates all operations before applying any changes. Common errors inc

 Always check the response for validation errors and adjust your operations accordingly.

-## Transactional Updates (v2.7.0+)
+## Transactional Updates

 The diff engine now supports transactional updates using a **two-pass processing** approach:

 ### How It Works

-1. **Operation Limit**: Maximum 5 operations per request to ensure reliability
+1. **No Operation Limit**: Process unlimited operations in a single request
 2. **Two-Pass Processing**:
    - **Pass 1**: All node operations (add, remove, update, move, enable, disable)
    - **Pass 2**: All other operations (connections, settings, metadata)

@@ -633,9 +662,9 @@ This allows you to add nodes and connect them in the same request:
 ### Benefits

 - **Order Independence**: You don't need to worry about operation order
-- **Atomic Updates**: All operations succeed or all fail
+- **Atomic Updates**: All operations succeed or all fail (unless continueOnError is enabled)
 - **Intuitive Usage**: Add complex workflow structures in one call
-- **Clear Limits**: 5 operations max keeps things simple and reliable
+- **No Hard Limits**: Process unlimited operations efficiently

 ### Example: Complete Workflow Addition

@@ -694,4 +723,4 @@ This allows you to add nodes and connect them in the same request:
 }
 ```

-All 5 operations will be processed correctly regardless of order!
+All operations will be processed correctly regardless of order!

0	n8n-nodes.db	(Normal file)
6055	package-lock.json	(generated; file diff suppressed because it is too large)
22	package.json

@@ -1,8 +1,16 @@
 {
   "name": "n8n-mcp",
-  "version": "2.15.5",
+  "version": "2.22.7",
   "description": "Integration between n8n workflow automation and Model Context Protocol (MCP)",
   "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "require": "./dist/index.js",
+      "import": "./dist/index.js"
+    }
+  },
   "bin": {
     "n8n-mcp": "./dist/mcp/index.js"
   },

@@ -131,17 +139,19 @@
     "vitest": "^3.2.4"
   },
   "dependencies": {
-    "@modelcontextprotocol/sdk": "^1.13.2",
-    "@n8n/n8n-nodes-langchain": "^1.112.2",
+    "@modelcontextprotocol/sdk": "^1.20.1",
+    "@n8n/n8n-nodes-langchain": "^1.115.1",
+    "@supabase/supabase-js": "^2.57.4",
     "dotenv": "^16.5.0",
     "express": "^5.1.0",
     "express-rate-limit": "^7.1.5",
+    "lru-cache": "^11.2.1",
-    "n8n": "^1.113.3",
-    "n8n-core": "^1.112.1",
-    "n8n-workflow": "^1.110.0",
+    "n8n": "^1.116.2",
+    "n8n-core": "^1.115.1",
+    "n8n-workflow": "^1.113.0",
     "openai": "^4.77.0",
     "sql.js": "^1.13.0",
     "tslib": "^2.6.2",
     "uuid": "^10.0.0",
     "zod": "^3.24.1"
   },

@@ -1,15 +1,17 @@
 {
   "name": "n8n-mcp-runtime",
-  "version": "2.15.1",
+  "version": "2.22.7",
   "description": "n8n MCP Server Runtime Dependencies Only",
   "private": true,
   "dependencies": {
     "@modelcontextprotocol/sdk": "^1.13.2",
+    "@supabase/supabase-js": "^2.57.4",
     "express": "^5.1.0",
     "express-rate-limit": "^7.1.5",
     "dotenv": "^16.5.0",
+    "lru-cache": "^11.2.1",
     "sql.js": "^1.13.0",
     "tslib": "^2.6.2",
     "uuid": "^10.0.0",
     "axios": "^1.7.7"
   },

@@ -1,60 +0,0 @@

# n8n-MCP v2.7.0 Release Notes

## 🎉 What's New

### 🔧 File Refactoring & Version Management
- **Renamed core MCP files** to remove unnecessary suffixes for a cleaner codebase:
  - `tools-update.ts` → `tools.ts`
  - `server-update.ts` → `server.ts`
  - `http-server-fixed.ts` → `http-server.ts`
- **Fixed version management** - Now reads from package.json as single source of truth (fixes #5)
- **Updated imports** across 21+ files to use the new file names

### 🔍 New Diagnostic Tool
- **Added `n8n_diagnostic` tool** - Helps troubleshoot why n8n management tools might not be appearing
  - Shows environment variable status, API connectivity, and tool availability
  - Provides step-by-step troubleshooting guidance
  - Includes verbose mode for additional debug information

### 🧹 Code Cleanup
- Removed legacy HTTP server implementation with known issues
- Removed unused legacy API client
- Added version utility for consistent version handling
- Added script to sync runtime package version

## 📦 Installation

### Docker (Recommended)
```bash
docker pull ghcr.io/czlonkowski/n8n-mcp:2.7.0
```

### Claude Desktop
Update your configuration to use the latest version:
```json
{
  "mcpServers": {
    "n8n-mcp": {
      "command": "docker",
      "args": ["run", "-i", "--rm", "ghcr.io/czlonkowski/n8n-mcp:2.7.0"]
    }
  }
}
```

## 🐛 Bug Fixes
- Fixed version mismatch where version was hardcoded as 2.4.1 instead of reading from package.json
- Improved error messages for better debugging

## 📚 Documentation Updates
- Condensed version history in CLAUDE.md
- Updated documentation structure in README.md
- Removed outdated documentation files
- Added n8n_diagnostic tool to documentation

## 🙏 Acknowledgments
Thanks to all contributors and users who reported issues!

---

**Full Changelog**: https://github.com/czlonkowski/n8n-mcp/blob/main/CHANGELOG.md

78	scripts/audit-schema-coverage.ts	(Normal file)
@@ -0,0 +1,78 @@

/**
 * Database Schema Coverage Audit Script
 *
 * Audits the database to determine how many nodes have complete schema information
 * for resourceLocator mode validation. This helps assess the coverage of our
 * schema-driven validation approach.
 */

import Database from 'better-sqlite3';
import path from 'path';

const dbPath = path.join(__dirname, '../data/nodes.db');
const db = new Database(dbPath, { readonly: true });

console.log('=== Schema Coverage Audit ===\n');

// Query 1: How many nodes have resourceLocator properties?
const totalResourceLocator = db.prepare(`
  SELECT COUNT(*) as count FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
`).get() as { count: number };

console.log(`Nodes with resourceLocator properties: ${totalResourceLocator.count}`);

// Query 2: Of those, how many have modes defined?
const withModes = db.prepare(`
  SELECT COUNT(*) as count FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
    AND properties_schema LIKE '%modes%'
`).get() as { count: number };

console.log(`Nodes with modes defined: ${withModes.count}`);

// Query 3: Which nodes have resourceLocator but NO modes?
const withoutModes = db.prepare(`
  SELECT node_type, display_name
  FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
    AND properties_schema NOT LIKE '%modes%'
  LIMIT 10
`).all() as Array<{ node_type: string; display_name: string }>;

console.log(`\nSample nodes WITHOUT modes (showing 10):`);
withoutModes.forEach(node => {
  console.log(`  - ${node.display_name} (${node.node_type})`);
});

// Calculate coverage percentage
const coverage = totalResourceLocator.count > 0
  ? (withModes.count / totalResourceLocator.count) * 100
  : 0;

console.log(`\nSchema coverage: ${coverage.toFixed(1)}% of resourceLocator nodes have modes defined`);

// Query 4: Get some examples of nodes WITH modes for verification
console.log('\nSample nodes WITH modes (showing 5):');
const withModesExamples = db.prepare(`
  SELECT node_type, display_name
  FROM nodes
  WHERE properties_schema LIKE '%resourceLocator%'
    AND properties_schema LIKE '%modes%'
  LIMIT 5
`).all() as Array<{ node_type: string; display_name: string }>;

withModesExamples.forEach(node => {
  console.log(`  - ${node.display_name} (${node.node_type})`);
});

// Summary (note: the cast must be parenthesized before accessing .count)
console.log('\n=== Summary ===');
const totalNodes = (db.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number }).count;
console.log(`Total nodes in database: ${totalNodes}`);
console.log(`Nodes with resourceLocator: ${totalResourceLocator.count}`);
console.log(`Nodes with complete mode schemas: ${withModes.count}`);
console.log(`Nodes without mode schemas: ${totalResourceLocator.count - withModes.count}`);
console.log(`\nImplication: Schema-driven validation will apply to ${withModes.count} nodes.`);
console.log(`For the remaining ${totalResourceLocator.count - withModes.count} nodes, validation will be skipped (graceful degradation).`);

db.close();

45	scripts/generate-initial-release-notes.js	(Normal file)
@@ -0,0 +1,45 @@
#!/usr/bin/env node

/**
 * Generate release notes for the initial release
 * Used by GitHub Actions when no previous tag exists
 */

const { execSync } = require('child_process');

function generateInitialReleaseNotes(version) {
  try {
    // Get total commit count
    const commitCount = execSync('git rev-list --count HEAD', { encoding: 'utf8' }).trim();

    // Generate release notes
    const releaseNotes = [
      '### 🎉 Initial Release',
      '',
      `This is the initial release of n8n-mcp v${version}.`,
      '',
      '---',
      '',
      '**Release Statistics:**',
      `- Commit count: ${commitCount}`,
      '- First release setup'
    ];

    return releaseNotes.join('\n');

  } catch (error) {
    console.error(`Error generating initial release notes: ${error.message}`);
    return `Failed to generate initial release notes: ${error.message}`;
  }
}

// Parse command line arguments
const version = process.argv[2];

if (!version) {
  console.error('Usage: generate-initial-release-notes.js <version>');
  process.exit(1);
}

const releaseNotes = generateInitialReleaseNotes(version);
console.log(releaseNotes);
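For a quick local check of the output format, the script can be invoked directly with Node, matching its own usage message; the version argument below is a placeholder:

    node scripts/generate-initial-release-notes.js 2.22.7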
scripts/generate-release-notes.js (new file, 121 lines)
@@ -0,0 +1,121 @@
#!/usr/bin/env node

/**
 * Generate release notes from commit messages between two tags
 * Used by GitHub Actions to create automated release notes
 */

const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');

function generateReleaseNotes(previousTag, currentTag) {
  try {
    console.log(`Generating release notes from ${previousTag} to ${currentTag}`);

    // Get commits between tags
    const gitLogCommand = `git log --pretty=format:"%H|%s|%an|%ae|%ad" --date=short --no-merges ${previousTag}..${currentTag}`;
    const commitsOutput = execSync(gitLogCommand, { encoding: 'utf8' });

    if (!commitsOutput.trim()) {
      console.log('No commits found between tags');
      return 'No changes in this release.';
    }

    const commits = commitsOutput.trim().split('\n').map(line => {
      const [hash, subject, author, email, date] = line.split('|');
      return { hash, subject, author, email, date };
    });

    // Categorize commits
    const categories = {
      'feat': { title: '✨ Features', commits: [] },
      'fix': { title: '🐛 Bug Fixes', commits: [] },
      'docs': { title: '📚 Documentation', commits: [] },
      'refactor': { title: '♻️ Refactoring', commits: [] },
      'test': { title: '🧪 Testing', commits: [] },
      'perf': { title: '⚡ Performance', commits: [] },
      'style': { title: '💅 Styling', commits: [] },
      'ci': { title: '🔧 CI/CD', commits: [] },
      'build': { title: '📦 Build', commits: [] },
      'chore': { title: '🔧 Maintenance', commits: [] },
      'other': { title: '📝 Other Changes', commits: [] }
    };

    commits.forEach(commit => {
      const subject = commit.subject.toLowerCase();
      let categorized = false;

      // Check for conventional commit prefixes
      for (const [prefix, category] of Object.entries(categories)) {
        if (prefix !== 'other' && subject.startsWith(`${prefix}:`)) {
          category.commits.push(commit);
          categorized = true;
          break;
        }
      }

      // If not categorized, put in other
      if (!categorized) {
        categories.other.commits.push(commit);
      }
    });

    // Generate release notes
    const releaseNotes = [];

    for (const [key, category] of Object.entries(categories)) {
      if (category.commits.length > 0) {
        releaseNotes.push(`### ${category.title}`);
        releaseNotes.push('');

        category.commits.forEach(commit => {
          // Clean up the subject by removing the prefix if it exists
          let cleanSubject = commit.subject;
          const colonIndex = cleanSubject.indexOf(':');
          if (colonIndex !== -1 && cleanSubject.substring(0, colonIndex).match(/^(feat|fix|docs|refactor|test|perf|style|ci|build|chore)$/)) {
            cleanSubject = cleanSubject.substring(colonIndex + 1).trim();
            // Capitalize first letter
            cleanSubject = cleanSubject.charAt(0).toUpperCase() + cleanSubject.slice(1);
          }

          releaseNotes.push(`- ${cleanSubject} (${commit.hash.substring(0, 7)})`);
        });

        releaseNotes.push('');
      }
    }

    // Add commit statistics
    const totalCommits = commits.length;
    const contributors = [...new Set(commits.map(c => c.author))];

    releaseNotes.push('---');
    releaseNotes.push('');
    releaseNotes.push(`**Release Statistics:**`);
    releaseNotes.push(`- ${totalCommits} commit${totalCommits !== 1 ? 's' : ''}`);
    releaseNotes.push(`- ${contributors.length} contributor${contributors.length !== 1 ? 's' : ''}`);

    if (contributors.length <= 5) {
      releaseNotes.push(`- Contributors: ${contributors.join(', ')}`);
    }

    return releaseNotes.join('\n');

  } catch (error) {
    console.error(`Error generating release notes: ${error.message}`);
    return `Failed to generate release notes: ${error.message}`;
  }
}

// Parse command line arguments
const previousTag = process.argv[2];
const currentTag = process.argv[3];

if (!previousTag || !currentTag) {
  console.error('Usage: generate-release-notes.js <previous-tag> <current-tag>');
  process.exit(1);
}

const releaseNotes = generateReleaseNotes(previousTag, currentTag);
console.log(releaseNotes);
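To preview notes for an existing range, run it with two tags per its usage message; the tag names below are placeholders for real tags in the repository:

    node scripts/generate-release-notes.js v2.22.6 v2.22.7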
@@ -11,29 +11,8 @@ NC='\033[0m' # No Color
 
 echo "🚀 Preparing n8n-mcp for npm publish..."
 
-# Run tests first to ensure quality
-echo "🧪 Running tests..."
-TEST_OUTPUT=$(npm test 2>&1)
-TEST_EXIT_CODE=$?
-
-# Check test results - look for actual test failures vs coverage issues
-if echo "$TEST_OUTPUT" | grep -q "Tests.*failed"; then
-  # Extract failed count using sed (portable)
-  FAILED_COUNT=$(echo "$TEST_OUTPUT" | sed -n 's/.*Tests.*\([0-9]*\) failed.*/\1/p' | head -1)
-  if [ "$FAILED_COUNT" != "0" ] && [ "$FAILED_COUNT" != "" ]; then
-    echo -e "${RED}❌ $FAILED_COUNT test(s) failed. Aborting publish.${NC}"
-    echo "$TEST_OUTPUT" | tail -20
-    exit 1
-  fi
-fi
-
-# If we got here, tests passed - check coverage
-if echo "$TEST_OUTPUT" | grep -q "Coverage.*does not meet global threshold"; then
-  echo -e "${YELLOW}⚠️  All tests passed but coverage is below threshold${NC}"
-  echo -e "${YELLOW}   Consider improving test coverage before next release${NC}"
-else
-  echo -e "${GREEN}✅ All tests passed with good coverage!${NC}"
-fi
+# Skip tests - they already run in CI before merge/publish
+echo "⏭️  Skipping tests (already verified in CI)"
 
 # Sync version to runtime package first
 echo "🔄 Syncing version to package.runtime.json..."
@@ -80,6 +59,15 @@ node -e "
 const pkg = require('./package.json');
 pkg.name = 'n8n-mcp';
 pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
+pkg.main = 'dist/index.js';
+pkg.types = 'dist/index.d.ts';
+pkg.exports = {
+  '.': {
+    types: './dist/index.d.ts',
+    require: './dist/index.js',
+    import: './dist/index.js'
+  }
+};
 pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
 pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
 pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
 
scripts/test-ai-validation-debug.ts (new file, 189 lines)
@@ -0,0 +1,189 @@
#!/usr/bin/env node
/**
 * Debug test for AI validation issues
 * Reproduces the bugs found by n8n-mcp-tester
 */

import { validateAISpecificNodes, buildReverseConnectionMap } from '../src/services/ai-node-validator';
import type { WorkflowJson } from '../src/services/ai-tool-validators';
import { NodeTypeNormalizer } from '../src/utils/node-type-normalizer';

console.log('=== AI Validation Debug Tests ===\n');

// Test 1: AI Agent with NO language model connection
console.log('Test 1: Missing Language Model Detection');
const workflow1: WorkflowJson = {
  name: 'Test Missing LM',
  nodes: [
    {
      id: 'ai-agent-1',
      name: 'AI Agent',
      type: '@n8n/n8n-nodes-langchain.agent',
      position: [500, 300],
      parameters: {
        promptType: 'define',
        text: 'You are a helpful assistant'
      },
      typeVersion: 1.7
    }
  ],
  connections: {
    // NO connections - AI Agent is isolated
  }
};

console.log('Workflow:', JSON.stringify(workflow1, null, 2));

const reverseMap1 = buildReverseConnectionMap(workflow1);
console.log('\nReverse connection map for AI Agent:');
console.log('Entries:', Array.from(reverseMap1.entries()));
console.log('AI Agent connections:', reverseMap1.get('AI Agent'));

// Check node normalization
const normalizedType1 = NodeTypeNormalizer.normalizeToFullForm(workflow1.nodes[0].type);
console.log(`\nNode type: ${workflow1.nodes[0].type}`);
console.log(`Normalized type: ${normalizedType1}`);
console.log(`Match check: ${normalizedType1 === '@n8n/n8n-nodes-langchain.agent'}`);

const issues1 = validateAISpecificNodes(workflow1);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues1, null, 2));

const hasMissingLMError = issues1.some(
  i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
console.log(`\n✓ Has MISSING_LANGUAGE_MODEL error: ${hasMissingLMError}`);
console.log(`✗ Expected: true, Got: ${hasMissingLMError}`);

// Test 2: AI Agent WITH language model connection
console.log('\n\n' + '='.repeat(60));
console.log('Test 2: AI Agent WITH Language Model (Should be valid)');
const workflow2: WorkflowJson = {
  name: 'Test With LM',
  nodes: [
    {
      id: 'openai-1',
      name: 'OpenAI Chat Model',
      type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
      position: [200, 300],
      parameters: {
        modelName: 'gpt-4'
      },
      typeVersion: 1
    },
    {
      id: 'ai-agent-1',
      name: 'AI Agent',
      type: '@n8n/n8n-nodes-langchain.agent',
      position: [500, 300],
      parameters: {
        promptType: 'define',
        text: 'You are a helpful assistant'
      },
      typeVersion: 1.7
    }
  ],
  connections: {
    'OpenAI Chat Model': {
      ai_languageModel: [
        [
          {
            node: 'AI Agent',
            type: 'ai_languageModel',
            index: 0
          }
        ]
      ]
    }
  }
};

console.log('\nConnections:', JSON.stringify(workflow2.connections, null, 2));

const reverseMap2 = buildReverseConnectionMap(workflow2);
console.log('\nReverse connection map for AI Agent:');
console.log('AI Agent connections:', reverseMap2.get('AI Agent'));

const issues2 = validateAISpecificNodes(workflow2);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues2, null, 2));

const hasMissingLMError2 = issues2.some(
  i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
console.log(`\n✓ Should NOT have MISSING_LANGUAGE_MODEL error: ${!hasMissingLMError2}`);
console.log(`Expected: false, Got: ${hasMissingLMError2}`);

// Test 3: AI Agent with tools but no language model
console.log('\n\n' + '='.repeat(60));
console.log('Test 3: AI Agent with Tools but NO Language Model');
const workflow3: WorkflowJson = {
  name: 'Test Tools No LM',
  nodes: [
    {
      id: 'http-tool-1',
      name: 'HTTP Request Tool',
      type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
      position: [200, 300],
      parameters: {
        toolDescription: 'Calls an API',
        url: 'https://api.example.com'
      },
      typeVersion: 1.1
    },
    {
      id: 'ai-agent-1',
      name: 'AI Agent',
      type: '@n8n/n8n-nodes-langchain.agent',
      position: [500, 300],
      parameters: {
        promptType: 'define',
        text: 'You are a helpful assistant'
      },
      typeVersion: 1.7
    }
  ],
  connections: {
    'HTTP Request Tool': {
      ai_tool: [
        [
          {
            node: 'AI Agent',
            type: 'ai_tool',
            index: 0
          }
        ]
      ]
    }
  }
};

console.log('\nConnections:', JSON.stringify(workflow3.connections, null, 2));

const reverseMap3 = buildReverseConnectionMap(workflow3);
console.log('\nReverse connection map for AI Agent:');
const aiAgentConns = reverseMap3.get('AI Agent');
console.log('AI Agent connections:', aiAgentConns);
console.log('Connection types:', aiAgentConns?.map(c => c.type));

const issues3 = validateAISpecificNodes(workflow3);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues3, null, 2));

const hasMissingLMError3 = issues3.some(
  i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
const hasNoToolsInfo3 = issues3.some(
  i => i.severity === 'info' && i.message.includes('no ai_tool connections')
);

console.log(`\n✓ Should have MISSING_LANGUAGE_MODEL error: ${hasMissingLMError3}`);
console.log(`Expected: true, Got: ${hasMissingLMError3}`);
console.log(`✗ Should NOT have "no tools" info: ${!hasNoToolsInfo3}`);
console.log(`Expected: false, Got: ${hasNoToolsInfo3}`);

console.log('\n' + '='.repeat(60));
console.log('Summary:');
console.log(`Test 1 (No LM): ${hasMissingLMError ? 'PASS ✓' : 'FAIL ✗'}`);
console.log(`Test 2 (With LM): ${!hasMissingLMError2 ? 'PASS ✓' : 'FAIL ✗'}`);
console.log(`Test 3 (Tools, No LM): ${hasMissingLMError3 && !hasNoToolsInfo3 ? 'PASS ✓' : 'FAIL ✗'}`);
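Assuming tsx is available (the other debug scripts in this changeset print `npx tsx` invocations), the three checks above can be reproduced locally with:

    npx tsx scripts/test-ai-validation-debug.ts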
scripts/test-docker-fingerprint.ts (new file, 163 lines)
@@ -0,0 +1,163 @@
/**
 * Test Docker Host Fingerprinting
 * Verifies that host machine characteristics are stable across container recreations
 */

import { existsSync, readFileSync } from 'fs';
import { platform, arch } from 'os';
import { createHash } from 'crypto';

console.log('=== Docker Host Fingerprinting Test ===\n');

function generateHostFingerprint(): string {
  try {
    const signals: string[] = [];

    console.log('Collecting host signals...\n');

    // CPU info (stable across container recreations)
    if (existsSync('/proc/cpuinfo')) {
      const cpuinfo = readFileSync('/proc/cpuinfo', 'utf-8');
      const modelMatch = cpuinfo.match(/model name\s*:\s*(.+)/);
      const coresMatch = cpuinfo.match(/processor\s*:/g);

      if (modelMatch) {
        const cpuModel = modelMatch[1].trim();
        signals.push(cpuModel);
        console.log('✓ CPU Model:', cpuModel);
      }

      if (coresMatch) {
        const cores = `cores:${coresMatch.length}`;
        signals.push(cores);
        console.log('✓ CPU Cores:', coresMatch.length);
      }
    } else {
      console.log('✗ /proc/cpuinfo not available (Windows/Mac Docker)');
    }

    // Memory (stable)
    if (existsSync('/proc/meminfo')) {
      const meminfo = readFileSync('/proc/meminfo', 'utf-8');
      const totalMatch = meminfo.match(/MemTotal:\s+(\d+)/);

      if (totalMatch) {
        const memory = `mem:${totalMatch[1]}`;
        signals.push(memory);
        console.log('✓ Total Memory:', totalMatch[1], 'kB');
      }
    } else {
      console.log('✗ /proc/meminfo not available (Windows/Mac Docker)');
    }

    // Docker network subnet
    const networkInfo = getDockerNetworkInfo();
    if (networkInfo) {
      signals.push(networkInfo);
      console.log('✓ Network Info:', networkInfo);
    } else {
      console.log('✗ Network info not available');
    }

    // Platform basics (stable)
    signals.push(platform(), arch());
    console.log('✓ Platform:', platform());
    console.log('✓ Architecture:', arch());

    // Generate stable ID from all signals
    console.log('\nCombined signals:', signals.join(' | '));
    const fingerprint = signals.join('-');
    const userId = createHash('sha256').update(fingerprint).digest('hex').substring(0, 16);

    return userId;

  } catch (error) {
    console.error('Error generating fingerprint:', error);
    // Fallback
    return createHash('sha256')
      .update(`${platform()}-${arch()}-docker`)
      .digest('hex')
      .substring(0, 16);
  }
}

function getDockerNetworkInfo(): string | null {
  try {
    // Read routing table to get bridge network
    if (existsSync('/proc/net/route')) {
      const routes = readFileSync('/proc/net/route', 'utf-8');
      const lines = routes.split('\n');

      for (const line of lines) {
        if (line.includes('eth0')) {
          const parts = line.split(/\s+/);
          if (parts[2]) {
            const gateway = parseInt(parts[2], 16).toString(16);
            return `net:${gateway}`;
          }
        }
      }
    }
  } catch {
    // Ignore errors
  }
  return null;
}

// Test environment detection
console.log('\n=== Environment Detection ===\n');

const isDocker = process.env.IS_DOCKER === 'true';
const isCloudEnvironment = !!(
  process.env.RAILWAY_ENVIRONMENT ||
  process.env.RENDER ||
  process.env.FLY_APP_NAME ||
  process.env.HEROKU_APP_NAME ||
  process.env.AWS_EXECUTION_ENV ||
  process.env.KUBERNETES_SERVICE_HOST
);

console.log('IS_DOCKER env:', process.env.IS_DOCKER);
console.log('Docker detected:', isDocker);
console.log('Cloud environment:', isCloudEnvironment);

// Generate fingerprints
console.log('\n=== Fingerprint Generation ===\n');

const fingerprint1 = generateHostFingerprint();
const fingerprint2 = generateHostFingerprint();
const fingerprint3 = generateHostFingerprint();

console.log('\nFingerprint 1:', fingerprint1);
console.log('Fingerprint 2:', fingerprint2);
console.log('Fingerprint 3:', fingerprint3);

const consistent = fingerprint1 === fingerprint2 && fingerprint2 === fingerprint3;
console.log('\nConsistent:', consistent ? '✓ YES' : '✗ NO');

// Test explicit ID override
console.log('\n=== Environment Variable Override Test ===\n');

if (process.env.N8N_MCP_USER_ID) {
  console.log('Explicit user ID:', process.env.N8N_MCP_USER_ID);
  console.log('This would override the fingerprint');
} else {
  console.log('No explicit user ID set');
  console.log('To test: N8N_MCP_USER_ID=my-custom-id npx tsx ' + process.argv[1]);
}

// Stability estimate
console.log('\n=== Stability Analysis ===\n');

const hasStableSignals = existsSync('/proc/cpuinfo') || existsSync('/proc/meminfo');
if (hasStableSignals) {
  console.log('✓ Host-based signals available');
  console.log('✓ Fingerprint should be stable across container recreations');
  console.log('✓ Different fingerprints on different physical hosts');
} else {
  console.log('⚠️  Limited host signals (Windows/Mac Docker Desktop)');
  console.log('⚠️  Fingerprint may not be fully stable');
  console.log('💡 Recommendation: Use N8N_MCP_USER_ID env var for stability');
}

console.log('\n');
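To exercise the override path, the hint the script itself prints can be followed as-is; the ID value below is a placeholder:

    N8N_MCP_USER_ID=my-custom-id npx tsx scripts/test-docker-fingerprint.ts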
scripts/test-user-id-persistence.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
/**
 * Test User ID Persistence
 * Verifies that user IDs are consistent across sessions and modes
 */

import { TelemetryConfigManager } from '../src/telemetry/config-manager';
import { hostname, platform, arch, homedir } from 'os';
import { createHash } from 'crypto';

console.log('=== User ID Persistence Test ===\n');

// Test 1: Verify deterministic ID generation
console.log('Test 1: Deterministic ID Generation');
console.log('-----------------------------------');

const machineId = `${hostname()}-${platform()}-${arch()}-${homedir()}`;
const expectedUserId = createHash('sha256')
  .update(machineId)
  .digest('hex')
  .substring(0, 16);

console.log('Machine characteristics:');
console.log('  hostname:', hostname());
console.log('  platform:', platform());
console.log('  arch:', arch());
console.log('  homedir:', homedir());
console.log('\nGenerated machine ID:', machineId);
console.log('Expected user ID:', expectedUserId);

// Test 2: Load actual config
console.log('\n\nTest 2: Actual Config Manager');
console.log('-----------------------------------');

const configManager = TelemetryConfigManager.getInstance();
const actualUserId = configManager.getUserId();
const config = configManager.loadConfig();

console.log('Actual user ID:', actualUserId);
console.log('Config first run:', config.firstRun || 'Unknown');
console.log('Config version:', config.version || 'Unknown');
console.log('Telemetry enabled:', config.enabled);

// Test 3: Verify consistency
console.log('\n\nTest 3: Consistency Check');
console.log('-----------------------------------');

const match = actualUserId === expectedUserId;
console.log('User IDs match:', match ? '✓ YES' : '✗ NO');

if (!match) {
  console.log('WARNING: User ID mismatch detected!');
  console.log('This could indicate an implementation issue.');
}

// Test 4: Multiple loads (simulate multiple sessions)
console.log('\n\nTest 4: Multiple Session Simulation');
console.log('-----------------------------------');

const userId1 = configManager.getUserId();
const userId2 = TelemetryConfigManager.getInstance().getUserId();
const userId3 = configManager.getUserId();

console.log('Session 1 user ID:', userId1);
console.log('Session 2 user ID:', userId2);
console.log('Session 3 user ID:', userId3);

const consistent = userId1 === userId2 && userId2 === userId3;
console.log('All sessions consistent:', consistent ? '✓ YES' : '✗ NO');

// Test 5: Docker environment simulation
console.log('\n\nTest 5: Docker Environment Check');
console.log('-----------------------------------');

const isDocker = process.env.IS_DOCKER === 'true';
console.log('Running in Docker:', isDocker);

if (isDocker) {
  console.log('\n⚠️  DOCKER MODE DETECTED');
  console.log('In Docker, user IDs may change across container recreations because:');
  console.log('  1. Container hostname changes each time');
  console.log('  2. Config file is not persisted (no volume mount)');
  console.log('  3. Each container gets a new ephemeral filesystem');
  console.log('\nRecommendation: Mount ~/.n8n-mcp as a volume for persistent user IDs');
}

// Test 6: Environment variable override check
console.log('\n\nTest 6: Environment Variable Override');
console.log('-----------------------------------');

const telemetryDisabledVars = [
  'N8N_MCP_TELEMETRY_DISABLED',
  'TELEMETRY_DISABLED',
  'DISABLE_TELEMETRY'
];

telemetryDisabledVars.forEach(varName => {
  const value = process.env[varName];
  if (value !== undefined) {
    console.log(`${varName}:`, value);
  }
});

console.log('\nTelemetry status:', configManager.isEnabled() ? 'ENABLED' : 'DISABLED');

// Summary
console.log('\n\n=== SUMMARY ===');
console.log('User ID:', actualUserId);
console.log('Deterministic:', match ? 'YES ✓' : 'NO ✗');
console.log('Persistent across sessions:', consistent ? 'YES ✓' : 'NO ✗');
console.log('Telemetry enabled:', config.enabled ? 'YES' : 'NO');
console.log('Docker mode:', isDocker ? 'YES' : 'NO');

if (isDocker && !process.env.N8N_MCP_CONFIG_VOLUME) {
  console.log('\n⚠️  WARNING: Running in Docker without persistent volume!');
  console.log('User IDs will change on container recreation.');
  console.log('Mount /home/nodejs/.n8n-mcp to persist telemetry config.');
}

console.log('\n');
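A minimal sketch of a local run, assuming the same `npx tsx` convention as the other scripts in this changeset:

    npx tsx scripts/test-user-id-persistence.ts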
scripts/test-workflow-versioning.ts (new file, 287 lines)
@@ -0,0 +1,287 @@
#!/usr/bin/env node
/**
 * Test Workflow Versioning System
 *
 * Tests the complete workflow rollback and versioning functionality:
 * - Automatic backup creation
 * - Auto-pruning to 10 versions
 * - Version history retrieval
 * - Rollback with validation
 * - Manual pruning and cleanup
 * - Storage statistics
 */

import { NodeRepository } from '../src/database/node-repository';
import { createDatabaseAdapter } from '../src/database/database-adapter';
import { WorkflowVersioningService } from '../src/services/workflow-versioning-service';
import { logger } from '../src/utils/logger';
import { existsSync } from 'fs';
import * as path from 'path';

// Mock workflow for testing
const createMockWorkflow = (id: string, name: string, nodeCount: number = 3) => ({
  id,
  name,
  active: false,
  nodes: Array.from({ length: nodeCount }, (_, i) => ({
    id: `node-${i}`,
    name: `Node ${i}`,
    type: 'n8n-nodes-base.set',
    typeVersion: 1,
    position: [250 + i * 200, 300],
    parameters: { values: { string: [{ name: `field${i}`, value: `value${i}` }] } }
  })),
  connections: nodeCount > 1 ? {
    'node-0': { main: [[{ node: 'node-1', type: 'main', index: 0 }]] },
    ...(nodeCount > 2 && { 'node-1': { main: [[{ node: 'node-2', type: 'main', index: 0 }]] } })
  } : {},
  settings: {}
});

async function runTests() {
  console.log('🧪 Testing Workflow Versioning System\n');

  // Find database path
  const possiblePaths = [
    path.join(process.cwd(), 'data', 'nodes.db'),
    path.join(__dirname, '../../data', 'nodes.db'),
    './data/nodes.db'
  ];

  let dbPath: string | null = null;
  for (const p of possiblePaths) {
    if (existsSync(p)) {
      dbPath = p;
      break;
    }
  }

  if (!dbPath) {
    console.error('❌ Database not found. Please run npm run rebuild first.');
    process.exit(1);
  }

  console.log(`📁 Using database: ${dbPath}\n`);

  // Initialize repository
  const db = await createDatabaseAdapter(dbPath);
  const repository = new NodeRepository(db);
  const service = new WorkflowVersioningService(repository);

  const workflowId = 'test-workflow-001';
  let testsPassed = 0;
  let testsFailed = 0;

  try {
    // Test 1: Create initial backup
    console.log('📝 Test 1: Create initial backup');
    const workflow1 = createMockWorkflow(workflowId, 'Test Workflow v1', 3);
    const backup1 = await service.createBackup(workflowId, workflow1, {
      trigger: 'partial_update',
      operations: [{ type: 'addNode', node: workflow1.nodes[0] }]
    });

    if (backup1.versionId && backup1.versionNumber === 1 && backup1.pruned === 0) {
      console.log('✅ Initial backup created successfully');
      console.log(`   Version ID: ${backup1.versionId}, Version Number: ${backup1.versionNumber}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to create initial backup');
      testsFailed++;
    }

    // Test 2: Create multiple backups to test auto-pruning
    console.log('\n📝 Test 2: Create 12 backups to test auto-pruning (should keep only 10)');
    for (let i = 2; i <= 12; i++) {
      const workflow = createMockWorkflow(workflowId, `Test Workflow v${i}`, 3 + i);
      await service.createBackup(workflowId, workflow, {
        trigger: i % 3 === 0 ? 'full_update' : 'partial_update',
        operations: [{ type: 'addNode', node: { id: `node-${i}` } }]
      });
    }

    const versions = await service.getVersionHistory(workflowId, 100);
    if (versions.length === 10) {
      console.log(`✅ Auto-pruning works correctly (kept exactly 10 versions)`);
      console.log(`   Latest version: ${versions[0].versionNumber}, Oldest: ${versions[9].versionNumber}`);
      testsPassed++;
    } else {
      console.log(`❌ Auto-pruning failed (expected 10 versions, got ${versions.length})`);
      testsFailed++;
    }

    // Test 3: Get version history
    console.log('\n📝 Test 3: Get version history');
    const history = await service.getVersionHistory(workflowId, 5);
    if (history.length === 5 && history[0].versionNumber > history[4].versionNumber) {
      console.log(`✅ Version history retrieved successfully (${history.length} versions)`);
      console.log('   Recent versions:');
      history.forEach(v => {
        console.log(`   - v${v.versionNumber} (${v.trigger}) - ${v.workflowName} - ${(v.size / 1024).toFixed(2)} KB`);
      });
      testsPassed++;
    } else {
      console.log('❌ Failed to get version history');
      testsFailed++;
    }

    // Test 4: Get specific version
    console.log('\n📝 Test 4: Get specific version details');
    const specificVersion = await service.getVersion(history[2].id);
    if (specificVersion && specificVersion.workflowSnapshot) {
      console.log(`✅ Retrieved version ${specificVersion.versionNumber} successfully`);
      console.log(`   Workflow name: ${specificVersion.workflowName}`);
      console.log(`   Node count: ${specificVersion.workflowSnapshot.nodes.length}`);
      console.log(`   Trigger: ${specificVersion.trigger}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to get specific version');
      testsFailed++;
    }

    // Test 5: Compare two versions
    console.log('\n📝 Test 5: Compare two versions');
    if (history.length >= 2) {
      const diff = await service.compareVersions(history[0].id, history[1].id);
      console.log(`✅ Version comparison successful`);
      console.log(`   Comparing v${diff.version1Number} → v${diff.version2Number}`);
      console.log(`   Added nodes: ${diff.addedNodes.length}`);
      console.log(`   Removed nodes: ${diff.removedNodes.length}`);
      console.log(`   Modified nodes: ${diff.modifiedNodes.length}`);
      console.log(`   Connection changes: ${diff.connectionChanges}`);
      testsPassed++;
    } else {
      console.log('❌ Not enough versions to compare');
      testsFailed++;
    }

    // Test 6: Manual pruning
    console.log('\n📝 Test 6: Manual pruning (keep only 5 versions)');
    const pruneResult = await service.pruneVersions(workflowId, 5);
    if (pruneResult.pruned === 5 && pruneResult.remaining === 5) {
      console.log(`✅ Manual pruning successful`);
      console.log(`   Pruned: ${pruneResult.pruned} versions, Remaining: ${pruneResult.remaining}`);
      testsPassed++;
    } else {
      console.log(`❌ Manual pruning failed (expected 5 pruned, 5 remaining, got ${pruneResult.pruned} pruned, ${pruneResult.remaining} remaining)`);
      testsFailed++;
    }

    // Test 7: Storage statistics
    console.log('\n📝 Test 7: Storage statistics');
    const stats = await service.getStorageStats();
    if (stats.totalVersions > 0 && stats.byWorkflow.length > 0) {
      console.log(`✅ Storage stats retrieved successfully`);
      console.log(`   Total versions: ${stats.totalVersions}`);
      console.log(`   Total size: ${stats.totalSizeFormatted}`);
      console.log(`   Workflows with versions: ${stats.byWorkflow.length}`);
      stats.byWorkflow.forEach(w => {
        console.log(`   - ${w.workflowName}: ${w.versionCount} versions, ${w.totalSizeFormatted}`);
      });
      testsPassed++;
    } else {
      console.log('❌ Failed to get storage stats');
      testsFailed++;
    }

    // Test 8: Delete specific version
    console.log('\n📝 Test 8: Delete specific version');
    const versionsBeforeDelete = await service.getVersionHistory(workflowId, 100);
    const versionToDelete = versionsBeforeDelete[versionsBeforeDelete.length - 1];
    const deleteResult = await service.deleteVersion(versionToDelete.id);
    const versionsAfterDelete = await service.getVersionHistory(workflowId, 100);

    if (deleteResult.success && versionsAfterDelete.length === versionsBeforeDelete.length - 1) {
      console.log(`✅ Version deletion successful`);
      console.log(`   Deleted version ${versionToDelete.versionNumber}`);
      console.log(`   Remaining versions: ${versionsAfterDelete.length}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to delete version');
      testsFailed++;
    }

    // Test 9: Test different trigger types
    console.log('\n📝 Test 9: Test different trigger types');
    const workflow2 = createMockWorkflow(workflowId, 'Test Workflow Autofix', 2);
    const backupAutofix = await service.createBackup(workflowId, workflow2, {
      trigger: 'autofix',
      fixTypes: ['expression-format', 'typeversion-correction']
    });

    const workflow3 = createMockWorkflow(workflowId, 'Test Workflow Full Update', 4);
    const backupFull = await service.createBackup(workflowId, workflow3, {
      trigger: 'full_update',
      metadata: { reason: 'Major refactoring' }
    });

    const allVersions = await service.getVersionHistory(workflowId, 100);
    const autofixVersions = allVersions.filter(v => v.trigger === 'autofix');
    const fullUpdateVersions = allVersions.filter(v => v.trigger === 'full_update');
    const partialUpdateVersions = allVersions.filter(v => v.trigger === 'partial_update');

    if (autofixVersions.length > 0 && fullUpdateVersions.length > 0 && partialUpdateVersions.length > 0) {
      console.log(`✅ All trigger types working correctly`);
      console.log(`   Partial updates: ${partialUpdateVersions.length}`);
      console.log(`   Full updates: ${fullUpdateVersions.length}`);
      console.log(`   Autofixes: ${autofixVersions.length}`);
      testsPassed++;
    } else {
      console.log('❌ Failed to create versions with different trigger types');
      testsFailed++;
    }

    // Test 10: Cleanup - Delete all versions for workflow
    console.log('\n📝 Test 10: Delete all versions for workflow');
    const deleteAllResult = await service.deleteAllVersions(workflowId);
    const versionsAfterDeleteAll = await service.getVersionHistory(workflowId, 100);

    if (deleteAllResult.deleted > 0 && versionsAfterDeleteAll.length === 0) {
      console.log(`✅ Delete all versions successful`);
      console.log(`   Deleted ${deleteAllResult.deleted} versions`);
      testsPassed++;
    } else {
      console.log('❌ Failed to delete all versions');
      testsFailed++;
    }

    // Test 11: Truncate all versions (requires confirmation)
    console.log('\n📝 Test 11: Test truncate without confirmation');
    const truncateResult1 = await service.truncateAllVersions(false);
    if (truncateResult1.deleted === 0 && truncateResult1.message.includes('not confirmed')) {
      console.log(`✅ Truncate safety check works (requires confirmation)`);
      testsPassed++;
    } else {
      console.log('❌ Truncate safety check failed');
      testsFailed++;
    }

    // Summary
    console.log('\n' + '='.repeat(60));
    console.log('📊 Test Summary');
    console.log('='.repeat(60));
    console.log(`✅ Passed: ${testsPassed}`);
    console.log(`❌ Failed: ${testsFailed}`);
    console.log(`📈 Success Rate: ${((testsPassed / (testsPassed + testsFailed)) * 100).toFixed(1)}%`);
    console.log('='.repeat(60));

    if (testsFailed === 0) {
      console.log('\n🎉 All tests passed! Workflow versioning system is working correctly.');
      process.exit(0);
    } else {
      console.log('\n⚠️  Some tests failed. Please review the implementation.');
      process.exit(1);
    }

  } catch (error: any) {
    console.error('\n❌ Test suite failed with error:', error.message);
    console.error(error.stack);
    process.exit(1);
  }
}

// Run tests
runTests().catch(error => {
  console.error('Fatal error:', error);
  process.exit(1);
});
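Per the script's own precondition message, the node database must exist first; a sketch of a full run, assuming the `npx tsx` convention used elsewhere in this changeset:

    npm run rebuild && npx tsx scripts/test-workflow-versioning.ts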
src/data/canonical-ai-tool-examples.json (new file, 310 lines)
@@ -0,0 +1,310 @@
{
  "description": "Canonical configuration examples for critical AI tools based on FINAL_AI_VALIDATION_SPEC.md",
  "version": "1.0.0",
  "examples": [
    {
      "node_type": "@n8n/n8n-nodes-langchain.toolHttpRequest",
      "display_name": "HTTP Request Tool",
      "examples": [
        {
          "name": "Weather API Tool",
          "use_case": "Fetch current weather data for AI Agent",
          "complexity": "simple",
          "parameters": {
            "method": "GET",
            "url": "https://api.weatherapi.com/v1/current.json?key={{$credentials.weatherApiKey}}&q={city}",
            "toolDescription": "Get current weather conditions for a city. Provide the city name (e.g., 'London', 'New York') and receive temperature, humidity, wind speed, and conditions.",
            "placeholderDefinitions": {
              "values": [
                {
                  "name": "city",
                  "description": "Name of the city to get weather for",
                  "type": "string"
                }
              ]
            },
            "authentication": "predefinedCredentialType",
            "nodeCredentialType": "weatherApiApi"
          },
          "credentials": {
            "weatherApiApi": {
              "id": "1",
              "name": "Weather API account"
            }
          },
          "notes": "Example shows proper toolDescription, URL with placeholder, and credential configuration"
        },
        {
          "name": "GitHub Issues Tool",
          "use_case": "Create GitHub issues from AI Agent conversations",
          "complexity": "medium",
          "parameters": {
            "method": "POST",
            "url": "https://api.github.com/repos/{owner}/{repo}/issues",
            "toolDescription": "Create a new GitHub issue. Requires owner (repo owner username), repo (repository name), title, and body. Returns the created issue URL and number.",
            "placeholderDefinitions": {
              "values": [
                {
                  "name": "owner",
                  "description": "GitHub repository owner username",
                  "type": "string"
                },
                {
                  "name": "repo",
                  "description": "Repository name",
                  "type": "string"
                },
                {
                  "name": "title",
                  "description": "Issue title",
                  "type": "string"
                },
                {
                  "name": "body",
                  "description": "Issue description and details",
                  "type": "string"
                }
              ]
            },
            "sendBody": true,
            "specifyBody": "json",
            "jsonBody": "={{ { \"title\": $json.title, \"body\": $json.body } }}",
            "authentication": "predefinedCredentialType",
            "nodeCredentialType": "githubApi"
          },
          "credentials": {
            "githubApi": {
              "id": "2",
              "name": "GitHub credentials"
            }
          },
          "notes": "Example shows POST request with JSON body, multiple placeholders, and expressions"
        },
        {
          "name": "Slack Message Tool",
          "use_case": "Send Slack messages from AI Agent",
          "complexity": "simple",
          "parameters": {
            "method": "POST",
            "url": "https://slack.com/api/chat.postMessage",
            "toolDescription": "Send a message to a Slack channel. Provide channel ID or name (e.g., '#general', 'C1234567890') and message text.",
            "placeholderDefinitions": {
              "values": [
                {
                  "name": "channel",
                  "description": "Channel ID or name (e.g., #general)",
                  "type": "string"
                },
                {
                  "name": "text",
                  "description": "Message text to send",
                  "type": "string"
                }
              ]
            },
            "sendHeaders": true,
            "headerParameters": {
              "parameters": [
                {
                  "name": "Content-Type",
                  "value": "application/json; charset=utf-8"
                },
                {
                  "name": "Authorization",
                  "value": "=Bearer {{$credentials.slackApi.accessToken}}"
                }
              ]
            },
            "sendBody": true,
            "specifyBody": "json",
            "jsonBody": "={{ { \"channel\": $json.channel, \"text\": $json.text } }}",
            "authentication": "predefinedCredentialType",
            "nodeCredentialType": "slackApi"
          },
          "credentials": {
            "slackApi": {
              "id": "3",
              "name": "Slack account"
            }
          },
          "notes": "Example shows headers with credential expressions and JSON body construction"
        }
      ]
    },
    {
      "node_type": "@n8n/n8n-nodes-langchain.toolCode",
      "display_name": "Code Tool",
      "examples": [
        {
          "name": "Calculate Shipping Cost",
          "use_case": "Calculate shipping costs based on weight and distance",
          "complexity": "simple",
          "parameters": {
            "name": "calculate_shipping_cost",
            "description": "Calculate shipping cost based on package weight (in kg) and distance (in km). Returns the cost in USD.",
            "language": "javaScript",
            "code": "const baseRate = 5;\nconst perKgRate = 2;\nconst perKmRate = 0.1;\n\nconst weight = $input.weight || 0;\nconst distance = $input.distance || 0;\n\nconst cost = baseRate + (weight * perKgRate) + (distance * perKmRate);\n\nreturn { cost: parseFloat(cost.toFixed(2)), currency: 'USD' };",
            "specifyInputSchema": true,
            "schemaType": "manual",
            "inputSchema": "{\n  \"type\": \"object\",\n  \"properties\": {\n    \"weight\": {\n      \"type\": \"number\",\n      \"description\": \"Package weight in kilograms\"\n    },\n    \"distance\": {\n      \"type\": \"number\",\n      \"description\": \"Shipping distance in kilometers\"\n    }\n  },\n  \"required\": [\"weight\", \"distance\"]\n}"
          },
          "notes": "Example shows proper function naming, detailed description, input schema, and return value"
        },
        {
          "name": "Format Customer Data",
          "use_case": "Transform and validate customer information",
          "complexity": "medium",
          "parameters": {
            "name": "format_customer_data",
            "description": "Format and validate customer data. Takes raw customer info (name, email, phone) and returns formatted object with validation status.",
            "language": "javaScript",
            "code": "const { name, email, phone } = $input;\n\n// Validation\nconst emailRegex = /^[^\\s@]+@[^\\s@]+\\.[^\\s@]+$/;\nconst phoneRegex = /^\\+?[1-9]\\d{1,14}$/;\n\nconst errors = [];\nif (!emailRegex.test(email)) errors.push('Invalid email format');\nif (!phoneRegex.test(phone)) errors.push('Invalid phone format');\n\n// Formatting\nconst formatted = {\n  name: name.trim(),\n  email: email.toLowerCase().trim(),\n  phone: phone.replace(/\\s/g, ''),\n  valid: errors.length === 0,\n  errors: errors\n};\n\nreturn formatted;",
            "specifyInputSchema": true,
            "schemaType": "manual",
            "inputSchema": "{\n  \"type\": \"object\",\n  \"properties\": {\n    \"name\": {\n      \"type\": \"string\",\n      \"description\": \"Customer full name\"\n    },\n    \"email\": {\n      \"type\": \"string\",\n      \"description\": \"Customer email address\"\n    },\n    \"phone\": {\n      \"type\": \"string\",\n      \"description\": \"Customer phone number\"\n    }\n  },\n  \"required\": [\"name\", \"email\", \"phone\"]\n}"
          },
          "notes": "Example shows data validation, formatting, and structured error handling"
        },
        {
          "name": "Parse Date Range",
          "use_case": "Convert natural language date ranges to ISO format",
          "complexity": "medium",
          "parameters": {
            "name": "parse_date_range",
            "description": "Parse natural language date ranges (e.g., 'last 7 days', 'this month', 'Q1 2024') into start and end dates in ISO format.",
            "language": "javaScript",
            "code": "const input = $input.dateRange || '';\nconst now = new Date();\nlet start, end;\n\nif (input.includes('last') && input.includes('days')) {\n  const days = parseInt(input.match(/\\d+/)[0]);\n  start = new Date(now.getTime() - (days * 24 * 60 * 60 * 1000));\n  end = now;\n} else if (input === 'this month') {\n  start = new Date(now.getFullYear(), now.getMonth(), 1);\n  end = new Date(now.getFullYear(), now.getMonth() + 1, 0);\n} else if (input === 'this year') {\n  start = new Date(now.getFullYear(), 0, 1);\n  end = new Date(now.getFullYear(), 11, 31);\n} else {\n  throw new Error('Unsupported date range format');\n}\n\nreturn {\n  startDate: start.toISOString().split('T')[0],\n  endDate: end.toISOString().split('T')[0],\n  daysCount: Math.ceil((end - start) / (24 * 60 * 60 * 1000))\n};",
            "specifyInputSchema": true,
            "schemaType": "manual",
            "inputSchema": "{\n  \"type\": \"object\",\n  \"properties\": {\n    \"dateRange\": {\n      \"type\": \"string\",\n      \"description\": \"Natural language date range (e.g., 'last 7 days', 'this month')\"\n    }\n  },\n  \"required\": [\"dateRange\"]\n}"
          },
          "notes": "Example shows complex logic, error handling, and date manipulation"
        }
      ]
    },
    {
      "node_type": "@n8n/n8n-nodes-langchain.agentTool",
      "display_name": "AI Agent Tool",
      "examples": [
        {
          "name": "Research Specialist Agent",
          "use_case": "Specialized sub-agent for in-depth research tasks",
          "complexity": "medium",
          "parameters": {
            "name": "research_specialist",
            "description": "Expert research agent that can search multiple sources, synthesize information, and provide comprehensive analysis on any topic. Use this when you need detailed, well-researched information.",
            "promptType": "define",
            "text": "You are a research specialist. Your role is to:\n1. Search for relevant information from multiple sources\n2. Synthesize findings into a coherent analysis\n3. Cite your sources\n4. Highlight key insights and patterns\n\nProvide thorough, well-structured research that answers the user's question comprehensively.",
            "systemMessage": "You are a meticulous researcher focused on accuracy and completeness. Always cite sources and acknowledge limitations in available information."
          },
          "connections": {
            "ai_languageModel": [
              {
                "node": "OpenAI GPT-4",
                "type": "ai_languageModel",
                "index": 0
              }
            ],
            "ai_tool": [
              {
                "node": "SerpApi Tool",
                "type": "ai_tool",
                "index": 0
              },
              {
                "node": "Wikipedia Tool",
                "type": "ai_tool",
                "index": 0
              }
            ]
          },
          "notes": "Example shows specialized sub-agent with custom prompt, specific system message, and multiple search tools"
        },
        {
          "name": "Data Analysis Agent",
          "use_case": "Sub-agent for analyzing and visualizing data",
          "complexity": "complex",
          "parameters": {
            "name": "data_analyst",
            "description": "Data analysis specialist that can process datasets, calculate statistics, identify trends, and generate insights. Use for any data analysis or statistical questions.",
            "promptType": "auto",
            "systemMessage": "You are a data analyst with expertise in statistics and data interpretation. Break down complex datasets into understandable insights. Use the Code Tool to perform calculations when needed.",
            "maxIterations": 10
          },
          "connections": {
            "ai_languageModel": [
              {
                "node": "Anthropic Claude",
                "type": "ai_languageModel",
                "index": 0
              }
            ],
            "ai_tool": [
              {
                "node": "Code Tool - Stats",
                "type": "ai_tool",
                "index": 0
              },
              {
                "node": "HTTP Request Tool - Data API",
                "type": "ai_tool",
                "index": 0
              }
            ]
          },
          "notes": "Example shows auto prompt type with specialized system message and analytical tools"
        }
      ]
    },
    {
      "node_type": "@n8n/n8n-nodes-langchain.mcpClientTool",
      "display_name": "MCP Client Tool",
      "examples": [
        {
          "name": "Filesystem MCP Tool",
          "use_case": "Access filesystem operations via MCP protocol",
          "complexity": "medium",
          "parameters": {
            "description": "Access file system operations through MCP. Can read files, list directories, create files, and search for content.",
            "mcpServer": {
              "transport": "stdio",
              "command": "npx",
              "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/allowed/directory"]
            },
            "tool": "read_file"
          },
          "notes": "Example shows stdio transport MCP server with filesystem access tool"
        },
        {
          "name": "Puppeteer MCP Tool",
          "use_case": "Browser automation via MCP for AI Agents",
          "complexity": "complex",
          "parameters": {
            "description": "Control a web browser to navigate pages, take screenshots, and extract content. Useful for web scraping and automated testing.",
            "mcpServer": {
              "transport": "stdio",
              "command": "npx",
              "args": ["-y", "@modelcontextprotocol/server-puppeteer"]
            },
            "tool": "puppeteer_navigate"
          },
          "notes": "Example shows Puppeteer MCP server for browser automation"
        },
        {
          "name": "Database MCP Tool",
          "use_case": "Query databases via MCP protocol",
          "complexity": "complex",
          "parameters": {
            "description": "Execute SQL queries and retrieve data from PostgreSQL databases. Supports SELECT, INSERT, UPDATE operations with proper escaping.",
            "mcpServer": {
              "transport": "sse",
              "url": "https://mcp-server.example.com/database"
            },
            "tool": "execute_query"
          },
          "notes": "Example shows SSE transport MCP server for remote database access"
        }
      ]
    }
  ]
}
@@ -232,15 +232,45 @@ class BetterSQLiteAdapter implements DatabaseAdapter {
  */
 class SQLJSAdapter implements DatabaseAdapter {
   private saveTimer: NodeJS.Timeout | null = null;
+  private saveIntervalMs: number;
+  private closed = false; // Prevent multiple close() calls
+
+  // Default save interval: 5 seconds (balance between data safety and performance)
+  // Configurable via SQLJS_SAVE_INTERVAL_MS environment variable
+  //
+  // DATA LOSS WINDOW: Up to 5 seconds of database changes may be lost if process
+  // crashes before scheduleSave() timer fires. This is acceptable because:
+  // 1. close() calls saveToFile() immediately on graceful shutdown
+  // 2. Docker/Kubernetes SIGTERM provides 30s for cleanup (more than enough)
+  // 3. The alternative (100ms interval) caused 2.2GB memory leaks in production
+  // 4. MCP server is primarily read-heavy (writes are rare)
+  private static readonly DEFAULT_SAVE_INTERVAL_MS = 5000;
 
   constructor(private db: any, private dbPath: string) {
-    // Set up auto-save on changes
-    this.scheduleSave();
+    // Read save interval from environment or use default
+    const envInterval = process.env.SQLJS_SAVE_INTERVAL_MS;
+    this.saveIntervalMs = envInterval ? parseInt(envInterval, 10) : SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
+
+    // Validate interval (minimum 100ms, maximum 60000ms = 1 minute)
+    if (isNaN(this.saveIntervalMs) || this.saveIntervalMs < 100 || this.saveIntervalMs > 60000) {
+      logger.warn(
+        `Invalid SQLJS_SAVE_INTERVAL_MS value: ${envInterval} (must be 100-60000ms), ` +
+        `using default ${SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS}ms`
+      );
+      this.saveIntervalMs = SQLJSAdapter.DEFAULT_SAVE_INTERVAL_MS;
+    }
+
+    logger.debug(`SQLJSAdapter initialized with save interval: ${this.saveIntervalMs}ms`);
+
+    // NOTE: No initial save scheduled here (optimization)
+    // Database is either:
+    // 1. Loaded from existing file (already persisted), or
+    // 2. New database (will be saved on first write operation)
   }
 
   prepare(sql: string): PreparedStatement {
     const stmt = this.db.prepare(sql);
-    this.scheduleSave();
+    // Don't schedule save on prepare - only on actual writes (via SQLJSStatement.run())
     return new SQLJSStatement(stmt, () => this.scheduleSave());
   }
 
@@ -250,11 +280,18 @@
   }
 
   close(): void {
+    if (this.closed) {
+      logger.debug('SQLJSAdapter already closed, skipping');
+      return;
+    }
+
     this.saveToFile();
     if (this.saveTimer) {
       clearTimeout(this.saveTimer);
+      this.saveTimer = null;
     }
     this.db.close();
+    this.closed = true;
   }
 
   pragma(key: string, value?: any): any {
@@ -301,19 +338,32 @@
     if (this.saveTimer) {
       clearTimeout(this.saveTimer);
     }
 
-    // Save after 100ms of inactivity
+    // Save after configured interval of inactivity (default: 5000ms)
+    // This debouncing reduces memory churn from frequent buffer allocations
+    //
+    // NOTE: Under constant write load, saves may be delayed until writes stop.
+    // This is acceptable because:
+    // 1. MCP server is primarily read-heavy (node lookups, searches)
+    // 2. Writes are rare (only during database rebuilds)
+    // 3. close() saves immediately on shutdown, flushing any pending changes
     this.saveTimer = setTimeout(() => {
       this.saveToFile();
-    }, 100);
+    }, this.saveIntervalMs);
   }
 
   private saveToFile(): void {
     try {
+      // Export database to Uint8Array (2-5MB typical)
       const data = this.db.export();
-      const buffer = Buffer.from(data);
-      fsSync.writeFileSync(this.dbPath, buffer);
+
+      // Write directly without Buffer.from() copy (saves 50% memory allocation)
+      // writeFileSync accepts Uint8Array directly, no need for Buffer conversion
+      fsSync.writeFileSync(this.dbPath, data);
       logger.debug(`Database saved to ${this.dbPath}`);
+
+      // Note: 'data' reference is automatically cleared when function exits
+      // V8 GC will reclaim the Uint8Array once it's no longer referenced
     } catch (error) {
       logger.error('Failed to save database', error);
     }
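The debounce interval validated in the constructor above can be tuned per deployment through the environment variable it reads; a minimal sketch, assuming the bin entry point set by the publish script (dist/mcp/index.js) as the process being launched:

    SQLJS_SAVE_INTERVAL_MS=10000 node dist/mcp/index.js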
@@ -7,11 +7,12 @@ export class NodeRepository {
|
||||
private db: DatabaseAdapter;
|
||||
|
||||
constructor(dbOrService: DatabaseAdapter | SQLiteStorageService) {
|
||||
if ('db' in dbOrService) {
|
||||
if (dbOrService instanceof SQLiteStorageService) {
|
||||
this.db = dbOrService.db;
|
||||
} else {
|
||||
this.db = dbOrService;
|
||||
return;
|
||||
}
|
||||
|
||||
this.db = dbOrService;
|
||||
}
|
||||
|
||||
  /**
@@ -122,10 +123,22 @@ export class NodeRepository {
    return rows.map(row => this.parseNodeRow(row));
  }

  /**
   * Legacy LIKE-based search method for direct repository usage.
   *
   * NOTE: MCP tools do NOT use this method. They use MCPServer.searchNodes()
   * which automatically detects and uses FTS5 full-text search when available.
   * See src/mcp/server.ts:1135-1148 for FTS5 implementation.
   *
   * This method remains for:
   * - Direct repository access in scripts/benchmarks
   * - Fallback when FTS5 table doesn't exist
   * - Legacy compatibility
   */
  searchNodes(query: string, mode: 'OR' | 'AND' | 'FUZZY' = 'OR', limit: number = 20): any[] {
    let sql = '';
    const params: any[] = [];

    if (mode === 'FUZZY') {
      // Simple fuzzy search
      sql = `
@@ -449,4 +462,501 @@ export class NodeRepository {

    return undefined;
  }

  /**
   * VERSION MANAGEMENT METHODS
   * Methods for working with node_versions and version_property_changes tables
   */

  /**
   * Save a specific node version to the database
   */
  saveNodeVersion(versionData: {
    nodeType: string;
    version: string;
    packageName: string;
    displayName: string;
    description?: string;
    category?: string;
    isCurrentMax?: boolean;
    propertiesSchema?: any;
    operations?: any;
    credentialsRequired?: any;
    outputs?: any;
    minimumN8nVersion?: string;
    breakingChanges?: any[];
    deprecatedProperties?: string[];
    addedProperties?: string[];
    releasedAt?: Date;
  }): void {
    const stmt = this.db.prepare(`
      INSERT OR REPLACE INTO node_versions (
        node_type, version, package_name, display_name, description,
        category, is_current_max, properties_schema, operations,
        credentials_required, outputs, minimum_n8n_version,
        breaking_changes, deprecated_properties, added_properties,
        released_at
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);

    stmt.run(
      versionData.nodeType,
      versionData.version,
      versionData.packageName,
      versionData.displayName,
      versionData.description || null,
      versionData.category || null,
      versionData.isCurrentMax ? 1 : 0,
      versionData.propertiesSchema ? JSON.stringify(versionData.propertiesSchema) : null,
      versionData.operations ? JSON.stringify(versionData.operations) : null,
      versionData.credentialsRequired ? JSON.stringify(versionData.credentialsRequired) : null,
      versionData.outputs ? JSON.stringify(versionData.outputs) : null,
      versionData.minimumN8nVersion || null,
      versionData.breakingChanges ? JSON.stringify(versionData.breakingChanges) : null,
      versionData.deprecatedProperties ? JSON.stringify(versionData.deprecatedProperties) : null,
      versionData.addedProperties ? JSON.stringify(versionData.addedProperties) : null,
      versionData.releasedAt || null
    );
  }

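For illustration, a hedged usage sketch of the method above (repo is assumed to be a NodeRepository instance; every value is hypothetical; getLatestNodeVersion is defined just below):

// Hypothetical node type and version; field shapes follow the signature above.
repo.saveNodeVersion({
  nodeType: 'n8n-nodes-base.if',
  version: '2.2',
  packageName: 'n8n-nodes-base',
  displayName: 'If',
  isCurrentMax: true,
  breakingChanges: [{ property: 'conditions', note: 'operator structure changed' }],
});
const latest = repo.getLatestNodeVersion('n8n-nodes-base.if'); // parsed row, or null
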
  /**
   * Get all available versions for a specific node type
   */
  getNodeVersions(nodeType: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const rows = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ?
      ORDER BY version DESC
    `).all(normalizedType) as any[];

    return rows.map(row => this.parseNodeVersionRow(row));
  }

  /**
   * Get the latest (current max) version for a node type
   */
  getLatestNodeVersion(nodeType: string): any | null {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const row = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ? AND is_current_max = 1
      LIMIT 1
    `).get(normalizedType) as any;

    if (!row) return null;
    return this.parseNodeVersionRow(row);
  }

  /**
   * Get a specific version of a node
   */
  getNodeVersion(nodeType: string, version: string): any | null {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const row = this.db.prepare(`
      SELECT * FROM node_versions
      WHERE node_type = ? AND version = ?
    `).get(normalizedType, version) as any;

    if (!row) return null;
    return this.parseNodeVersionRow(row);
  }

  /**
   * Save a property change between versions
   */
  savePropertyChange(changeData: {
    nodeType: string;
    fromVersion: string;
    toVersion: string;
    propertyName: string;
    changeType: 'added' | 'removed' | 'renamed' | 'type_changed' | 'requirement_changed' | 'default_changed';
    isBreaking?: boolean;
    oldValue?: string;
    newValue?: string;
    migrationHint?: string;
    autoMigratable?: boolean;
    migrationStrategy?: any;
    severity?: 'LOW' | 'MEDIUM' | 'HIGH';
  }): void {
    const stmt = this.db.prepare(`
      INSERT INTO version_property_changes (
        node_type, from_version, to_version, property_name, change_type,
        is_breaking, old_value, new_value, migration_hint, auto_migratable,
        migration_strategy, severity
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);

    stmt.run(
      changeData.nodeType,
      changeData.fromVersion,
      changeData.toVersion,
      changeData.propertyName,
      changeData.changeType,
      changeData.isBreaking ? 1 : 0,
      changeData.oldValue || null,
      changeData.newValue || null,
      changeData.migrationHint || null,
      changeData.autoMigratable ? 1 : 0,
      changeData.migrationStrategy ? JSON.stringify(changeData.migrationStrategy) : null,
      changeData.severity || 'MEDIUM'
    );
  }

  /**
   * Get property changes between two versions
   */
  getPropertyChanges(nodeType: string, fromVersion: string, toVersion: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const rows = this.db.prepare(`
      SELECT * FROM version_property_changes
      WHERE node_type = ? AND from_version = ? AND to_version = ?
      ORDER BY severity DESC, property_name
    `).all(normalizedType, fromVersion, toVersion) as any[];

    return rows.map(row => this.parsePropertyChangeRow(row));
  }

  /**
   * Get all breaking changes for upgrading from one version to another
   * Can handle multi-step upgrades (e.g., 1.0 -> 2.0 via 1.5)
   */
  getBreakingChanges(nodeType: string, fromVersion: string, toVersion?: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    let sql = `
      SELECT * FROM version_property_changes
      WHERE node_type = ? AND is_breaking = 1
    `;
    const params: any[] = [normalizedType];

    if (toVersion) {
      // Get changes between specific versions
      sql += ` AND from_version >= ? AND to_version <= ?`;
      params.push(fromVersion, toVersion);
    } else {
      // Get all breaking changes from this version onwards
      sql += ` AND from_version >= ?`;
      params.push(fromVersion);
    }

    sql += ` ORDER BY from_version, to_version, severity DESC`;

    const rows = this.db.prepare(sql).all(...params) as any[];
    return rows.map(row => this.parsePropertyChangeRow(row));
  }

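Combining the queries above with the helpers that follow, a hypothetical upgrade check (repo assumed to be a NodeRepository; getAutoMigratableChanges and hasVersionUpgradePath are defined just below):

const nodeType = 'n8n-nodes-base.if'; // hypothetical
if (repo.hasVersionUpgradePath(nodeType, '1.0', '2.2')) {
  const breaking = repo.getBreakingChanges(nodeType, '1.0', '2.2');
  const autoFixable = repo.getAutoMigratableChanges(nodeType, '1.0', '2.2');
  console.log(`${breaking.length} breaking change(s), ${autoFixable.length} auto-migratable`);
}
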
  /**
   * Get auto-migratable changes for a version upgrade
   */
  getAutoMigratableChanges(nodeType: string, fromVersion: string, toVersion: string): any[] {
    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(nodeType);

    const rows = this.db.prepare(`
      SELECT * FROM version_property_changes
      WHERE node_type = ?
        AND from_version = ?
        AND to_version = ?
        AND auto_migratable = 1
      ORDER BY severity DESC
    `).all(normalizedType, fromVersion, toVersion) as any[];

    return rows.map(row => this.parsePropertyChangeRow(row));
  }

  /**
   * Check if a version upgrade path exists between two versions
   */
  hasVersionUpgradePath(nodeType: string, fromVersion: string, toVersion: string): boolean {
    const versions = this.getNodeVersions(nodeType);
    if (versions.length === 0) return false;

    // Check if both versions exist
    const fromExists = versions.some(v => v.version === fromVersion);
    const toExists = versions.some(v => v.version === toVersion);

    return fromExists && toExists;
  }

  /**
   * Get count of nodes with multiple versions
   */
  getVersionedNodesCount(): number {
    const result = this.db.prepare(`
      SELECT COUNT(DISTINCT node_type) as count
      FROM node_versions
    `).get() as any;
    return result.count;
  }

  /**
   * Parse node version row from database
   */
  private parseNodeVersionRow(row: any): any {
    return {
      id: row.id,
      nodeType: row.node_type,
      version: row.version,
      packageName: row.package_name,
      displayName: row.display_name,
      description: row.description,
      category: row.category,
      isCurrentMax: Number(row.is_current_max) === 1,
      propertiesSchema: row.properties_schema ? this.safeJsonParse(row.properties_schema, []) : null,
      operations: row.operations ? this.safeJsonParse(row.operations, []) : null,
      credentialsRequired: row.credentials_required ? this.safeJsonParse(row.credentials_required, []) : null,
      outputs: row.outputs ? this.safeJsonParse(row.outputs, null) : null,
      minimumN8nVersion: row.minimum_n8n_version,
      breakingChanges: row.breaking_changes ? this.safeJsonParse(row.breaking_changes, []) : [],
      deprecatedProperties: row.deprecated_properties ? this.safeJsonParse(row.deprecated_properties, []) : [],
      addedProperties: row.added_properties ? this.safeJsonParse(row.added_properties, []) : [],
      releasedAt: row.released_at,
      createdAt: row.created_at
    };
  }

  /**
   * Parse property change row from database
   */
  private parsePropertyChangeRow(row: any): any {
    return {
      id: row.id,
      nodeType: row.node_type,
      fromVersion: row.from_version,
      toVersion: row.to_version,
      propertyName: row.property_name,
      changeType: row.change_type,
      isBreaking: Number(row.is_breaking) === 1,
      oldValue: row.old_value,
      newValue: row.new_value,
      migrationHint: row.migration_hint,
      autoMigratable: Number(row.auto_migratable) === 1,
      migrationStrategy: row.migration_strategy ? this.safeJsonParse(row.migration_strategy, null) : null,
      severity: row.severity,
      createdAt: row.created_at
    };
  }

  // ========================================
  // Workflow Versioning Methods
  // ========================================

  /**
   * Create a new workflow version (backup before modification)
   */
  createWorkflowVersion(data: {
    workflowId: string;
    versionNumber: number;
    workflowName: string;
    workflowSnapshot: any;
    trigger: 'partial_update' | 'full_update' | 'autofix';
    operations?: any[];
    fixTypes?: string[];
    metadata?: any;
  }): number {
    const stmt = this.db.prepare(`
      INSERT INTO workflow_versions (
        workflow_id, version_number, workflow_name, workflow_snapshot,
        trigger, operations, fix_types, metadata
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
    `);

    const result = stmt.run(
      data.workflowId,
      data.versionNumber,
      data.workflowName,
      JSON.stringify(data.workflowSnapshot),
      data.trigger,
      data.operations ? JSON.stringify(data.operations) : null,
      data.fixTypes ? JSON.stringify(data.fixTypes) : null,
      data.metadata ? JSON.stringify(data.metadata) : null
    );

    return result.lastInsertRowid as number;
  }

  /**
   * Get workflow versions ordered by version number (newest first)
   */
  getWorkflowVersions(workflowId: string, limit?: number): any[] {
    let sql = `
      SELECT * FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
    `;

    if (limit) {
      sql += ` LIMIT ?`;
      const rows = this.db.prepare(sql).all(workflowId, limit) as any[];
      return rows.map(row => this.parseWorkflowVersionRow(row));
    }

    const rows = this.db.prepare(sql).all(workflowId) as any[];
    return rows.map(row => this.parseWorkflowVersionRow(row));
  }

  /**
   * Get a specific workflow version by ID
   */
  getWorkflowVersion(versionId: number): any | null {
    const row = this.db.prepare(`
      SELECT * FROM workflow_versions WHERE id = ?
    `).get(versionId) as any;

    if (!row) return null;
    return this.parseWorkflowVersionRow(row);
  }

  /**
   * Get the latest workflow version for a workflow
   */
  getLatestWorkflowVersion(workflowId: string): any | null {
    const row = this.db.prepare(`
      SELECT * FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
      LIMIT 1
    `).get(workflowId) as any;

    if (!row) return null;
    return this.parseWorkflowVersionRow(row);
  }

  /**
   * Delete a specific workflow version
   */
  deleteWorkflowVersion(versionId: number): void {
    this.db.prepare(`
      DELETE FROM workflow_versions WHERE id = ?
    `).run(versionId);
  }

  /**
   * Delete all versions for a specific workflow
   */
  deleteWorkflowVersionsByWorkflowId(workflowId: string): number {
    const result = this.db.prepare(`
      DELETE FROM workflow_versions WHERE workflow_id = ?
    `).run(workflowId);

    return result.changes;
  }

  /**
   * Prune old workflow versions, keeping only the most recent N versions
   * Returns number of versions deleted
   */
  pruneWorkflowVersions(workflowId: string, keepCount: number): number {
    // Get all versions ordered by version_number DESC
    const versions = this.db.prepare(`
      SELECT id FROM workflow_versions
      WHERE workflow_id = ?
      ORDER BY version_number DESC
    `).all(workflowId) as any[];

    // If we have fewer versions than keepCount, no pruning needed
    if (versions.length <= keepCount) {
      return 0;
    }

    // Get IDs of versions to delete (all except the most recent keepCount)
    const idsToDelete = versions.slice(keepCount).map(v => v.id);

    if (idsToDelete.length === 0) {
      return 0;
    }

    // Delete old versions
    const placeholders = idsToDelete.map(() => '?').join(',');
    const result = this.db.prepare(`
      DELETE FROM workflow_versions WHERE id IN (${placeholders})
    `).run(...idsToDelete);

    return result.changes;
  }

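A sketch of the backup-then-prune flow these methods support (repo assumed to be a NodeRepository; getWorkflowVersionCount appears further down; IDs are hypothetical and the 10-version cap matches the workflow_versions schema comment later in this diff):

const versionId = repo.createWorkflowVersion({
  workflowId: 'wf-123',
  versionNumber: repo.getWorkflowVersionCount('wf-123') + 1,
  workflowName: 'My Workflow',
  workflowSnapshot: { nodes: [], connections: {} }, // full workflow JSON in practice
  trigger: 'partial_update',
});
const deleted = repo.pruneWorkflowVersions('wf-123', 10); // keep only the newest 10
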
  /**
   * Truncate the entire workflow_versions table
   * Returns number of rows deleted
   */
  truncateWorkflowVersions(): number {
    const result = this.db.prepare(`
      DELETE FROM workflow_versions
    `).run();

    return result.changes;
  }

  /**
   * Get count of versions for a specific workflow
   */
  getWorkflowVersionCount(workflowId: string): number {
    const result = this.db.prepare(`
      SELECT COUNT(*) as count FROM workflow_versions WHERE workflow_id = ?
    `).get(workflowId) as any;

    return result.count;
  }

  /**
   * Get storage statistics for workflow versions
   */
  getVersionStorageStats(): any {
    // Total versions
    const totalResult = this.db.prepare(`
      SELECT COUNT(*) as count FROM workflow_versions
    `).get() as any;

    // Total size (approximate - sum of JSON lengths)
    const sizeResult = this.db.prepare(`
      SELECT SUM(LENGTH(workflow_snapshot)) as total_size FROM workflow_versions
    `).get() as any;

    // Per-workflow breakdown
    const byWorkflow = this.db.prepare(`
      SELECT
        workflow_id,
        workflow_name,
        COUNT(*) as version_count,
        SUM(LENGTH(workflow_snapshot)) as total_size,
        MAX(created_at) as last_backup
      FROM workflow_versions
      GROUP BY workflow_id
      ORDER BY version_count DESC
    `).all() as any[];

    return {
      totalVersions: totalResult.count,
      totalSize: sizeResult.total_size || 0,
      byWorkflow: byWorkflow.map(row => ({
        workflowId: row.workflow_id,
        workflowName: row.workflow_name,
        versionCount: row.version_count,
        totalSize: row.total_size,
        lastBackup: row.last_backup
      }))
    };
  }

  /**
   * Parse workflow version row from database
   */
  private parseWorkflowVersionRow(row: any): any {
    return {
      id: row.id,
      workflowId: row.workflow_id,
      versionNumber: row.version_number,
      workflowName: row.workflow_name,
      workflowSnapshot: this.safeJsonParse(row.workflow_snapshot, null),
      trigger: row.trigger,
      operations: row.operations ? this.safeJsonParse(row.operations, null) : null,
      fixTypes: row.fix_types ? this.safeJsonParse(row.fix_types, null) : null,
      metadata: row.metadata ? this.safeJsonParse(row.metadata, null) : null,
      createdAt: row.created_at
    };
  }
}

@@ -25,6 +25,40 @@ CREATE INDEX IF NOT EXISTS idx_package ON nodes(package_name);
CREATE INDEX IF NOT EXISTS idx_ai_tool ON nodes(is_ai_tool);
CREATE INDEX IF NOT EXISTS idx_category ON nodes(category);

-- FTS5 full-text search index for nodes
CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
  node_type,
  display_name,
  description,
  documentation,
  operations,
  content=nodes,
  content_rowid=rowid
);

-- Triggers to keep FTS5 in sync with nodes table
CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes
BEGIN
  INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
  VALUES (new.rowid, new.node_type, new.display_name, new.description, new.documentation, new.operations);
END;

CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes
BEGIN
  UPDATE nodes_fts
  SET node_type = new.node_type,
      display_name = new.display_name,
      description = new.description,
      documentation = new.documentation,
      operations = new.operations
  WHERE rowid = new.rowid;
END;

CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes
BEGIN
  DELETE FROM nodes_fts WHERE rowid = old.rowid;
END;

-- Templates table for n8n workflow templates
CREATE TABLE IF NOT EXISTS templates (
  id INTEGER PRIMARY KEY,

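With nodes_fts and its sync triggers in place, ranked search is a plain FTS5 MATCH query. A hedged TypeScript sketch against the prepare/all adapter interface used elsewhere in this diff:

// Join the index back to nodes via rowid; 'rank' is FTS5's built-in relevance column.
const hits = db.prepare(`
  SELECT n.node_type, n.display_name
  FROM nodes_fts
  JOIN nodes n ON n.rowid = nodes_fts.rowid
  WHERE nodes_fts MATCH ?
  ORDER BY rank
  LIMIT 20
`).all('webhook OR http');
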
@@ -108,5 +142,95 @@ FROM template_node_configs
WHERE rank <= 5 -- Top 5 per node type
ORDER BY node_type, rank;

- -- Note: FTS5 tables are created conditionally at runtime if FTS5 is supported
- -- See template-repository.ts initializeFTS5() method
-- Note: Template FTS5 tables are created conditionally at runtime if FTS5 is supported
-- See template-repository.ts initializeFTS5() method
-- Node FTS5 table (nodes_fts) is created above during schema initialization

-- Node versions table for tracking all available versions of each node
-- Enables version upgrade detection and migration
CREATE TABLE IF NOT EXISTS node_versions (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  node_type TEXT NOT NULL,              -- e.g., "n8n-nodes-base.executeWorkflow"
  version TEXT NOT NULL,                -- e.g., "1.0", "1.1", "2.0"
  package_name TEXT NOT NULL,           -- e.g., "n8n-nodes-base"
  display_name TEXT NOT NULL,
  description TEXT,
  category TEXT,
  is_current_max INTEGER DEFAULT 0,     -- 1 if this is the latest version
  properties_schema TEXT,               -- JSON schema for this specific version
  operations TEXT,                      -- JSON array of operations for this version
  credentials_required TEXT,            -- JSON array of required credentials
  outputs TEXT,                         -- JSON array of output definitions
  minimum_n8n_version TEXT,             -- Minimum n8n version required (e.g., "1.0.0")
  breaking_changes TEXT,                -- JSON array of breaking changes from previous version
  deprecated_properties TEXT,           -- JSON array of removed/deprecated properties
  added_properties TEXT,                -- JSON array of newly added properties
  released_at DATETIME,                 -- When this version was released
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  UNIQUE(node_type, version),
  FOREIGN KEY (node_type) REFERENCES nodes(node_type) ON DELETE CASCADE
);

-- Indexes for version queries
CREATE INDEX IF NOT EXISTS idx_version_node_type ON node_versions(node_type);
CREATE INDEX IF NOT EXISTS idx_version_current_max ON node_versions(is_current_max);
CREATE INDEX IF NOT EXISTS idx_version_composite ON node_versions(node_type, version);

-- Version property changes for detailed migration tracking
-- Records specific property-level changes between versions
CREATE TABLE IF NOT EXISTS version_property_changes (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  node_type TEXT NOT NULL,
  from_version TEXT NOT NULL,           -- Version where change occurred (e.g., "1.0")
  to_version TEXT NOT NULL,             -- Target version (e.g., "1.1")
  property_name TEXT NOT NULL,          -- Property path (e.g., "parameters.inputFieldMapping")
  change_type TEXT NOT NULL CHECK(change_type IN (
    'added',                -- Property added (may be required)
    'removed',              -- Property removed/deprecated
    'renamed',              -- Property renamed
    'type_changed',         -- Property type changed
    'requirement_changed',  -- Required → Optional or vice versa
    'default_changed'       -- Default value changed
  )),
  is_breaking INTEGER DEFAULT 0,        -- 1 if this is a breaking change
  old_value TEXT,                       -- For renamed/type_changed: old property name or type
  new_value TEXT,                       -- For renamed/type_changed: new property name or type
  migration_hint TEXT,                  -- Human-readable migration guidance
  auto_migratable INTEGER DEFAULT 0,    -- 1 if can be automatically migrated
  migration_strategy TEXT,              -- JSON: strategy for auto-migration
  severity TEXT CHECK(severity IN ('LOW', 'MEDIUM', 'HIGH')), -- Impact severity
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  FOREIGN KEY (node_type, from_version) REFERENCES node_versions(node_type, version) ON DELETE CASCADE
);

-- Indexes for property change queries
CREATE INDEX IF NOT EXISTS idx_prop_changes_node ON version_property_changes(node_type);
CREATE INDEX IF NOT EXISTS idx_prop_changes_versions ON version_property_changes(node_type, from_version, to_version);
CREATE INDEX IF NOT EXISTS idx_prop_changes_breaking ON version_property_changes(is_breaking);
CREATE INDEX IF NOT EXISTS idx_prop_changes_auto ON version_property_changes(auto_migratable);

-- Workflow versions table for rollback and version history tracking
-- Stores full workflow snapshots before modifications for guaranteed reversibility
-- Auto-prunes to 10 versions per workflow to prevent memory leaks
CREATE TABLE IF NOT EXISTS workflow_versions (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  workflow_id TEXT NOT NULL,            -- n8n workflow ID
  version_number INTEGER NOT NULL,      -- Incremental version number (1, 2, 3...)
  workflow_name TEXT NOT NULL,          -- Workflow name at time of backup
  workflow_snapshot TEXT NOT NULL,      -- Full workflow JSON before modification
  trigger TEXT NOT NULL CHECK(trigger IN (
    'partial_update',   -- Created by n8n_update_partial_workflow
    'full_update',      -- Created by n8n_update_full_workflow
    'autofix'           -- Created by n8n_autofix_workflow
  )),
  operations TEXT,                      -- JSON array of diff operations (if partial update)
  fix_types TEXT,                       -- JSON array of fix types (if autofix)
  metadata TEXT,                        -- Additional context (JSON)
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
  UNIQUE(workflow_id, version_number)
);

-- Indexes for workflow version queries
CREATE INDEX IF NOT EXISTS idx_workflow_versions_workflow_id ON workflow_versions(workflow_id);
CREATE INDEX IF NOT EXISTS idx_workflow_versions_created_at ON workflow_versions(created_at);
CREATE INDEX IF NOT EXISTS idx_workflow_versions_trigger ON workflow_versions(trigger);
@@ -5,11 +5,13 @@
 * while maintaining simplicity for single-player use case
 */
import express from 'express';
import rateLimit from 'express-rate-limit';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
import { SSEServerTransport } from '@modelcontextprotocol/sdk/server/sse.js';
import { N8NDocumentationMCPServer } from './mcp/server';
import { ConsoleManager } from './utils/console-manager';
import { logger } from './utils/logger';
import { AuthManager } from './utils/auth';
import { readFileSync } from 'fs';
import dotenv from 'dotenv';
import { getStartupBaseUrl, formatEndpointUrls, detectBaseUrl } from './utils/url-detector';
@@ -186,11 +188,22 @@ export class SingleSessionHTTPServer {

  /**
   * Validate session ID format
   *
   * Accepts any non-empty string to support various MCP clients:
   * - UUIDv4 (internal n8n-mcp format)
   * - instance-{userId}-{hash}-{uuid} (multi-tenant format)
   * - Custom formats from mcp-remote and other proxies
   *
   * Security: Session validation happens via lookup in this.transports,
   * not format validation. This ensures compatibility with all MCP clients.
   *
   * @param sessionId - Session identifier from MCP client
   * @returns true if valid, false otherwise
   */
  private isValidSessionId(sessionId: string): boolean {
-   // UUID v4 format validation
-   const uuidv4Regex = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
-   return uuidv4Regex.test(sessionId);
    // Accept any non-empty string as session ID
    // This ensures compatibility with all MCP clients and proxies
    return Boolean(sessionId && sessionId.length > 0);
  }

  /**
@@ -988,8 +1001,41 @@ export class SingleSessionHTTPServer {
    });

-   // Main MCP endpoint with authentication
-   app.post('/mcp', jsonParser, async (req: express.Request, res: express.Response): Promise<void> => {
    // SECURITY: Rate limiting for authentication endpoint
    // Prevents brute force attacks and DoS
    // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (HIGH-02)
    const authLimiter = rateLimit({
      windowMs: parseInt(process.env.AUTH_RATE_LIMIT_WINDOW || '900000'), // 15 minutes
      max: parseInt(process.env.AUTH_RATE_LIMIT_MAX || '20'), // 20 authentication attempts per IP
      message: {
        jsonrpc: '2.0',
        error: {
          code: -32000,
          message: 'Too many authentication attempts. Please try again later.'
        },
        id: null
      },
      standardHeaders: true, // Return rate limit info in `RateLimit-*` headers
      legacyHeaders: false, // Disable `X-RateLimit-*` headers
      handler: (req, res) => {
        logger.warn('Rate limit exceeded', {
          ip: req.ip,
          userAgent: req.get('user-agent'),
          event: 'rate_limit'
        });
        res.status(429).json({
          jsonrpc: '2.0',
          error: {
            code: -32000,
            message: 'Too many authentication attempts'
          },
          id: null
        });
      }
    });

    // Main MCP endpoint with authentication and rate limiting
    app.post('/mcp', authLimiter, jsonParser, async (req: express.Request, res: express.Response): Promise<void> => {
      // Log comprehensive debug info about the request
      logger.info('POST /mcp request received - DETAILED DEBUG', {
        headers: req.headers,
@@ -1080,15 +1126,19 @@ export class SingleSessionHTTPServer {

      // Extract token and trim whitespace
      const token = authHeader.slice(7).trim();

-     // Check if token matches
-     if (token !== this.authToken) {
-       logger.warn('Authentication failed: Invalid token', {
      // SECURITY: Use timing-safe comparison to prevent timing attacks
      // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
      const isValidToken = this.authToken &&
        AuthManager.timingSafeCompare(token, this.authToken);

      if (!isValidToken) {
        logger.warn('Authentication failed: Invalid token', {
          ip: req.ip,
          userAgent: req.get('user-agent'),
          reason: 'invalid_token'
        });
-       res.status(401).json({
        res.status(401).json({
          jsonrpc: '2.0',
          error: {
            code: -32001,

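AuthManager.timingSafeCompare itself is not shown in this diff; a plausible shape sketched from Node's built-in primitives (an assumption, not the project's actual implementation):

import { createHash, timingSafeEqual } from 'crypto';

// Hashing both inputs first yields fixed-length buffers, so the comparison is
// constant-time and timingSafeEqual never throws on unequal input lengths.
function timingSafeCompare(a: string, b: string): boolean {
  const ha = createHash('sha256').update(a).digest();
  const hb = createHash('sha256').update(b).digest();
  return timingSafeEqual(ha, hb);
}
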
@@ -9,6 +9,7 @@ import { n8nDocumentationToolsFinal } from './mcp/tools';
import { n8nManagementTools } from './mcp/tools-n8n-manager';
import { N8NDocumentationMCPServer } from './mcp/server';
import { logger } from './utils/logger';
import { AuthManager } from './utils/auth';
import { PROJECT_VERSION } from './utils/version';
import { isN8nApiConfigured } from './config/n8n-api';
import dotenv from 'dotenv';
@@ -22,6 +23,17 @@ import {

dotenv.config();

/**
 * MCP tool response format with optional structured content
 */
interface MCPToolResponse {
  content: Array<{
    type: 'text';
    text: string;
  }>;
  structuredContent?: unknown;
}

let expressServer: any;
let authToken: string | null = null;

@@ -308,15 +320,19 @@ export async function startFixedHTTPServer() {

    // Extract token and trim whitespace
    const token = authHeader.slice(7).trim();

-   // Check if token matches
-   if (token !== authToken) {
-     logger.warn('Authentication failed: Invalid token', {
    // SECURITY: Use timing-safe comparison to prevent timing attacks
    // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
    const isValidToken = authToken &&
      AuthManager.timingSafeCompare(token, authToken);

    if (!isValidToken) {
      logger.warn('Authentication failed: Invalid token', {
        ip: req.ip,
        userAgent: req.get('user-agent'),
        reason: 'invalid_token'
      });
-     res.status(401).json({
      res.status(401).json({
        jsonrpc: '2.0',
        error: {
          code: -32001,
@@ -396,19 +412,46 @@ export async function startFixedHTTPServer() {
        // Delegate to the MCP server
        const toolName = jsonRpcRequest.params?.name;
        const toolArgs = jsonRpcRequest.params?.arguments || {};

        try {
          const result = await mcpServer.executeTool(toolName, toolArgs);

          // Convert result to JSON text for content field
          let responseText = JSON.stringify(result, null, 2);

          // Build MCP-compliant response with structuredContent for validation tools
          const mcpResult: MCPToolResponse = {
            content: [
              {
                type: 'text',
                text: responseText
              }
            ]
          };

          // Add structuredContent for validation tools (they have outputSchema)
          // Apply 1MB safety limit to prevent memory issues (matches STDIO server behavior)
          if (toolName.startsWith('validate_')) {
            const resultSize = responseText.length;

            if (resultSize > 1000000) {
              // Response is too large - truncate and warn
              logger.warn(
                `Validation tool ${toolName} response is very large (${resultSize} chars). ` +
                `Truncating for HTTP transport safety.`
              );
              mcpResult.content[0].text = responseText.substring(0, 999000) +
                '\n\n[Response truncated due to size limits]';
              // Don't include structuredContent for truncated responses
            } else {
              // Normal case - include structured content for MCP protocol compliance
              mcpResult.structuredContent = result;
            }
          }

          response = {
            jsonrpc: '2.0',
-           result: {
-             content: [
-               {
-                 type: 'text',
-                 text: JSON.stringify(result, null, 2)
-               }
-             ]
-           },
            result: mcpResult,
            id: jsonRpcRequest.id
          };
        } catch (error) {

 src/index.ts | 16
@@ -10,6 +10,22 @@ export { SingleSessionHTTPServer } from './http-server-single-session';
export { ConsoleManager } from './utils/console-manager';
export { N8NDocumentationMCPServer } from './mcp/server';

// Type exports for multi-tenant and library usage
export type {
  InstanceContext
} from './types/instance-context';
export {
  validateInstanceContext,
  isInstanceContext
} from './types/instance-context';

// Re-export MCP SDK types for convenience
export type {
  Tool,
  CallToolResult,
  ListToolsResult
} from '@modelcontextprotocol/sdk/types.js';

// Default export for convenience
import N8NMCPEngine from './mcp-engine';
export default N8NMCPEngine;

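These exports are what a library consumer would import; a hypothetical consumer module (the published package name 'n8n-mcp' is an assumption):

import N8NMCPEngine from 'n8n-mcp';
import type { InstanceContext, Tool } from 'n8n-mcp';

// Default export re-exported above; constructor arguments, if any, are not shown in this diff.
const engine = new N8NMCPEngine();
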
@@ -62,8 +62,12 @@ export class MCPEngine {
        hiddenProperties: []
      };
    }

-   return ConfigValidator.validate(args.nodeType, args.config, node.properties || []);
    // CRITICAL FIX: Extract user-provided keys before validation
    // This prevents false warnings about default values
    const userProvidedKeys = new Set(Object.keys(args.config || {}));

    return ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys);
  }

  async validateNodeMinimal(args: any) {

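The extra Set argument lets the validator tell user-supplied keys apart from injected defaults; a standalone sketch of that idea (not the project's ConfigValidator):

// Warn only about keys the caller explicitly sent, never about injected defaults.
function deprecatedWarnings(userKeys: Set<string>, deprecatedProps: string[]): string[] {
  return deprecatedProps.filter(prop => userKeys.has(prop));
}

const userKeys = new Set(Object.keys({ url: 'https://example.com' })); // what the user sent
console.log(deprecatedWarnings(userKeys, ['timeout'])); // [] because 'timeout' was a default, no false warning
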
File diff suppressed because it is too large
@@ -11,6 +11,9 @@ import { getN8nApiClient } from './handlers-n8n-manager';
import { N8nApiError, getUserFriendlyErrorMessage } from '../utils/n8n-errors';
import { logger } from '../utils/logger';
import { InstanceContext } from '../types/instance-context';
import { validateWorkflowStructure } from '../services/n8n-validation';
import { NodeRepository } from '../database/node-repository';
import { WorkflowVersioningService } from '../services/workflow-versioning-service';

// Zod schema for the diff request
const workflowDiffSchema = z.object({
@@ -27,10 +30,15 @@ const workflowDiffSchema = z.object({
    // Connection operations
    source: z.string().optional(),
    target: z.string().optional(),
    from: z.string().optional(), // For rewireConnection
    to: z.string().optional(), // For rewireConnection
    sourceOutput: z.string().optional(),
    targetInput: z.string().optional(),
    sourceIndex: z.number().optional(),
    targetIndex: z.number().optional(),
    // Smart parameters (Phase 1 UX improvement)
    branch: z.enum(['true', 'false']).optional(),
    case: z.number().optional(),
    ignoreErrors: z.boolean().optional(),
    // Connection cleanup operations
    dryRun: z.boolean().optional(),
@@ -42,9 +50,14 @@ const workflowDiffSchema = z.object({
  })),
  validateOnly: z.boolean().optional(),
  continueOnError: z.boolean().optional(),
  createBackup: z.boolean().optional(),
});

- export async function handleUpdatePartialWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse> {
export async function handleUpdatePartialWorkflow(
  args: unknown,
  repository: NodeRepository,
  context?: InstanceContext
): Promise<McpToolResponse> {
  try {
    // Debug logging (only in debug mode)
    if (process.env.DEBUG_MCP === 'true') {
@@ -82,7 +95,31 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
      }
      throw error;
    }

    // Create backup before modifying workflow (default: true)
    if (input.createBackup !== false && !input.validateOnly) {
      try {
        const versioningService = new WorkflowVersioningService(repository, client);
        const backupResult = await versioningService.createBackup(input.id, workflow, {
          trigger: 'partial_update',
          operations: input.operations
        });

        logger.info('Workflow backup created', {
          workflowId: input.id,
          versionId: backupResult.versionId,
          versionNumber: backupResult.versionNumber,
          pruned: backupResult.pruned
        });
      } catch (error: any) {
        logger.warn('Failed to create workflow backup', {
          workflowId: input.id,
          error: error.message
        });
        // Continue with update even if backup fails (non-blocking)
      }
    }

    // Apply diff operations
    const diffEngine = new WorkflowDiffEngine();
    const diffRequest = input as WorkflowDiffRequest;
@@ -101,6 +138,7 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
        error: 'Failed to apply diff operations',
        details: {
          errors: diffResult.errors,
          warnings: diffResult.warnings,
          operationsApplied: diffResult.operationsApplied,
          applied: diffResult.applied,
          failed: diffResult.failed
@@ -117,10 +155,93 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
        data: {
          valid: true,
          operationsToApply: input.operations.length
        },
        details: {
          warnings: diffResult.warnings
        }
      };
    }

    // Validate final workflow structure after applying all operations
    // This prevents creating workflows that pass operation-level validation
    // but fail workflow-level validation (e.g., UI can't render them)
    //
    // Validation can be skipped for specific integration tests that need to test
    // n8n API behavior with edge case workflows by setting SKIP_WORKFLOW_VALIDATION=true
    if (diffResult.workflow) {
      const structureErrors = validateWorkflowStructure(diffResult.workflow);
      if (structureErrors.length > 0) {
        const skipValidation = process.env.SKIP_WORKFLOW_VALIDATION === 'true';

        logger.warn('Workflow structure validation failed after applying diff operations', {
          workflowId: input.id,
          errors: structureErrors,
          blocking: !skipValidation
        });

        // Analyze error types to provide targeted recovery guidance
        const errorTypes = new Set<string>();
        structureErrors.forEach(err => {
          if (err.includes('operator') || err.includes('singleValue')) errorTypes.add('operator_issues');
          if (err.includes('connection') || err.includes('referenced')) errorTypes.add('connection_issues');
          if (err.includes('Missing') || err.includes('missing')) errorTypes.add('missing_metadata');
          if (err.includes('branch') || err.includes('output')) errorTypes.add('branch_mismatch');
        });

        // Build recovery guidance based on error types
        const recoverySteps = [];
        if (errorTypes.has('operator_issues')) {
          recoverySteps.push('Operator structure issue detected. Use validate_node_operation to check specific nodes.');
          recoverySteps.push('Binary operators (equals, contains, greaterThan, etc.) must NOT have singleValue:true');
          recoverySteps.push('Unary operators (isEmpty, isNotEmpty, true, false) REQUIRE singleValue:true');
        }
        if (errorTypes.has('connection_issues')) {
          recoverySteps.push('Connection validation failed. Check all node connections reference existing nodes.');
          recoverySteps.push('Use cleanStaleConnections operation to remove connections to non-existent nodes.');
        }
        if (errorTypes.has('missing_metadata')) {
          recoverySteps.push('Missing metadata detected. Ensure filter-based nodes (IF v2.2+, Switch v3.2+) have complete conditions.options.');
          recoverySteps.push('Required options: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}');
        }
        if (errorTypes.has('branch_mismatch')) {
          recoverySteps.push('Branch count mismatch. Ensure Switch nodes have outputs for all rules (e.g., 3 rules = 3 output branches).');
        }

        // Add generic recovery steps if no specific guidance
        if (recoverySteps.length === 0) {
          recoverySteps.push('Review the validation errors listed above');
          recoverySteps.push('Fix issues using updateNode or cleanStaleConnections operations');
          recoverySteps.push('Run validate_workflow again to verify fixes');
        }

        const errorMessage = structureErrors.length === 1
          ? `Workflow validation failed: ${structureErrors[0]}`
          : `Workflow validation failed with ${structureErrors.length} structural issues`;

        // If validation is not skipped, return error and block the save
        if (!skipValidation) {
          return {
            success: false,
            error: errorMessage,
            details: {
              errors: structureErrors,
              errorCount: structureErrors.length,
              operationsApplied: diffResult.operationsApplied,
              applied: diffResult.applied,
              recoveryGuidance: recoverySteps,
              note: 'Operations were applied but created an invalid workflow structure. The workflow was NOT saved to n8n to prevent UI rendering errors.',
              autoSanitizationNote: 'Auto-sanitization runs on all nodes during updates to fix operator structures and add missing metadata. However, it cannot fix all issues (e.g., broken connections, branch mismatches). Use the recovery guidance above to resolve remaining issues.'
            }
          };
        }
        // Validation skipped: log warning but continue (for specific integration tests)
        logger.info('Workflow validation skipped (SKIP_WORKFLOW_VALIDATION=true): Allowing workflow with validation warnings to proceed', {
          workflowId: input.id,
          warningCount: structureErrors.length
        });
      }
    }

    // Update workflow via API
    try {
      const updatedWorkflow = await client.updateWorkflow(input.id, diffResult.workflow!);
@@ -135,7 +256,8 @@ export async function handleUpdatePartialWorkflow(args: unknown, context?: Insta
        workflowName: updatedWorkflow.name,
        applied: diffResult.applied,
        failed: diffResult.failed,
-       errors: diffResult.errors
        errors: diffResult.errors,
        warnings: diffResult.warnings
      }
    };
  } catch (error) {

 src/mcp/index.ts | 152
@@ -3,6 +3,9 @@
import { N8NDocumentationMCPServer } from './server';
import { logger } from '../utils/logger';
import { TelemetryConfigManager } from '../telemetry/config-manager';
import { EarlyErrorLogger } from '../telemetry/early-error-logger';
import { STARTUP_CHECKPOINTS, findFailedCheckpoint, StartupCheckpoint } from '../telemetry/startup-checkpoints';
import { existsSync } from 'fs';

// Add error details to stderr for Claude Desktop debugging
process.on('uncaughtException', (error) => {
@@ -21,9 +24,50 @@ process.on('unhandledRejection', (reason, promise) => {
  process.exit(1);
});

/**
 * Detects if running in a container environment (Docker, Podman, Kubernetes, etc.)
 * Uses multiple detection methods for robustness:
 * 1. Environment variables (IS_DOCKER, IS_CONTAINER with multiple formats)
 * 2. Filesystem markers (/.dockerenv, /run/.containerenv)
 */
function isContainerEnvironment(): boolean {
  // Check environment variables with multiple truthy formats
  const dockerEnv = (process.env.IS_DOCKER || '').toLowerCase();
  const containerEnv = (process.env.IS_CONTAINER || '').toLowerCase();

  if (['true', '1', 'yes'].includes(dockerEnv)) {
    return true;
  }
  if (['true', '1', 'yes'].includes(containerEnv)) {
    return true;
  }

  // Fallback: Check filesystem markers
  // /.dockerenv exists in Docker containers
  // /run/.containerenv exists in Podman containers
  try {
    return existsSync('/.dockerenv') || existsSync('/run/.containerenv');
  } catch (error) {
    // If filesystem check fails, assume not in container
    logger.debug('Container detection filesystem check failed:', error);
    return false;
  }
}

async function main() {
- // Handle telemetry CLI commands
- const args = process.argv.slice(2);
  // Initialize early error logger for pre-handshake error capture (v2.18.3)
  // Now using singleton pattern with defensive initialization
  const startTime = Date.now();
  const earlyLogger = EarlyErrorLogger.getInstance();
  const checkpoints: StartupCheckpoint[] = [];

  try {
    // Checkpoint: Process started (fire-and-forget, no await)
    earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.PROCESS_STARTED);
    checkpoints.push(STARTUP_CHECKPOINTS.PROCESS_STARTED);

    // Handle telemetry CLI commands
    const args = process.argv.slice(2);
    if (args.length > 0 && args[0] === 'telemetry') {
      const telemetryConfig = TelemetryConfigManager.getInstance();
      const action = args[1];
@@ -58,6 +102,15 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md

    const mode = process.env.MCP_MODE || 'stdio';

    // Checkpoint: Telemetry initializing (fire-and-forget, no await)
    earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);
    checkpoints.push(STARTUP_CHECKPOINTS.TELEMETRY_INITIALIZING);

    // Telemetry is already initialized by TelemetryConfigManager in imports
    // Mark as ready (fire-and-forget, no await)
    earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.TELEMETRY_READY);
    checkpoints.push(STARTUP_CHECKPOINTS.TELEMETRY_READY);

    try {
      // Only show debug messages in HTTP mode to avoid corrupting stdio communication
      if (mode === 'http') {
@@ -65,6 +118,10 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
        console.error('Current directory:', process.cwd());
        console.error('Node version:', process.version);
      }

      // Checkpoint: MCP handshake starting (fire-and-forget, no await)
      earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);
      checkpoints.push(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_STARTING);

      if (mode === 'http') {
        // Check if we should use the fixed implementation
@@ -90,15 +147,95 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
        }
      } else {
        // Stdio mode - for local Claude Desktop
-       const server = new N8NDocumentationMCPServer();
        const server = new N8NDocumentationMCPServer(undefined, earlyLogger);

        // Graceful shutdown handler (fixes Issue #277)
        let isShuttingDown = false;
        const shutdown = async (signal: string = 'UNKNOWN') => {
          if (isShuttingDown) return; // Prevent multiple shutdown calls
          isShuttingDown = true;

          try {
            logger.info(`Shutdown initiated by: ${signal}`);

            await server.shutdown();

            // Close stdin to signal we're done reading
            if (process.stdin && !process.stdin.destroyed) {
              process.stdin.pause();
              process.stdin.destroy();
            }

            // Exit with timeout to ensure we don't hang
            // Increased to 1000ms for slower systems
            setTimeout(() => {
              logger.warn('Shutdown timeout exceeded, forcing exit');
              process.exit(0);
            }, 1000).unref();

            // Let the timeout handle the exit for graceful shutdown
            // (removed immediate exit to allow cleanup to complete)
          } catch (error) {
            logger.error('Error during shutdown:', error);
            process.exit(1);
          }
        };

        // Handle termination signals (fixes Issue #277)
        // Signal handling strategy:
        // - Claude Desktop (Windows/macOS/Linux): stdin handlers + signal handlers
        //   Primary: stdin close when Claude quits | Fallback: SIGTERM/SIGINT/SIGHUP
        // - Container environments: signal handlers ONLY
        //   stdin closed in detached mode would trigger immediate shutdown
        //   Container detection via IS_DOCKER/IS_CONTAINER env vars + filesystem markers
        // - Manual execution: Both stdin and signal handlers work
        process.on('SIGTERM', () => shutdown('SIGTERM'));
        process.on('SIGINT', () => shutdown('SIGINT'));
        process.on('SIGHUP', () => shutdown('SIGHUP'));

        // Handle stdio disconnect - PRIMARY shutdown mechanism for Claude Desktop
        // Skip in container environments (Docker, Kubernetes, Podman) to prevent
        // premature shutdown when stdin is closed in detached mode.
        // Containers rely on signal handlers (SIGTERM/SIGINT/SIGHUP) for proper shutdown.
        const isContainer = isContainerEnvironment();

        if (!isContainer && process.stdin.readable && !process.stdin.destroyed) {
          try {
            process.stdin.on('end', () => shutdown('STDIN_END'));
            process.stdin.on('close', () => shutdown('STDIN_CLOSE'));
          } catch (error) {
            logger.error('Failed to register stdin handlers, using signal handlers only:', error);
            // Continue - signal handlers will still work
          }
        }

        await server.run();
      }

      // Checkpoint: MCP handshake complete (fire-and-forget, no await)
      earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);
      checkpoints.push(STARTUP_CHECKPOINTS.MCP_HANDSHAKE_COMPLETE);

      // Checkpoint: Server ready (fire-and-forget, no await)
      earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.SERVER_READY);
      checkpoints.push(STARTUP_CHECKPOINTS.SERVER_READY);

      // Log successful startup (fire-and-forget, no await)
      const startupDuration = Date.now() - startTime;
      earlyLogger.logStartupSuccess(checkpoints, startupDuration);

      logger.info(`Server startup completed in ${startupDuration}ms (${checkpoints.length} checkpoints passed)`);

    } catch (error) {
      // Log startup error with checkpoint context (fire-and-forget, no await)
      const failedCheckpoint = findFailedCheckpoint(checkpoints);
      earlyLogger.logStartupError(failedCheckpoint, error);

      // In stdio mode, we cannot output to console at all
      if (mode !== 'stdio') {
        console.error('Failed to start MCP server:', error);
        logger.error('Failed to start MCP server', error);

        // Provide helpful error messages
        if (error instanceof Error && error.message.includes('nodes.db not found')) {
          console.error('\nTo fix this issue:');
@@ -112,7 +249,12 @@ Learn more: https://github.com/czlonkowski/n8n-mcp/blob/main/PRIVACY.md
          console.error('3. If that doesn\'t work, try: rm -rf node_modules && npm install');
        }
      }

      process.exit(1);
    }
  } catch (outerError) {
    // Outer error catch for early initialization failures
    logger.error('Critical startup error:', outerError);
    process.exit(1);
  }
}

@@ -37,6 +37,8 @@ import {
} from '../utils/protocol-version';
import { InstanceContext } from '../types/instance-context';
import { telemetry } from '../telemetry';
import { EarlyErrorLogger } from '../telemetry/early-error-logger';
import { STARTUP_CHECKPOINTS } from '../telemetry/startup-checkpoints';

interface NodeRow {
  node_type: string;
@@ -67,9 +69,11 @@ export class N8NDocumentationMCPServer {
  private instanceContext?: InstanceContext;
  private previousTool: string | null = null;
  private previousToolTimestamp: number = Date.now();
  private earlyLogger: EarlyErrorLogger | null = null;

- constructor(instanceContext?: InstanceContext) {
  constructor(instanceContext?: InstanceContext, earlyLogger?: EarlyErrorLogger) {
    this.instanceContext = instanceContext;
    this.earlyLogger = earlyLogger || null;
    // Check for test environment first
    const envDbPath = process.env.NODE_DB_PATH;
    let dbPath: string | null = null;
@@ -100,22 +104,49 @@ export class N8NDocumentationMCPServer {
    }

    // Initialize database asynchronously
-   this.initialized = this.initializeDatabase(dbPath);
    this.initialized = this.initializeDatabase(dbPath).then(() => {
      // After database is ready, check n8n API configuration (v2.18.3)
      if (this.earlyLogger) {
        this.earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.N8N_API_CHECKING);
      }

      // Log n8n API configuration status at startup
      const apiConfigured = isN8nApiConfigured();
      const totalTools = apiConfigured ?
        n8nDocumentationToolsFinal.length + n8nManagementTools.length :
        n8nDocumentationToolsFinal.length;

      logger.info(`MCP server initialized with ${totalTools} tools (n8n API: ${apiConfigured ? 'configured' : 'not configured'})`);

      if (this.earlyLogger) {
        this.earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.N8N_API_READY);
      }
    });

    logger.info('Initializing n8n Documentation MCP server');

-   // Log n8n API configuration status at startup
-   const apiConfigured = isN8nApiConfigured();
-   const totalTools = apiConfigured ?
-     n8nDocumentationToolsFinal.length + n8nManagementTools.length :
-     n8nDocumentationToolsFinal.length;
-
-   logger.info(`MCP server initialized with ${totalTools} tools (n8n API: ${apiConfigured ? 'configured' : 'not configured'})`);

    this.server = new Server(
      {
        name: 'n8n-documentation-mcp',
-       version: '1.0.0',
        version: PROJECT_VERSION,
        icons: [
          {
            src: "https://www.n8n-mcp.com/logo.png",
            mimeType: "image/png",
            sizes: ["192x192"]
          },
          {
            src: "https://www.n8n-mcp.com/logo-128.png",
            mimeType: "image/png",
            sizes: ["128x128"]
          },
          {
            src: "https://www.n8n-mcp.com/logo-48.png",
            mimeType: "image/png",
            sizes: ["48x48"]
          }
        ],
        websiteUrl: "https://n8n-mcp.com"
      },
      {
        capabilities: {
@@ -129,20 +160,38 @@ export class N8NDocumentationMCPServer {

  private async initializeDatabase(dbPath: string): Promise<void> {
    try {
      // Checkpoint: Database connecting (v2.18.3)
      if (this.earlyLogger) {
        this.earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.DATABASE_CONNECTING);
      }

      logger.debug('Database initialization starting...', { dbPath });

      this.db = await createDatabaseAdapter(dbPath);
      logger.debug('Database adapter created');

      // If using in-memory database for tests, initialize schema
      if (dbPath === ':memory:') {
        await this.initializeInMemorySchema();
        logger.debug('In-memory schema initialized');
      }

      this.repository = new NodeRepository(this.db);
      logger.debug('Node repository initialized');

      this.templateService = new TemplateService(this.db);
      logger.debug('Template service initialized');

      // Initialize similarity services for enhanced validation
      EnhancedConfigValidator.initializeSimilarityServices(this.repository);
      logger.debug('Similarity services initialized');

-     logger.info(`Initialized database from: ${dbPath}`);
      // Checkpoint: Database connected (v2.18.3)
      if (this.earlyLogger) {
        this.earlyLogger.logCheckpoint(STARTUP_CHECKPOINTS.DATABASE_CONNECTED);
      }

      logger.info(`Database initialized successfully from: ${dbPath}`);
    } catch (error) {
      logger.error('Failed to initialize database:', error);
      throw new Error(`Failed to open database: ${error instanceof Error ? error.message : 'Unknown error'}`);
@@ -151,25 +200,122 @@ export class N8NDocumentationMCPServer {
|
||||
|
||||
  private async initializeInMemorySchema(): Promise<void> {
    if (!this.db) return;

    // Read and execute schema
    const schemaPath = path.join(__dirname, '../../src/database/schema.sql');
    const schema = await fs.readFile(schemaPath, 'utf-8');

-   // Execute schema statements
-   const statements = schema.split(';').filter(stmt => stmt.trim());
+   // Parse SQL statements properly (handles BEGIN...END blocks in triggers)
+   const statements = this.parseSQLStatements(schema);

    for (const statement of statements) {
      if (statement.trim()) {
-       this.db.exec(statement);
+       try {
+         this.db.exec(statement);
+       } catch (error) {
+         logger.error(`Failed to execute SQL statement: ${statement.substring(0, 100)}...`, error);
+         throw error;
+       }
      }
    }
  }

  /**
   * Parse SQL statements from schema file, properly handling multi-line statements
   * including triggers with BEGIN...END blocks
   */
  private parseSQLStatements(sql: string): string[] {
    const statements: string[] = [];
    let current = '';
    let inBlock = false;

    const lines = sql.split('\n');

    for (const line of lines) {
      const trimmed = line.trim().toUpperCase();

      // Skip comments and empty lines
      if (trimmed.startsWith('--') || trimmed === '') {
        continue;
      }

      // Track BEGIN...END blocks (triggers, procedures)
      if (trimmed.includes('BEGIN')) {
        inBlock = true;
      }

      current += line + '\n';

      // End of block (trigger/procedure)
      if (inBlock && trimmed === 'END;') {
        statements.push(current.trim());
        current = '';
        inBlock = false;
        continue;
      }

      // Regular statement end (not in block)
      if (!inBlock && trimmed.endsWith(';')) {
        statements.push(current.trim());
        current = '';
      }
    }

    // Add any remaining content
    if (current.trim()) {
      statements.push(current.trim());
    }

    return statements.filter(s => s.length > 0);
  }
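  // Illustrative example (hypothetical schema, not the project's actual schema.sql):
  // a naive schema.split(';') would break the trigger below into three fragments,
  // while parseSQLStatements() keeps the BEGIN...END body intact:
  //
  //   CREATE TABLE nodes (id INTEGER PRIMARY KEY);
  //   CREATE TRIGGER nodes_ai AFTER INSERT ON nodes
  //   BEGIN
  //     INSERT INTO nodes_fts(rowid) VALUES (new.id);
  //   END;
  //
  // Expected result: two statements - the CREATE TABLE, and the whole CREATE TRIGGER.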
  private async ensureInitialized(): Promise<void> {
    await this.initialized;
    if (!this.db || !this.repository) {
      throw new Error('Database not initialized');
    }

    // Validate database health on first access
    if (!this.dbHealthChecked) {
      await this.validateDatabaseHealth();
      this.dbHealthChecked = true;
    }
  }

  private dbHealthChecked: boolean = false;

  private async validateDatabaseHealth(): Promise<void> {
    if (!this.db) return;

    try {
      // Check if nodes table has data
      const nodeCount = this.db.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number };

      if (nodeCount.count === 0) {
        logger.error('CRITICAL: Database is empty - no nodes found! Please run: npm run rebuild');
        throw new Error('Database is empty. Run "npm run rebuild" to populate node data.');
      }

      // Check if FTS5 table exists
      const ftsExists = this.db.prepare(`
        SELECT name FROM sqlite_master
        WHERE type='table' AND name='nodes_fts'
      `).get();

      if (!ftsExists) {
        logger.warn('FTS5 table missing - search performance will be degraded. Please run: npm run rebuild');
      } else {
        const ftsCount = this.db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get() as { count: number };
        if (ftsCount.count === 0) {
          logger.warn('FTS5 index is empty - search will not work properly. Please run: npm run rebuild');
        }
      }

      logger.info(`Database health check passed: ${nodeCount.count} nodes loaded`);
    } catch (error) {
      logger.error('Database health check failed:', error);
      throw error;
    }
  }
  private setupHandlers(): void {

@@ -599,16 +745,23 @@ export class N8NDocumentationMCPServer {
   */
  private validateToolParamsBasic(toolName: string, args: any, requiredParams: string[]): void {
    const missing: string[] = [];
    const invalid: string[] = [];

    for (const param of requiredParams) {
      if (!(param in args) || args[param] === undefined || args[param] === null) {
        missing.push(param);
      } else if (typeof args[param] === 'string' && args[param].trim() === '') {
        invalid.push(`${param} (empty string)`);
      }
    }

    if (missing.length > 0) {
      throw new Error(`Missing required parameters for ${toolName}: ${missing.join(', ')}. Please provide the required parameters to use this tool.`);
    }

    if (invalid.length > 0) {
      throw new Error(`Invalid parameters for ${toolName}: ${invalid.join(', ')}. String parameters cannot be empty.`);
    }
  }
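  // Illustrative example (hypothetical call, not part of the diff): with the new check,
  //   validateToolParamsBasic('n8n_get_workflow', { id: '   ' }, ['id'])
  // now throws "Invalid parameters for n8n_get_workflow: id (empty string). ..." instead
  // of accepting a whitespace-only id and failing later in the handler.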
  /**

@@ -856,10 +1009,10 @@ export class N8NDocumentationMCPServer {
        return n8nHandlers.handleGetWorkflowMinimal(args, this.instanceContext);
      case 'n8n_update_full_workflow':
        this.validateToolParams(name, args, ['id']);
-       return n8nHandlers.handleUpdateWorkflow(args, this.instanceContext);
+       return n8nHandlers.handleUpdateWorkflow(args, this.repository!, this.instanceContext);
      case 'n8n_update_partial_workflow':
        this.validateToolParams(name, args, ['id', 'operations']);
-       return handleUpdatePartialWorkflow(args, this.instanceContext);
+       return handleUpdatePartialWorkflow(args, this.repository!, this.instanceContext);
      case 'n8n_delete_workflow':
        this.validateToolParams(name, args, ['id']);
        return n8nHandlers.handleDeleteWorkflow(args, this.instanceContext);

@@ -897,7 +1050,10 @@ export class N8NDocumentationMCPServer {
      case 'n8n_diagnostic':
        // No required parameters
        return n8nHandlers.handleDiagnostic({ params: { arguments: args } }, this.instanceContext);

+     case 'n8n_workflow_versions':
+       this.validateToolParams(name, args, ['mode']);
+       return n8nHandlers.handleWorkflowVersions(args, this.repository!, this.instanceContext);

      default:
        throw new Error(`Unknown tool: ${name}`);
    }

@@ -1027,6 +1183,15 @@ export class N8NDocumentationMCPServer {
    };
  }
  /**
   * Primary search method used by ALL MCP search tools.
   *
   * This method automatically detects and uses FTS5 full-text search when available
   * (lines 1189-1203), falling back to LIKE queries only if FTS5 table doesn't exist.
   *
   * NOTE: This is separate from NodeRepository.searchNodes() which is legacy LIKE-based.
   * All MCP tool invocations route through this method to leverage FTS5 performance.
   */
  private async searchNodes(
    query: string,
    limit: number = 20,

@@ -1038,7 +1203,7 @@ export class N8NDocumentationMCPServer {
  ): Promise<any> {
    await this.ensureInitialized();
    if (!this.db) throw new Error('Database not initialized');

    // Normalize the query if it looks like a full node type
    let normalizedQuery = query;

@@ -1114,20 +1279,20 @@ export class N8NDocumentationMCPServer {
    try {
      // Use FTS5 with ranking
      const nodes = this.db.prepare(`
        SELECT
          n.*,
          rank
        FROM nodes n
        JOIN nodes_fts ON n.rowid = nodes_fts.rowid
        WHERE nodes_fts MATCH ?
-       ORDER BY
-         rank,
-         CASE
-           WHEN n.display_name = ? THEN 0
-           WHEN n.display_name LIKE ? THEN 1
-           WHEN n.node_type LIKE ? THEN 2
+       ORDER BY
+         CASE
+           WHEN LOWER(n.display_name) = LOWER(?) THEN 0
+           WHEN LOWER(n.display_name) LIKE LOWER(?) THEN 1
+           WHEN LOWER(n.node_type) LIKE LOWER(?) THEN 2
            ELSE 3
          END,
+         rank,
          n.display_name
        LIMIT ?
      `).all(ftsQuery, cleanedQuery, `%${cleanedQuery}%`, `%${cleanedQuery}%`, limit) as (NodeRow & { rank: number })[];
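      // Illustrative note (hypothetical query, not part of the diff): for a search like
      // "WEBHOOK", the reworked ORDER BY now puts the case-insensitive exact display-name
      // match ("Webhook") first, before FTS5 rank, so query casing no longer pushes the
      // canonical node below fuzzier matches.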
@@ -1907,7 +2072,8 @@ Full documentation is being prepared. For now, use get_node_essentials for confi
      // Add examples from templates if requested
      if (includeExamples) {
        try {
-         const fullNodeType = getWorkflowNodeType(node.package ?? 'n8n-nodes-base', node.nodeType);
+         // Use the already-computed workflowNodeType from result (line 1888)
+         // This ensures consistency with search_nodes behavior (line 1203)
          const examples = this.db!.prepare(`
            SELECT
              parameters_json,

@@ -1921,7 +2087,7 @@ Full documentation is being prepared. For now, use get_node_essentials for confi
            WHERE node_type = ?
            ORDER BY rank
            LIMIT 3
-         `).all(fullNodeType) as any[];
+         `).all(result.workflowNodeType) as any[];

          if (examples.length > 0) {
            (result as any).examples = examples.map((ex: any) => ({
@@ -4,26 +4,30 @@ export const listAiToolsDoc: ToolDocumentation = {
  name: 'list_ai_tools',
  category: 'discovery',
  essentials: {
-   description: 'Returns 263 nodes with built-in AI features. CRITICAL: Any of the 525 n8n nodes can be used as an AI tool by connecting it to an AI Agent node\'s tool port. This list only shows nodes with AI-specific features, not all usable nodes.',
+   description: 'DEPRECATED: Basic list of 263 AI nodes. For comprehensive AI Agent guidance, use tools_documentation({topic: "ai_agents_guide"}). That guide covers architecture, connections, tools, validation, and best practices. Use search_nodes({query: "AI", includeExamples: true}) for AI nodes with working examples.',
    keyParameters: [],
-   example: 'list_ai_tools()',
+   example: 'tools_documentation({topic: "ai_agents_guide"}) // Recommended alternative',
    performance: 'Instant (cached)',
    tips: [
-     'ANY node can be an AI tool - not limited to this list',
-     'Connect Slack, Database, HTTP Request, etc. to AI Agent tool port',
+     'NEW: Use ai_agents_guide for comprehensive AI workflow documentation',
+     'Use search_nodes({includeExamples: true}) for AI nodes with real-world examples',
+     'ANY node can be an AI tool - not limited to AI-specific nodes',
      'Use get_node_as_tool_info for guidance on any node'
    ]
  },
  full: {
-   description: 'Lists 263 nodes that have built-in AI capabilities or are optimized for AI workflows. IMPORTANT: This is NOT a complete list of nodes usable as AI tools. Any of the 525 n8n nodes can be connected to an AI Agent node\'s tool port to function as an AI tool. This includes Slack, Google Sheets, databases, HTTP requests, and more.',
+   description: '**DEPRECATED in favor of ai_agents_guide**. Lists 263 nodes with built-in AI capabilities. For comprehensive documentation on building AI Agent workflows, use tools_documentation({topic: "ai_agents_guide"}) which covers architecture, the 8 AI connection types, validation, and best practices with real examples. IMPORTANT: This basic list is NOT a complete guide - use the full AI Agents guide instead.',
    parameters: {},
-   returns: 'Array of 263 AI-optimized nodes including: OpenAI (GPT-3/4), Anthropic (Claude), Google AI (Gemini/PaLM), Cohere, HuggingFace, Pinecone, Qdrant, Supabase Vector Store, LangChain nodes, embeddings processors, vector stores, chat models, and AI-specific utilities. Each entry includes nodeType, displayName, and AI-specific capabilities.',
+   returns: 'Array of 263 AI-optimized nodes. RECOMMENDED: Use ai_agents_guide for comprehensive guidance, or search_nodes({query: "AI", includeExamples: true}) for AI nodes with working configuration examples.',
    examples: [
-     'list_ai_tools() - Returns all 263 AI-optimized nodes',
-     '// To use ANY node as AI tool:',
-     '// 1. Add any node (e.g., Slack, MySQL, HTTP Request)',
-     '// 2. Connect it to AI Agent node\'s "Tool" input port',
-     '// 3. The AI agent can now use that node\'s functionality'
+     '// RECOMMENDED: Use the comprehensive AI Agents guide',
+     'tools_documentation({topic: "ai_agents_guide"})',
+     '',
+     '// Or search for AI nodes with real-world examples',
+     'search_nodes({query: "AI Agent", includeExamples: true})',
+     '',
+     '// Basic list (deprecated)',
+     'list_ai_tools() - Returns 263 AI-optimized nodes'
    ],
    useCases: [
      'Discover AI model integrations (OpenAI, Anthropic, Google AI)',
src/mcp/tool-docs/guides/ai-agents-guide.ts (new file, 738 lines)
@@ -0,0 +1,738 @@
import { ToolDocumentation } from '../types';

export const aiAgentsGuide: ToolDocumentation = {
  name: 'ai_agents_guide',
  category: 'guides',
  essentials: {
    description: 'Comprehensive guide to building AI Agent workflows in n8n. Covers architecture, connections, tools, validation, and best practices for production AI systems.',
    keyParameters: [],
    example: 'Use tools_documentation({topic: "ai_agents_guide"}) to access this guide',
    performance: 'N/A - Documentation only',
    tips: [
      'Start with Chat Trigger → AI Agent → Language Model pattern',
      'Always connect language model BEFORE enabling AI Agent',
      'Use proper toolDescription for all AI tools (15+ characters)',
      'Validate workflows with n8n_validate_workflow before deployment',
      'Use includeExamples=true when searching for AI nodes',
      'Check FINAL_AI_VALIDATION_SPEC.md for detailed requirements'
    ]
  },
  full: {
    description: `# Complete Guide to AI Agents in n8n

This comprehensive guide covers everything you need to build production-ready AI Agent workflows in n8n.

## Table of Contents
1. [AI Agent Architecture](#architecture)
2. [Essential Connection Types](#connections)
3. [Building Your First AI Agent](#first-agent)
4. [AI Tools Deep Dive](#tools)
5. [Advanced Patterns](#advanced)
6. [Validation & Best Practices](#validation)
7. [Troubleshooting](#troubleshooting)

---

## 1. AI Agent Architecture {#architecture}

### Core Components

An n8n AI Agent workflow typically consists of:

1. **Chat Trigger**: Entry point for user interactions
   - Webhook-based or manual trigger
   - Supports streaming responses (responseMode)
   - Passes user message to AI Agent

2. **AI Agent**: The orchestrator
   - Manages conversation flow
   - Decides when to use tools
   - Iterates until task is complete
   - Supports fallback models for reliability

3. **Language Model**: The AI brain
   - OpenAI GPT-4, Claude, Gemini, etc.
   - Connected via ai_languageModel port
   - Can have primary + fallback for reliability

4. **Tools**: AI Agent's capabilities
   - HTTP Request, Code, Vector Store, etc.
   - Connected via ai_tool port
   - Each tool needs clear toolDescription

5. **Optional Components**:
   - Memory (conversation history)
   - Output Parser (structured responses)
   - Vector Store (knowledge retrieval)

### Connection Flow

**CRITICAL**: AI connections flow TO the consumer (reversed from standard n8n):

\`\`\`
Standard n8n: [Source] --main--> [Target]
AI pattern:   [Language Model] --ai_languageModel--> [AI Agent]
              [HTTP Tool] --ai_tool--> [AI Agent]
\`\`\`

This is why you use \`sourceOutput: "ai_languageModel"\` when connecting components.

---

## 2. Essential Connection Types {#connections}

### The 8 AI Connection Types

1. **ai_languageModel**
   - FROM: OpenAI Chat Model, Anthropic, Google Gemini, etc.
   - TO: AI Agent, Basic LLM Chain
   - REQUIRED: Every AI Agent needs 1-2 language models
   - Example: \`{type: "addConnection", source: "OpenAI", target: "AI Agent", sourceOutput: "ai_languageModel"}\`

2. **ai_tool**
   - FROM: Any tool node (HTTP Request Tool, Code Tool, etc.)
   - TO: AI Agent
   - REQUIRED: At least 1 tool recommended
   - Example: \`{type: "addConnection", source: "HTTP Request Tool", target: "AI Agent", sourceOutput: "ai_tool"}\`

3. **ai_memory**
   - FROM: Window Buffer Memory, Conversation Summary, etc.
   - TO: AI Agent
   - OPTIONAL: 0-1 memory system
   - Enables conversation history tracking

4. **ai_outputParser**
   - FROM: Structured Output Parser, JSON Parser, etc.
   - TO: AI Agent
   - OPTIONAL: For structured responses
   - Must set hasOutputParser=true on AI Agent

5. **ai_embedding**
   - FROM: Embeddings OpenAI, Embeddings Google, etc.
   - TO: Vector Store (Pinecone, In-Memory, etc.)
   - REQUIRED: For vector-based retrieval

6. **ai_vectorStore**
   - FROM: Vector Store node
   - TO: Vector Store Tool
   - REQUIRED: For retrieval-augmented generation (RAG)

7. **ai_document**
   - FROM: Document Loader, Default Data Loader
   - TO: Vector Store
   - REQUIRED: Provides data for vector storage

8. **ai_textSplitter**
   - FROM: Text Splitter nodes
   - TO: Document processing chains
   - OPTIONAL: Chunk large documents

### Connection Examples

\`\`\`typescript
// Basic AI Agent setup
n8n_update_partial_workflow({
  id: "workflow_id",
  operations: [
    // Connect language model (REQUIRED)
    {
      type: "addConnection",
      source: "OpenAI Chat Model",
      target: "AI Agent",
      sourceOutput: "ai_languageModel"
    },
    // Connect tools
    {
      type: "addConnection",
      source: "HTTP Request Tool",
      target: "AI Agent",
      sourceOutput: "ai_tool"
    },
    {
      type: "addConnection",
      source: "Code Tool",
      target: "AI Agent",
      sourceOutput: "ai_tool"
    },
    // Add memory (optional)
    {
      type: "addConnection",
      source: "Window Buffer Memory",
      target: "AI Agent",
      sourceOutput: "ai_memory"
    }
  ]
})
\`\`\`

---

## 3. Building Your First AI Agent {#first-agent}

### Step-by-Step Tutorial

#### Step 1: Create Chat Trigger

Use \`n8n_create_workflow\` or manually create a workflow with:

\`\`\`typescript
{
  name: "My First AI Agent",
  nodes: [
    {
      id: "chat_trigger",
      name: "Chat Trigger",
      type: "@n8n/n8n-nodes-langchain.chatTrigger",
      position: [100, 100],
      parameters: {
        options: {
          responseMode: "lastNode" // or "streaming" for real-time
        }
      }
    }
  ],
  connections: {}
}
\`\`\`

#### Step 2: Add Language Model

\`\`\`typescript
n8n_update_partial_workflow({
  id: "workflow_id",
  operations: [
    {
      type: "addNode",
      node: {
        name: "OpenAI Chat Model",
        type: "@n8n/n8n-nodes-langchain.lmChatOpenAi",
        position: [300, 50],
        parameters: {
          model: "gpt-4",
          temperature: 0.7
        }
      }
    }
  ]
})
\`\`\`

#### Step 3: Add AI Agent

\`\`\`typescript
n8n_update_partial_workflow({
  id: "workflow_id",
  operations: [
    {
      type: "addNode",
      node: {
        name: "AI Agent",
        type: "@n8n/n8n-nodes-langchain.agent",
        position: [300, 150],
        parameters: {
          promptType: "auto",
          systemMessage: "You are a helpful assistant. Be concise and accurate."
        }
      }
    }
  ]
})
\`\`\`

#### Step 4: Connect Components

\`\`\`typescript
n8n_update_partial_workflow({
  id: "workflow_id",
  operations: [
    // Chat Trigger → AI Agent (main connection)
    {
      type: "addConnection",
      source: "Chat Trigger",
      target: "AI Agent"
    },
    // Language Model → AI Agent (AI connection)
    {
      type: "addConnection",
      source: "OpenAI Chat Model",
      target: "AI Agent",
      sourceOutput: "ai_languageModel"
    }
  ]
})
\`\`\`

#### Step 5: Validate

\`\`\`typescript
n8n_validate_workflow({id: "workflow_id"})
\`\`\`
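
A passing result from this step should look roughly like the sketch below (field names as used in the validation section of this guide; the real payload may carry more detail):

\`\`\`typescript
// Hypothetical shape of a successful validation response
{
  valid: true,
  errors: [],
  warnings: []
}
\`\`\`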
---

## 4. AI Tools Deep Dive {#tools}

### Tool Types and When to Use Them

#### 1. HTTP Request Tool
**Use when**: AI needs to call external APIs

**Critical Requirements**:
- \`toolDescription\`: Clear, 15+ character description
- \`url\`: API endpoint (can include placeholders)
- \`placeholderDefinitions\`: Define all {placeholders}
- Proper authentication if needed

**Example**:
\`\`\`typescript
{
  type: "addNode",
  node: {
    name: "GitHub Issues Tool",
    type: "@n8n/n8n-nodes-langchain.toolHttpRequest",
    position: [500, 100],
    parameters: {
      method: "POST",
      url: "https://api.github.com/repos/{owner}/{repo}/issues",
      toolDescription: "Create GitHub issues. Requires owner (username), repo (repository name), title, and body.",
      placeholderDefinitions: {
        values: [
          {name: "owner", description: "Repository owner username"},
          {name: "repo", description: "Repository name"},
          {name: "title", description: "Issue title"},
          {name: "body", description: "Issue description"}
        ]
      },
      sendBody: true,
      jsonBody: "={{ { title: $json.title, body: $json.body } }}"
    }
  }
}
\`\`\`

#### 2. Code Tool
**Use when**: AI needs to run custom logic

**Critical Requirements**:
- \`name\`: Function name (alphanumeric + underscore)
- \`description\`: 10+ character explanation
- \`code\`: JavaScript or Python code
- \`inputSchema\`: Define expected inputs (recommended)

**Example**:
\`\`\`typescript
{
  type: "addNode",
  node: {
    name: "Calculate Shipping",
    type: "@n8n/n8n-nodes-langchain.toolCode",
    position: [500, 200],
    parameters: {
      name: "calculate_shipping",
      description: "Calculate shipping cost based on weight (kg) and distance (km)",
      language: "javaScript",
      code: "const cost = 5 + ($input.weight * 2) + ($input.distance * 0.1); return { cost };",
      specifyInputSchema: true,
      inputSchema: "{ \\"type\\": \\"object\\", \\"properties\\": { \\"weight\\": { \\"type\\": \\"number\\" }, \\"distance\\": { \\"type\\": \\"number\\" } } }"
    }
  }
}
\`\`\`

#### 3. Vector Store Tool
**Use when**: AI needs to search knowledge base

**Setup**: Requires Vector Store + Embeddings + Documents

**Example**:
\`\`\`typescript
// Step 1: Create Vector Store with embeddings and documents
n8n_update_partial_workflow({
  operations: [
    {type: "addConnection", source: "Embeddings OpenAI", target: "Pinecone", sourceOutput: "ai_embedding"},
    {type: "addConnection", source: "Document Loader", target: "Pinecone", sourceOutput: "ai_document"}
  ]
})

// Step 2: Connect Vector Store to Vector Store Tool
n8n_update_partial_workflow({
  operations: [
    {type: "addConnection", source: "Pinecone", target: "Vector Store Tool", sourceOutput: "ai_vectorStore"}
  ]
})

// Step 3: Connect tool to AI Agent
n8n_update_partial_workflow({
  operations: [
    {type: "addConnection", source: "Vector Store Tool", target: "AI Agent", sourceOutput: "ai_tool"}
  ]
})
\`\`\`

#### 4. AI Agent Tool (Sub-Agents)
**Use when**: Need specialized expertise

**Example**: Research specialist sub-agent
\`\`\`typescript
{
  type: "addNode",
  node: {
    name: "Research Specialist",
    type: "@n8n/n8n-nodes-langchain.agentTool",
    position: [500, 300],
    parameters: {
      name: "research_specialist",
      description: "Expert researcher that searches multiple sources and synthesizes information. Use for detailed research tasks.",
      systemMessage: "You are a research specialist. Search thoroughly, cite sources, and provide comprehensive analysis."
    }
  }
}
\`\`\`

#### 5. MCP Client Tool
**Use when**: Need to use Model Context Protocol servers

**Example**: Filesystem access
\`\`\`typescript
{
  type: "addNode",
  node: {
    name: "Filesystem Tool",
    type: "@n8n/n8n-nodes-langchain.mcpClientTool",
    position: [500, 400],
    parameters: {
      description: "Access file system to read files, list directories, and search content",
      mcpServer: {
        transport: "stdio",
        command: "npx",
        args: ["-y", "@modelcontextprotocol/server-filesystem", "/allowed/path"]
      },
      tool: "read_file"
    }
  }
}
\`\`\`

---

## 5. Advanced Patterns {#advanced}

### Pattern 1: Streaming Responses

For real-time user experience:

\`\`\`typescript
// Set Chat Trigger to streaming mode
{
  parameters: {
    options: {
      responseMode: "streaming"
    }
  }
}

// CRITICAL: AI Agent must NOT have main output connections in streaming mode
// Responses stream back through Chat Trigger automatically
\`\`\`

**Validation will fail if**:
- Chat Trigger has streaming but target is not AI Agent
- AI Agent in streaming mode has main output connections

### Pattern 2: Fallback Language Models

For production reliability, configure a primary and a fallback language model:

\`\`\`typescript
n8n_update_partial_workflow({
  operations: [
    // Primary model
    {
      type: "addConnection",
      source: "OpenAI GPT-4",
      target: "AI Agent",
      sourceOutput: "ai_languageModel",
      targetIndex: 0
    },
    // Fallback model
    {
      type: "addConnection",
      source: "Anthropic Claude",
      target: "AI Agent",
      sourceOutput: "ai_languageModel",
      targetIndex: 1
    },
    // Enable fallback on AI Agent
    {
      type: "updateNode",
      nodeName: "AI Agent",
      updates: {
        "parameters.needsFallback": true
      }
    }
  ]
})
\`\`\`

### Pattern 3: RAG (Retrieval-Augmented Generation)

Complete knowledge base setup:

\`\`\`typescript
// 1. Load documents
{type: "addConnection", source: "PDF Loader", target: "Text Splitter", sourceOutput: "ai_document"}

// 2. Split and embed
{type: "addConnection", source: "Text Splitter", target: "Vector Store"}
{type: "addConnection", source: "Embeddings", target: "Vector Store", sourceOutput: "ai_embedding"}

// 3. Create search tool
{type: "addConnection", source: "Vector Store", target: "Vector Store Tool", sourceOutput: "ai_vectorStore"}

// 4. Give tool to agent
{type: "addConnection", source: "Vector Store Tool", target: "AI Agent", sourceOutput: "ai_tool"}
\`\`\`

### Pattern 4: Multi-Agent Systems

Specialized sub-agents for complex tasks:

\`\`\`typescript
// Create sub-agents with specific expertise, then wire each one
// to the main coordinator agent as an AI Agent Tool
const subAgents = [
  {name: "research_agent", description: "Deep research specialist"},
  {name: "data_analyst", description: "Data analysis expert"},
  {name: "writer_agent", description: "Content writing specialist"}
];

n8n_update_partial_workflow({
  operations: subAgents.map(agent => ({
    type: "addConnection",
    source: agent.name,
    target: "Coordinator Agent",
    sourceOutput: "ai_tool"
  }))
})
\`\`\`

---

## 6. Validation & Best Practices {#validation}

### Always Validate Before Deployment

\`\`\`typescript
const result = n8n_validate_workflow({id: "workflow_id"})

if (!result.valid) {
  console.log("Errors:", result.errors)
  console.log("Warnings:", result.warnings)
  console.log("Suggestions:", result.suggestions)
}
\`\`\`

### Common Validation Errors

1. **MISSING_LANGUAGE_MODEL**
   - Problem: AI Agent has no ai_languageModel connection
   - Fix: Connect a language model before creating AI Agent

2. **MISSING_TOOL_DESCRIPTION**
   - Problem: HTTP Request Tool has no toolDescription
   - Fix: Add clear description (15+ characters)

3. **STREAMING_WITH_MAIN_OUTPUT**
   - Problem: AI Agent in streaming mode has outgoing main connections
   - Fix: Remove main connections when using streaming

4. **FALLBACK_MISSING_SECOND_MODEL**
   - Problem: needsFallback=true but only 1 language model
   - Fix: Add second language model or disable needsFallback (see the sketch below)
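
A minimal repair sketch for errors 1 and 4 above, reusing the node names from earlier in this guide (adjust to your own workflow):

\`\`\`typescript
n8n_update_partial_workflow({
  id: "workflow_id",
  operations: [
    // Fixes MISSING_LANGUAGE_MODEL: give the agent a primary model
    {
      type: "addConnection",
      source: "OpenAI Chat Model",
      target: "AI Agent",
      sourceOutput: "ai_languageModel"
    },
    // Fixes FALLBACK_MISSING_SECOND_MODEL: add a second model...
    {
      type: "addConnection",
      source: "Anthropic Claude",
      target: "AI Agent",
      sourceOutput: "ai_languageModel",
      targetIndex: 1
    }
    // ...or disable the flag instead:
    // {type: "updateNode", nodeName: "AI Agent", updates: {"parameters.needsFallback": false}}
  ]
})
\`\`\`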
### Best Practices Checklist

✅ **Before Creating AI Agent**:
- [ ] Language model is connected first
- [ ] At least one tool is prepared (or will be added)
- [ ] System message is thoughtful and specific

✅ **For Each Tool**:
- [ ] Has toolDescription/description (15+ characters)
- [ ] toolDescription explains WHEN to use the tool
- [ ] All required parameters are configured
- [ ] Credentials are set up if needed

✅ **For Production**:
- [ ] Workflow validated with n8n_validate_workflow
- [ ] Tested with real user queries
- [ ] Fallback model configured for reliability
- [ ] Error handling in place
- [ ] maxIterations set appropriately (default 10, max 50)

---

## 7. Troubleshooting {#troubleshooting}

### Problem: "AI Agent has no language model"

**Cause**: Connection created AFTER AI Agent or using wrong sourceOutput

**Solution**:
\`\`\`typescript
n8n_update_partial_workflow({
  operations: [
    {
      type: "addConnection",
      source: "OpenAI Chat Model",
      target: "AI Agent",
      sourceOutput: "ai_languageModel" // ← CRITICAL
    }
  ]
})
\`\`\`

### Problem: "Tool has no description"

**Cause**: HTTP Request Tool or Code Tool missing toolDescription/description

**Solution**:
\`\`\`typescript
{
  type: "updateNode",
  nodeName: "HTTP Request Tool",
  updates: {
    "parameters.toolDescription": "Call weather API to get current conditions for a city"
  }
}
\`\`\`

### Problem: "Streaming mode not working"

**Causes**:
1. Chat Trigger not set to streaming
2. AI Agent has main output connections
3. Target of Chat Trigger is not AI Agent

**Solution**:
\`\`\`typescript
// 1. Set Chat Trigger to streaming
{
  type: "updateNode",
  nodeName: "Chat Trigger",
  updates: {
    "parameters.options.responseMode": "streaming"
  }
}

// 2. Remove AI Agent main outputs
{
  type: "removeConnection",
  source: "AI Agent",
  target: "Any Output Node"
}
\`\`\`

### Problem: "Agent keeps looping"

**Cause**: Tool not returning proper response or agent stuck in reasoning loop

**Solutions**:
1. Set maxIterations lower: \`"parameters.maxIterations": 5\`
2. Improve tool descriptions to be more specific
3. Add system message guidance: "Use tools efficiently, don't repeat actions"
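
Solution 1, applied as a diff operation (a sketch using the updateNode convention from this guide):

\`\`\`typescript
n8n_update_partial_workflow({
  id: "workflow_id",
  operations: [
    {
      type: "updateNode",
      nodeName: "AI Agent",
      updates: {
        "parameters.maxIterations": 5
      }
    }
  ]
})
\`\`\`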
---

## Quick Reference

### Essential Tools

| Tool | Purpose | Key Parameters |
|------|---------|----------------|
| HTTP Request Tool | API calls | toolDescription, url, placeholders |
| Code Tool | Custom logic | name, description, code, inputSchema |
| Vector Store Tool | Knowledge search | description, topK |
| AI Agent Tool | Sub-agents | name, description, systemMessage |
| MCP Client Tool | MCP protocol | description, mcpServer, tool |

### Connection Quick Codes

\`\`\`typescript
// Language Model → AI Agent
sourceOutput: "ai_languageModel"

// Tool → AI Agent
sourceOutput: "ai_tool"

// Memory → AI Agent
sourceOutput: "ai_memory"

// Parser → AI Agent
sourceOutput: "ai_outputParser"

// Embeddings → Vector Store
sourceOutput: "ai_embedding"

// Vector Store → Vector Store Tool
sourceOutput: "ai_vectorStore"
\`\`\`

### Validation Command

\`\`\`typescript
n8n_validate_workflow({id: "workflow_id"})
\`\`\`

---

## Related Resources

- **FINAL_AI_VALIDATION_SPEC.md**: Complete validation rules
- **n8n_update_partial_workflow**: Workflow modification tool
- **search_nodes({query: "AI", includeExamples: true})**: Find AI nodes with examples
- **get_node_essentials({nodeType: "...", includeExamples: true})**: Node details with examples

---

*This guide is part of the n8n-mcp documentation system. For questions or issues, refer to the validation spec or use tools_documentation() for specific topics.*`,
    parameters: {},
    returns: 'Complete AI Agents guide with architecture, patterns, validation, and troubleshooting',
    examples: [
      'tools_documentation({topic: "ai_agents_guide"}) - Full guide',
      'tools_documentation({topic: "ai_agents_guide", depth: "essentials"}) - Quick reference',
      'When user asks about AI Agents, Chat Trigger, or building AI workflows → Point to this guide'
    ],
    useCases: [
      'Learning AI Agent architecture in n8n',
      'Understanding AI connection types and patterns',
      'Building first AI Agent workflow step-by-step',
      'Implementing advanced patterns (streaming, fallback, RAG, multi-agent)',
      'Troubleshooting AI workflow issues',
      'Validating AI workflows before deployment',
      'Quick reference for connection types and tools'
    ],
    performance: 'N/A - Static documentation',
    bestPractices: [
      'Reference this guide when users ask about AI Agents',
      'Point to specific sections based on user needs',
      'Combine with search_nodes(includeExamples=true) for working examples',
      'Validate workflows after following guide instructions',
      'Use FINAL_AI_VALIDATION_SPEC.md for detailed requirements'
    ],
    pitfalls: [
      'This is a guide, not an executable tool',
      'Always validate workflows after making changes',
      'AI connections require sourceOutput parameter',
      'Streaming mode has specific constraints',
      'Fallback models require AI Agent node with fallback support'
    ],
    relatedTools: [
      'n8n_create_workflow',
      'n8n_update_partial_workflow',
      'n8n_validate_workflow',
      'search_nodes',
      'get_node_essentials',
      'list_ai_tools'
    ]
  }
};
src/mcp/tool-docs/guides/index.ts (new file, 2 lines)
@@ -0,0 +1,2 @@
// Export all guides
export { aiAgentsGuide } from './ai-agents-guide';
@@ -25,12 +25,15 @@ import {
  searchTemplatesByMetadataDoc,
  getTemplatesForTaskDoc
} from './templates';
import {
  toolsDocumentationDoc,
  n8nDiagnosticDoc,
  n8nHealthCheckDoc,
  n8nListAvailableToolsDoc
} from './system';
import {
  aiAgentsGuide
} from './guides';
import {
  n8nCreateWorkflowDoc,
  n8nGetWorkflowDoc,

@@ -56,7 +59,10 @@ export const toolsDocumentation: Record<string, ToolDocumentation> = {
  n8n_diagnostic: n8nDiagnosticDoc,
  n8n_health_check: n8nHealthCheckDoc,
  n8n_list_available_tools: n8nListAvailableToolsDoc,

  // Guides
  ai_agents_guide: aiAgentsGuide,

  // Discovery tools
  search_nodes: searchNodesDoc,
  list_nodes: listNodesDoc,
@@ -4,14 +4,16 @@ export const n8nDiagnosticDoc: ToolDocumentation = {
  name: 'n8n_diagnostic',
  category: 'system',
  essentials: {
-   description: 'Diagnose n8n API configuration and troubleshoot why n8n management tools might not be working',
+   description: 'Comprehensive diagnostic with environment-aware debugging, version checks, performance metrics, and mode-specific troubleshooting',
    keyParameters: ['verbose'],
    example: 'n8n_diagnostic({verbose: true})',
-   performance: 'Instant - checks environment and configuration only',
+   performance: 'Fast - checks environment, API, and npm version (~180ms median)',
    tips: [
      'Run first when n8n tools are missing or failing - shows exact configuration issues',
      'Use verbose=true for detailed debugging info including environment variables',
-     'If tools are missing, check that N8N_API_URL and N8N_API_KEY are configured'
+     'Now includes environment-aware debugging based on MCP_MODE (http/stdio)',
+     'Provides mode-specific troubleshooting (HTTP server vs Claude Desktop)',
+     'Detects Docker and cloud platforms for targeted guidance',
+     'Shows performance metrics: response time and cache statistics',
+     'Includes data-driven tips based on 82% user success rate'
    ]
  },
  full: {

@@ -35,15 +37,31 @@ The diagnostic is essential when:
        default: false
      }
    },
-   returns: `Diagnostic report object containing:
-   - status: Overall health status ('ok', 'error', 'not_configured')
-   - apiUrl: Detected API URL (or null if not configured)
-   - apiKeyStatus: Status of API key ('configured', 'missing', 'invalid')
-   - toolsAvailable: Number of n8n management tools available
-   - connectivity: API connectivity test results
-   - errors: Array of specific error messages
-   - suggestions: Array of actionable fix suggestions
-   - verbose: Additional debug information (if verbose=true)`,
+   returns: `Comprehensive diagnostic report containing:
+   - timestamp: ISO timestamp of diagnostic run
+   - environment: Enhanced environment variables
+     - N8N_API_URL, N8N_API_KEY (masked), NODE_ENV, MCP_MODE
+     - isDocker: Boolean indicating if running in Docker
+     - cloudPlatform: Detected cloud platform (railway/render/fly/etc.) or null
+     - nodeVersion: Node.js version
+     - platform: OS platform (darwin/win32/linux)
+   - apiConfiguration: API configuration and connectivity status
+     - configured, status (connected/error/version), config details
+   - versionInfo: Version check results (current, latest, upToDate, message, updateCommand)
+   - toolsAvailability: Tool availability breakdown (doc tools + management tools)
+   - performance: Performance metrics (responseTimeMs, cacheHitRate, cachedInstances)
+   - modeSpecificDebug: Mode-specific debugging (ALWAYS PRESENT)
+     - HTTP mode: port, authTokenConfigured, serverUrl, healthCheckUrl, troubleshooting steps, commonIssues
+     - stdio mode: configLocation, troubleshooting steps, commonIssues
+   - dockerDebug: Docker-specific guidance (if IS_DOCKER=true)
+     - containerDetected, troubleshooting steps, commonIssues
+   - cloudPlatformDebug: Cloud platform-specific tips (if platform detected)
+     - name, troubleshooting steps tailored to platform (Railway/Render/Fly/K8s/AWS/etc.)
+   - nextSteps: Context-specific guidance (if API connected)
+   - troubleshooting: Troubleshooting guidance (if API not connecting)
+   - setupGuide: Setup guidance (if API not configured)
+   - updateWarning: Update recommendation (if version outdated)
+   - debug: Verbose debug information (if verbose=true)`,
    examples: [
      'n8n_diagnostic({}) - Quick diagnostic check',
      'n8n_diagnostic({verbose: true}) - Detailed diagnostic with environment info',
@@ -4,14 +4,15 @@ export const n8nHealthCheckDoc: ToolDocumentation = {
  name: 'n8n_health_check',
  category: 'system',
  essentials: {
-   description: 'Check n8n instance health, API connectivity, and available features',
+   description: 'Check n8n instance health, API connectivity, version status, and performance metrics',
    keyParameters: [],
    example: 'n8n_health_check({})',
-   performance: 'Fast - single API call to health endpoint',
+   performance: 'Fast - single API call (~150-200ms median)',
    tips: [
      'Use before starting workflow operations to ensure n8n is responsive',
      'Check regularly in production environments for monitoring',
-     'Returns version info and feature availability for compatibility checks'
+     'Automatically checks if n8n-mcp version is outdated',
+     'Returns version info, performance metrics, and next-step recommendations',
+     'New: Shows cache hit rate and response time for performance monitoring'
    ]
  },
  full: {

@@ -33,17 +34,27 @@ Health checks are crucial for:
    parameters: {},
    returns: `Health status object containing:
    - status: Overall health status ('healthy', 'degraded', 'error')
-   - version: n8n instance version information
+   - n8nVersion: n8n instance version information
    - instanceId: Unique identifier for the n8n instance
    - features: Object listing available features and their status
    - apiVersion: API version for compatibility checking
-   - responseTime: API response time in milliseconds
-   - timestamp: Check timestamp
-   - details: Additional health metrics from n8n`,
+   - mcpVersion: Current n8n-mcp version
+   - supportedN8nVersion: Recommended n8n version for compatibility
+   - versionCheck: Version status information
+     - current: Current n8n-mcp version
+     - latest: Latest available version from npm
+     - upToDate: Boolean indicating if version is current
+     - message: Formatted version status message
+     - updateCommand: Command to update (if outdated)
+   - performance: Performance metrics
+     - responseTimeMs: API response time in milliseconds
+     - cacheHitRate: Cache efficiency percentage
+     - cachedInstances: Number of cached API instances
+   - nextSteps: Recommended actions after health check
+   - updateWarning: Warning if version is outdated (if applicable)`,
    examples: [
-     'n8n_health_check({}) - Standard health check',
-     '// Use in monitoring scripts\nconst health = await n8n_health_check({});\nif (health.status !== "healthy") alert("n8n is down!");',
-     '// Check before critical operations\nconst health = await n8n_health_check({});\nif (health.responseTime > 1000) console.warn("n8n is slow");'
+     'n8n_health_check({}) - Complete health check with version and performance data',
+     '// Use in monitoring scripts\nconst health = await n8n_health_check({});\nif (health.status !== "ok") alert("n8n is down!");\nif (!health.versionCheck.upToDate) console.log("Update available:", health.versionCheck.updateCommand);',
+     '// Check before critical operations\nconst health = await n8n_health_check({});\nif (health.performance.responseTimeMs > 1000) console.warn("n8n is slow");\nif (health.versionCheck.isOutdated) console.log(health.updateWarning);'
    ],
    useCases: [
      'Pre-flight checks before workflow deployments',
@@ -11,7 +11,8 @@ export const validateNodeOperationDoc: ToolDocumentation = {
    tips: [
      'Profile choices: minimal (editing), runtime (execution), ai-friendly (balanced), strict (deployment)',
      'Returns fixes you can apply directly',
-     'Operation-aware - knows Slack post needs text'
+     'Operation-aware - knows Slack post needs text',
+     'Validates operator structures for IF and Switch nodes with conditions'
    ]
  },
  full: {

@@ -71,7 +72,9 @@ export const validateNodeOperationDoc: ToolDocumentation = {
      'Validate configuration before workflow execution',
      'Debug why a node isn\'t working as expected',
      'Generate configuration fixes automatically',
-     'Different validation for editing vs production'
+     'Different validation for editing vs production',
+     'Check IF/Switch operator structures (binary vs unary operators)',
+     'Validate conditions.options metadata for filter-based nodes'
    ],
    performance: '<100ms for most nodes, <200ms for complex nodes with many conditions',
    bestPractices: [

@@ -85,7 +88,10 @@ export const validateNodeOperationDoc: ToolDocumentation = {
    pitfalls: [
      'Must include operation fields for multi-operation nodes',
      'Fixes are suggestions - review before applying',
-     'Profile affects what\'s validated - minimal skips many checks'
+     'Profile affects what\'s validated - minimal skips many checks',
+     '**Binary vs Unary operators**: Binary operators (equals, contains, greaterThan) must NOT have singleValue:true. Unary operators (isEmpty, isNotEmpty, true, false) REQUIRE singleValue:true',
+     '**IF and Switch nodes with conditions**: Must have complete conditions.options structure: {version: 2, leftValue: "", caseSensitive: true/false, typeValidation: "strict"}',
+     '**Operator type field**: Must be data type (string/number/boolean/dateTime/array/object), NOT operation name (e.g., use type:"string" operation:"equals", not type:"equals")'
    ],
    relatedTools: ['validate_node_minimal for quick checks', 'get_node_essentials for valid examples', 'validate_workflow for complete workflow validation']
  }
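// Illustrative condition fragments (hypothetical values, matching the operator pitfalls above):
// a binary operator takes both sides and no singleValue flag:
//   { leftValue: "={{ $json.status }}", rightValue: "active",
//     operator: { type: "string", operation: "equals" } }
// a unary operator requires singleValue: true and no rightValue:
//   { leftValue: "={{ $json.status }}",
//     operator: { type: "string", operation: "isEmpty", singleValue: true } }
// with conditions.options = { version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict" }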
@@ -11,7 +11,8 @@ export const validateWorkflowDoc: ToolDocumentation = {
    tips: [
      'Always validate before n8n_create_workflow to catch errors early',
      'Use options.profile="minimal" for quick checks during development',
-     'AI tool connections are automatically validated for proper node references'
+     'AI tool connections are automatically validated for proper node references',
+     'Detects operator structure issues (binary vs unary, singleValue requirements)'
    ]
  },
  full: {

@@ -67,7 +68,9 @@ export const validateWorkflowDoc: ToolDocumentation = {
      'Use minimal profile during development, strict profile before production',
      'Pay attention to warnings - they often indicate potential runtime issues',
      'Validate after any workflow modifications, especially connection changes',
-     'Check statistics to understand workflow complexity'
+     'Check statistics to understand workflow complexity',
+     '**Auto-sanitization runs during create/update**: Operator structures and missing metadata are automatically fixed when workflows are created or updated, but validation helps catch issues before they reach n8n',
+     'If validation detects operator issues, they will be auto-fixed during n8n_create_workflow or n8n_update_partial_workflow'
    ],
    pitfalls: [
      'Large workflows (100+ nodes) may take longer to validate',
@@ -4,15 +4,17 @@ export const n8nAutofixWorkflowDoc: ToolDocumentation = {
  name: 'n8n_autofix_workflow',
  category: 'workflow_management',
  essentials: {
-   description: 'Automatically fix common workflow validation errors - expression formats, typeVersions, error outputs, webhook paths',
+   description: 'Automatically fix common workflow validation errors - expression formats, typeVersions, error outputs, webhook paths, and smart version upgrades',
    keyParameters: ['id', 'applyFixes'],
    example: 'n8n_autofix_workflow({id: "wf_abc123", applyFixes: false})',
-   performance: 'Network-dependent (200-1000ms) - fetches, validates, and optionally updates workflow',
+   performance: 'Network-dependent (200-1500ms) - fetches, validates, and optionally updates workflow with smart migrations',
    tips: [
      'Use applyFixes: false to preview changes before applying',
      'Set confidenceThreshold to control fix aggressiveness (high/medium/low)',
-     'Supports fixing expression formats, typeVersion issues, error outputs, node type corrections, and webhook paths',
-     'High-confidence fixes (≥90%) are safe for auto-application'
+     'Supports expression formats, typeVersion issues, error outputs, node corrections, webhook paths, AND version upgrades',
+     'High-confidence fixes (≥90%) are safe for auto-application',
+     'Version upgrades include smart migration with breaking change detection',
+     'Post-update guidance provides AI-friendly step-by-step instructions for manual changes'
    ]
  },
  full: {

@@ -39,6 +41,20 @@ The auto-fixer can resolve:
   - Sets both 'path' parameter and 'webhookId' field to the same UUID
   - Ensures webhook nodes become functional with valid endpoints
   - High confidence fix as UUID generation is deterministic
+6. **Smart Version Upgrades** (NEW): Proactively upgrades nodes to their latest versions:
+  - Detects outdated node versions and recommends upgrades
+  - Applies smart migrations with auto-migratable property changes
+  - Handles breaking changes intelligently (Execute Workflow v1.0→v1.1, Webhook v2.0→v2.1, etc.)
+  - Generates UUIDs for required fields (webhookId), sets sensible defaults
+  - HIGH confidence for non-breaking upgrades, MEDIUM for breaking changes with auto-migration
+  - Example: Execute Workflow v1.0→v1.1 adds inputFieldMapping automatically
+7. **Version Migration Guidance** (NEW): Documents complex migrations requiring manual intervention:
+  - Identifies breaking changes that cannot be auto-migrated
+  - Provides AI-friendly post-update guidance with step-by-step instructions
+  - Lists required actions by priority (CRITICAL, HIGH, MEDIUM, LOW)
+  - Documents behavior changes and their impact
+  - Estimates time required for manual migration steps
+  - MEDIUM/LOW confidence - requires review before applying

The tool uses a confidence-based system to ensure safe fixes:
- **High (≥90%)**: Safe to auto-apply (exact matches, known patterns)

@@ -60,7 +76,7 @@ Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`,
    fixTypes: {
      type: 'array',
      required: false,
-     description: 'Types of fixes to apply. Options: ["expression-format", "typeversion-correction", "error-output-config", "node-type-correction", "webhook-missing-path"]. Default: all types.'
+     description: 'Types of fixes to apply. Options: ["expression-format", "typeversion-correction", "error-output-config", "node-type-correction", "webhook-missing-path", "typeversion-upgrade", "version-migration"]. Default: all types. NEW: "typeversion-upgrade" for smart version upgrades, "version-migration" for complex migration guidance.'
    },
    confidenceThreshold: {
      type: 'string',

@@ -78,13 +94,21 @@ Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`,
    - fixes: Detailed list of individual fixes with before/after values
    - summary: Human-readable summary of fixes
    - stats: Statistics by fix type and confidence level
-   - applied: Boolean indicating if fixes were applied (when applyFixes: true)`,
+   - applied: Boolean indicating if fixes were applied (when applyFixes: true)
+   - postUpdateGuidance: (NEW) Array of AI-friendly migration guidance for version upgrades, including:
+     * Required actions by priority (CRITICAL, HIGH, MEDIUM, LOW)
+     * Deprecated properties to remove
+     * Behavior changes and their impact
+     * Step-by-step migration instructions
+     * Estimated time for manual changes`,
    examples: [
-     'n8n_autofix_workflow({id: "wf_abc123"}) - Preview all possible fixes',
+     'n8n_autofix_workflow({id: "wf_abc123"}) - Preview all possible fixes including version upgrades',
      'n8n_autofix_workflow({id: "wf_abc123", applyFixes: true}) - Apply all medium+ confidence fixes',
      'n8n_autofix_workflow({id: "wf_abc123", applyFixes: true, confidenceThreshold: "high"}) - Only apply high-confidence fixes',
      'n8n_autofix_workflow({id: "wf_abc123", fixTypes: ["expression-format"]}) - Only fix expression format issues',
      'n8n_autofix_workflow({id: "wf_abc123", fixTypes: ["webhook-missing-path"]}) - Only fix webhook path issues',
+     'n8n_autofix_workflow({id: "wf_abc123", fixTypes: ["typeversion-upgrade"]}) - NEW: Only upgrade node versions with smart migrations',
+     'n8n_autofix_workflow({id: "wf_abc123", fixTypes: ["typeversion-upgrade", "version-migration"]}) - NEW: Upgrade versions and provide migration guidance',
      'n8n_autofix_workflow({id: "wf_abc123", applyFixes: true, maxFixes: 10}) - Apply up to 10 fixes'
    ],
    useCases: [

@@ -94,16 +118,23 @@ Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`,
      'Cleaning up workflows before production deployment',
      'Batch fixing common issues across multiple workflows',
      'Migrating workflows between n8n instances with different versions',
-     'Repairing webhook nodes that lost their path configuration'
+     'Repairing webhook nodes that lost their path configuration',
+     'Upgrading Execute Workflow nodes from v1.0 to v1.1+ with automatic inputFieldMapping',
+     'Modernizing webhook nodes to v2.1+ with stable webhookId fields',
+     'Proactively keeping workflows up-to-date with latest node versions',
+     'Getting detailed migration guidance for complex breaking changes'
    ],
-   performance: 'Depends on workflow size and number of issues. Preview mode: 200-500ms. Apply mode: 500-1000ms for medium workflows. Node similarity matching is cached for 5 minutes for improved performance on repeated validations.',
+   performance: 'Depends on workflow size and number of issues. Preview mode: 200-500ms. Apply mode: 500-1500ms for medium workflows with version upgrades. Node similarity matching and version metadata are cached for 5 minutes for improved performance on repeated validations.',
    bestPractices: [
      'Always preview fixes first (applyFixes: false) before applying',
      'Start with high confidence threshold for production workflows',
      'Review the fix summary to understand what changed',
      'Test workflows after auto-fixing to ensure expected behavior',
      'Use fixTypes parameter to target specific issue categories',
-     'Keep maxFixes reasonable to avoid too many changes at once'
+     'Keep maxFixes reasonable to avoid too many changes at once',
+     'NEW: Review postUpdateGuidance for version upgrades - contains step-by-step migration instructions',
+     'NEW: Test workflows after version upgrades - behavior may change even with successful auto-migration',
+     'NEW: Apply version upgrades incrementally - start with high-confidence, non-breaking upgrades'
    ],
    pitfalls: [
      'Some fixes may change workflow behavior - always test after fixing',

@@ -112,7 +143,12 @@ Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`,
      'Node type corrections only work for known node types in the database',
      'Cannot fix structural issues like missing nodes or invalid connections',
      'TypeVersion downgrades might remove node features added in newer versions',
-     'Generated webhook paths are new UUIDs - existing webhook URLs will change'
+     'Generated webhook paths are new UUIDs - existing webhook URLs will change',
+     'NEW: Version upgrades may introduce breaking changes - review postUpdateGuidance carefully',
+     'NEW: Auto-migrated properties use sensible defaults which may not match your use case',
+     'NEW: Execute Workflow v1.1+ requires explicit inputFieldMapping - automatic mapping uses empty array',
+     'NEW: Some breaking changes cannot be auto-migrated and require manual intervention',
+     'NEW: Version history is based on registry - unknown nodes cannot be upgraded'
    ],
    relatedTools: [
      'n8n_validate_workflow',
@@ -11,7 +11,8 @@ export const n8nCreateWorkflowDoc: ToolDocumentation = {
tips: [
'Workflow created inactive',
'Returns ID for future updates',
'Validate first with validate_workflow'
'Validate first with validate_workflow',
'Auto-sanitization fixes operator structures and missing metadata during creation'
]
},
full: {
@@ -90,7 +91,9 @@ n8n_create_workflow({
'Workflows created in INACTIVE state - must activate separately',
'Node IDs must be unique within workflow',
'Credentials must be configured separately in n8n',
'Node type names must include package prefix (e.g., "n8n-nodes-base.slack")'
'Node type names must include package prefix (e.g., "n8n-nodes-base.slack")',
'**Auto-sanitization runs on creation**: All nodes sanitized before workflow created (operator structures fixed, missing metadata added)',
'**Auto-sanitization cannot prevent all failures**: Broken connections or invalid node configurations may still cause creation to fail'
],
relatedTools: ['validate_workflow', 'n8n_update_partial_workflow', 'n8n_trigger_webhook_workflow']
}

@@ -4,19 +4,26 @@ export const n8nUpdatePartialWorkflowDoc: ToolDocumentation = {
name: 'n8n_update_partial_workflow',
category: 'workflow_management',
essentials: {
description: 'Update workflow incrementally with diff operations. Types: addNode, removeNode, updateNode, moveNode, enable/disableNode, addConnection, removeConnection, cleanStaleConnections, replaceConnections, updateSettings, updateName, add/removeTag.',
description: 'Update workflow incrementally with diff operations. Types: addNode, removeNode, updateNode, moveNode, enable/disableNode, addConnection, removeConnection, rewireConnection, cleanStaleConnections, replaceConnections, updateSettings, updateName, add/removeTag. Supports smart parameters (branch, case) for multi-output nodes. Full support for AI connections (ai_languageModel, ai_tool, ai_memory, ai_embedding, ai_vectorStore, ai_document, ai_textSplitter, ai_outputParser).',
keyParameters: ['id', 'operations', 'continueOnError'],
example: 'n8n_update_partial_workflow({id: "wf_123", operations: [{type: "cleanStaleConnections"}]})',
example: 'n8n_update_partial_workflow({id: "wf_123", operations: [{type: "rewireConnection", source: "IF", from: "Old", to: "New", branch: "true"}]})',
performance: 'Fast (50-200ms)',
tips: [
'Use rewireConnection to change connection targets',
'Use branch="true"/"false" for IF nodes',
'Use case=N for Switch nodes',
'Use cleanStaleConnections to auto-remove broken connections',
'Set ignoreErrors:true on removeConnection for cleanup',
'Use continueOnError mode for best-effort bulk operations',
'Validate with validateOnly first'
'Validate with validateOnly first',
'For AI connections, specify sourceOutput type (ai_languageModel, ai_tool, etc.)',
'Batch AI component connections for atomic updates',
'Auto-sanitization: ALL nodes auto-fixed during updates (operator structures, missing metadata)',
'Node renames automatically update all connection references - no manual connection operations needed'
]
},
full: {
description: `Updates workflows using surgical diff operations instead of full replacement. Supports 15 operation types for precise modifications. Operations are validated and applied atomically by default - all succeed or none are applied. v2.14.4 adds cleanup operations and best-effort mode for workflow recovery scenarios.
description: `Updates workflows using surgical diff operations instead of full replacement. Supports 15 operation types for precise modifications. Operations are validated and applied atomically by default - all succeed or none are applied.

## Available Operations:

@@ -29,11 +36,11 @@ export const n8nUpdatePartialWorkflowDoc: ToolDocumentation = {
- **disableNode**: Disable an active node

### Connection Operations (5 types):
- **addConnection**: Connect nodes (source→target)
- **addConnection**: Connect nodes (source→target). Supports smart parameters: branch="true"/"false" for IF nodes, case=N for Switch nodes.
- **removeConnection**: Remove connection between nodes (supports ignoreErrors flag)
- **updateConnection**: Modify connection properties
- **cleanStaleConnections**: Auto-remove all connections referencing non-existent nodes (NEW in v2.14.4)
- **replaceConnections**: Replace entire connections object (NEW in v2.14.4)
- **rewireConnection**: Change connection target from one node to another. Supports smart parameters.
- **cleanStaleConnections**: Auto-remove all connections referencing non-existent nodes
- **replaceConnections**: Replace entire connections object

### Metadata Operations (4 types):
- **updateSettings**: Modify workflow settings
@@ -41,7 +48,50 @@ export const n8nUpdatePartialWorkflowDoc: ToolDocumentation = {
- **addTag**: Add a workflow tag
- **removeTag**: Remove a workflow tag

## New in v2.14.4: Cleanup & Recovery Features
## Smart Parameters for Multi-Output Nodes

For **IF nodes**, use semantic 'branch' parameter instead of technical sourceIndex:
- **branch="true"**: Routes to true branch (sourceIndex=0)
- **branch="false"**: Routes to false branch (sourceIndex=1)

For **Switch nodes**, use semantic 'case' parameter:
- **case=0**: First output
- **case=1**: Second output
- **case=N**: Nth output

Works with addConnection and rewireConnection operations. Explicit sourceIndex overrides smart parameters.

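A minimal sketch of these semantics (workflow ID and node names are illustrative, not from this repo):

\`\`\`javascript
// Route both IF outputs and a Switch case semantically.
// Unlike reusing sourceIndex=0, branch/case cannot stack targets on the wrong output.
n8n_update_partial_workflow({
  id: "wf_123",
  operations: [
    {type: "addConnection", source: "IF", target: "Success Handler", branch: "true"},   // sourceIndex=0
    {type: "addConnection", source: "IF", target: "Error Handler", branch: "false"},    // sourceIndex=1
    {type: "addConnection", source: "Switch", target: "Handler C", case: 2}             // third Switch output
  ]
});
\`\`\`
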
## AI Connection Support

Full support for all 8 AI connection types used in n8n AI workflows:

**Connection Types**:
- **ai_languageModel**: Connect language models (OpenAI, Anthropic, Google Gemini) to AI Agents
- **ai_tool**: Connect tools (HTTP Request Tool, Code Tool, etc.) to AI Agents
- **ai_memory**: Connect memory systems (Window Buffer, Conversation Summary) to AI Agents
- **ai_outputParser**: Connect output parsers (Structured, JSON) to AI Agents
- **ai_embedding**: Connect embedding models to Vector Stores
- **ai_vectorStore**: Connect vector stores to Vector Store Tools
- **ai_document**: Connect document loaders to Vector Stores
- **ai_textSplitter**: Connect text splitters to document processing chains

**AI Connection Examples**:
- Single connection: \`{type: "addConnection", source: "OpenAI", target: "AI Agent", sourceOutput: "ai_languageModel"}\`
- Fallback model: Use targetIndex (0=primary, 1=fallback) for dual language model setup
- Multiple tools: Batch multiple \`sourceOutput: "ai_tool"\` connections to one AI Agent
- Vector retrieval: Chain ai_embedding → ai_vectorStore → ai_tool → AI Agent

**Important Notes**:
- **AI nodes do NOT require main connections**: Nodes like OpenAI Chat Model, Postgres Chat Memory, Embeddings OpenAI, and Supabase Vector Store use AI-specific connection types exclusively. They should ONLY have connections like \`ai_languageModel\`, \`ai_memory\`, \`ai_embedding\`, or \`ai_tool\` - NOT \`main\` connections.
- **Fixed in v2.21.1**: Validation now correctly recognizes AI nodes that only have AI-specific connections without requiring \`main\` connections (resolves issue #357).

**Best Practices**:
- Always specify \`sourceOutput\` for AI connections (defaults to "main" if omitted)
- Connect language model BEFORE creating/enabling AI Agent (validation requirement)
- Use atomic mode (default) when setting up AI workflows to ensure complete configuration
- Validate AI workflows after changes with \`n8n_validate_workflow\` tool

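The vector retrieval chain above, written out as one atomic batch (adapted from the examples list further below; node names are illustrative):

\`\`\`javascript
// embeddings -> vector store -> vector store tool -> AI Agent, applied atomically
n8n_update_partial_workflow({
  id: "wf_rag",   // hypothetical workflow ID
  operations: [
    {type: "addConnection", source: "Embeddings OpenAI", target: "Pinecone Vector Store", sourceOutput: "ai_embedding"},
    {type: "addConnection", source: "Default Data Loader", target: "Pinecone Vector Store", sourceOutput: "ai_document"},
    {type: "addConnection", source: "Pinecone Vector Store", target: "Vector Store Tool", sourceOutput: "ai_vectorStore"},
    {type: "addConnection", source: "Vector Store Tool", target: "AI Agent", sourceOutput: "ai_tool"}
  ]
});
\`\`\`
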
## Cleanup & Recovery Features

### Automatic Cleanup
The **cleanStaleConnections** operation automatically removes broken connection references after node renames/deletions. Essential for workflow recovery.
@@ -50,7 +100,201 @@ The **cleanStaleConnections** operation automatically removes broken connection
Set **continueOnError: true** to apply valid operations even if some fail. Returns detailed results showing which operations succeeded/failed. Perfect for bulk cleanup operations.

### Graceful Error Handling
Add **ignoreErrors: true** to removeConnection operations to prevent failures when connections don't exist.`,
Add **ignoreErrors: true** to removeConnection operations to prevent failures when connections don't exist.

## Auto-Sanitization System

### What Gets Auto-Fixed
When ANY workflow update is made, ALL nodes in the workflow are automatically sanitized to ensure complete metadata and correct structure:

1. **Operator Structure Fixes**:
- Binary operators (equals, contains, greaterThan, etc.) automatically have \`singleValue\` removed
- Unary operators (isEmpty, isNotEmpty, true, false) automatically get \`singleValue: true\` added
- Invalid operator structures (e.g., \`{type: "isNotEmpty"}\`) are corrected to \`{type: "boolean", operation: "isNotEmpty"}\`

2. **Missing Metadata Added**:
- IF nodes with conditions get complete \`conditions.options\` structure if missing
- Switch nodes with conditions get complete \`conditions.options\` for all rules
- Required fields: \`{version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}\`

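An illustrative before/after for a single IF node operator, restating the fixes above as fragments (the full surrounding condition schema is n8n's and is omitted here):

\`\`\`javascript
// Before sanitization: invalid unary operator structure
operator: { type: "isNotEmpty" }

// After sanitization: corrected type/operation, plus singleValue for the unary operator;
// the enclosing conditions also gain complete options metadata
operator: { type: "boolean", operation: "isNotEmpty", singleValue: true }
// conditions.options => { version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict" }
\`\`\`
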
### Sanitization Scope
- Runs on **ALL nodes** in the workflow, not just modified ones
- Triggered by ANY update operation (addNode, updateNode, addConnection, etc.)
- Prevents workflow corruption that would make UI unrenderable

### Limitations
Auto-sanitization CANNOT fix:
- Broken connections (connections referencing non-existent nodes) - use \`cleanStaleConnections\`
- Branch count mismatches (e.g., Switch with 3 rules but only 2 outputs) - requires manual connection fixes
- Workflows in paradoxical corrupt states (API returns corrupt data, API rejects updates) - must recreate workflow

### Recovery Guidance
If validation still fails after auto-sanitization:
1. Check error details for specific issues
2. Use \`validate_workflow\` to see all validation errors
3. For connection issues, use \`cleanStaleConnections\` operation
4. For branch mismatches, add missing output connections
5. For paradoxical corrupted workflows, create new workflow and migrate nodes

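A minimal recovery sketch combining steps 2-3 above with best-effort mode (workflow ID and node names are illustrative):

\`\`\`javascript
// Best-effort cleanup: keep whatever succeeds, report what fails
n8n_update_partial_workflow({
  id: "wf_broken",
  operations: [
    {type: "cleanStaleConnections"},
    {type: "removeConnection", source: "Old Node", target: "Target", ignoreErrors: true}
  ],
  continueOnError: true
});
// Then re-validate with n8n_validate_workflow to confirm the workflow is healthy
\`\`\`
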
## Automatic Connection Reference Updates

When you rename a node using **updateNode**, all connection references throughout the workflow are automatically updated. Both the connection source keys and target references are updated for all connection types (main, error, ai_tool, ai_languageModel, ai_memory, etc.) and all branch configurations (IF node branches, Switch node cases, error outputs).

### Basic Example
\`\`\`javascript
// Rename a node - connections update automatically
n8n_update_partial_workflow({
  id: "wf_123",
  operations: [{
    type: "updateNode",
    nodeId: "node_abc",
    updates: { name: "Data Processor" }
  }]
});
// All incoming and outgoing connections now reference "Data Processor"
\`\`\`

### Multi-Output Node Example
\`\`\`javascript
// Rename nodes in a branching workflow
n8n_update_partial_workflow({
  id: "workflow_id",
  operations: [
    {
      type: "updateNode",
      nodeId: "if_node_id",
      updates: { name: "Value Checker" }
    },
    {
      type: "updateNode",
      nodeId: "error_node_id",
      updates: { name: "Error Handler" }
    }
  ]
});
// IF node branches and error connections automatically updated
\`\`\`

### Name Collision Protection
Attempting to rename a node to an existing name returns a clear error:
\`\`\`
Cannot rename node "Old Name" to "New Name": A node with that name already exists (id: abc123...).
Please choose a different name.
\`\`\`

### Usage Notes
- Simply rename nodes with updateNode - no manual connection operations needed
- Multiple renames in one call work atomically
- Can rename a node and add/remove connections using the new name in the same batch
- Use \`validateOnly: true\` to preview effects before applying

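A small sketch of the rename-and-reconnect batch noted above (IDs and names are illustrative):

\`\`\`javascript
// Rename a node, then connect using the NEW name in the same atomic call
n8n_update_partial_workflow({
  id: "wf_123",
  operations: [
    {type: "updateNode", nodeId: "node_abc", updates: {name: "Data Processor"}},
    {type: "addConnection", source: "Data Processor", target: "Slack"}
  ]
});
\`\`\`
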
## Removing Properties with undefined

To remove a property from a node, set its value to \`undefined\` in the updates object. This is essential when migrating from deprecated properties or cleaning up optional configuration fields.

### Why Use undefined?
- **Property removal vs. null**: Setting a property to \`undefined\` removes it completely from the node object, while \`null\` sets the property to a null value
- **Validation constraints**: Some properties are mutually exclusive (e.g., \`continueOnFail\` and \`onError\`). Simply setting one without removing the other will fail validation
- **Deprecated property migration**: When n8n deprecates properties, you must remove the old property before the new one will work

### Basic Property Removal
\`\`\`javascript
// Remove error handling configuration
n8n_update_partial_workflow({
  id: "wf_123",
  operations: [{
    type: "updateNode",
    nodeName: "HTTP Request",
    updates: { onError: undefined }
  }]
});

// Remove disabled flag
n8n_update_partial_workflow({
  id: "wf_456",
  operations: [{
    type: "updateNode",
    nodeId: "node_abc",
    updates: { disabled: undefined }
  }]
});
\`\`\`

### Nested Property Removal
Use dot notation to remove nested properties:
\`\`\`javascript
// Remove nested parameter
n8n_update_partial_workflow({
  id: "wf_789",
  operations: [{
    type: "updateNode",
    nodeName: "API Request",
    updates: { "parameters.authentication": undefined }
  }]
});

// Remove entire array property
n8n_update_partial_workflow({
  id: "wf_012",
  operations: [{
    type: "updateNode",
    nodeName: "HTTP Request",
    updates: { "parameters.headers": undefined }
  }]
});
\`\`\`

### Migrating from Deprecated Properties
Common scenario: replacing \`continueOnFail\` with \`onError\`:
\`\`\`javascript
// WRONG: Setting only the new property leaves the old one
n8n_update_partial_workflow({
  id: "wf_123",
  operations: [{
    type: "updateNode",
    nodeName: "HTTP Request",
    updates: { onError: "continueErrorOutput" }
  }]
});
// Error: continueOnFail and onError are mutually exclusive

// CORRECT: Remove the old property first
n8n_update_partial_workflow({
  id: "wf_123",
  operations: [{
    type: "updateNode",
    nodeName: "HTTP Request",
    updates: {
      continueOnFail: undefined,
      onError: "continueErrorOutput"
    }
  }]
});
\`\`\`

### Batch Property Removal
Remove multiple properties in one operation:
\`\`\`javascript
n8n_update_partial_workflow({
  id: "wf_345",
  operations: [{
    type: "updateNode",
    nodeName: "Data Processor",
    updates: {
      continueOnFail: undefined,
      alwaysOutputData: undefined,
      "parameters.legacy_option": undefined
    }
  }]
});
\`\`\`

### When to Use undefined
- Removing deprecated properties during migration
- Cleaning up optional configuration flags
- Resolving mutual exclusivity validation errors
- Removing stale or unnecessary node metadata
- Simplifying node configuration`,
parameters: {
id: { type: 'string', required: true, description: 'Workflow ID to update' },
operations: {
@@ -66,15 +310,38 @@ Add **ignoreErrors: true** to removeConnection operations to prevent failures wh
'// Add a basic node (minimal configuration)\nn8n_update_partial_workflow({id: "abc", operations: [{type: "addNode", node: {name: "Process Data", type: "n8n-nodes-base.set", position: [400, 300], parameters: {}}}]})',
'// Add node with full configuration\nn8n_update_partial_workflow({id: "def", operations: [{type: "addNode", node: {name: "Send Slack Alert", type: "n8n-nodes-base.slack", position: [600, 300], typeVersion: 2, parameters: {resource: "message", operation: "post", channel: "#alerts", text: "Success!"}}}]})',
'// Add node AND connect it (common pattern)\nn8n_update_partial_workflow({id: "ghi", operations: [\n  {type: "addNode", node: {name: "HTTP Request", type: "n8n-nodes-base.httpRequest", position: [400, 300], parameters: {url: "https://api.example.com", method: "GET"}}},\n  {type: "addConnection", source: "Webhook", target: "HTTP Request"}\n]})',
'// Add multiple nodes in batch\nn8n_update_partial_workflow({id: "jkl", operations: [\n  {type: "addNode", node: {name: "Filter", type: "n8n-nodes-base.filter", position: [400, 300], parameters: {}}},\n  {type: "addNode", node: {name: "Transform", type: "n8n-nodes-base.set", position: [600, 300], parameters: {}}},\n  {type: "addConnection", source: "Filter", target: "Transform"}\n]})',
'// Clean up stale connections after node renames/deletions\nn8n_update_partial_workflow({id: "mno", operations: [{type: "cleanStaleConnections"}]})',
'// Remove connection gracefully (no error if it doesn\'t exist)\nn8n_update_partial_workflow({id: "pqr", operations: [{type: "removeConnection", source: "Old Node", target: "Target", ignoreErrors: true}]})',
'// Best-effort mode: apply what works, report what fails\nn8n_update_partial_workflow({id: "stu", operations: [\n  {type: "updateName", name: "Fixed Workflow"},\n  {type: "removeConnection", source: "Broken", target: "Node"},\n  {type: "cleanStaleConnections"}\n], continueOnError: true})',
'// Replace entire connections object\nn8n_update_partial_workflow({id: "vwx", operations: [{type: "replaceConnections", connections: {"Webhook": {"main": [[{node: "Slack", type: "main", index: 0}]]}}}]})',
'// Update node parameter (classic atomic mode)\nn8n_update_partial_workflow({id: "yza", operations: [{type: "updateNode", nodeName: "HTTP Request", updates: {"parameters.url": "https://api.example.com"}}]})',
'// Validate before applying\nn8n_update_partial_workflow({id: "bcd", operations: [{type: "removeNode", nodeName: "Old Process"}], validateOnly: true})'
'// Rewire connection from one target to another\nn8n_update_partial_workflow({id: "xyz", operations: [{type: "rewireConnection", source: "Webhook", from: "Old Handler", to: "New Handler"}]})',
'// Smart parameter: IF node true branch\nn8n_update_partial_workflow({id: "abc", operations: [{type: "addConnection", source: "IF", target: "Success Handler", branch: "true"}]})',
'// Smart parameter: IF node false branch\nn8n_update_partial_workflow({id: "def", operations: [{type: "addConnection", source: "IF", target: "Error Handler", branch: "false"}]})',
'// Smart parameter: Switch node case routing\nn8n_update_partial_workflow({id: "ghi", operations: [\n  {type: "addConnection", source: "Switch", target: "Handler A", case: 0},\n  {type: "addConnection", source: "Switch", target: "Handler B", case: 1},\n  {type: "addConnection", source: "Switch", target: "Handler C", case: 2}\n]})',
'// Rewire with smart parameter\nn8n_update_partial_workflow({id: "jkl", operations: [{type: "rewireConnection", source: "IF", from: "Old True Handler", to: "New True Handler", branch: "true"}]})',
'// Add multiple nodes in batch\nn8n_update_partial_workflow({id: "mno", operations: [\n  {type: "addNode", node: {name: "Filter", type: "n8n-nodes-base.filter", position: [400, 300], parameters: {}}},\n  {type: "addNode", node: {name: "Transform", type: "n8n-nodes-base.set", position: [600, 300], parameters: {}}},\n  {type: "addConnection", source: "Filter", target: "Transform"}\n]})',
'// Clean up stale connections after node renames/deletions\nn8n_update_partial_workflow({id: "pqr", operations: [{type: "cleanStaleConnections"}]})',
'// Remove connection gracefully (no error if it doesn\'t exist)\nn8n_update_partial_workflow({id: "stu", operations: [{type: "removeConnection", source: "Old Node", target: "Target", ignoreErrors: true}]})',
'// Best-effort mode: apply what works, report what fails\nn8n_update_partial_workflow({id: "vwx", operations: [\n  {type: "updateName", name: "Fixed Workflow"},\n  {type: "removeConnection", source: "Broken", target: "Node"},\n  {type: "cleanStaleConnections"}\n], continueOnError: true})',
'// Update node parameter\nn8n_update_partial_workflow({id: "yza", operations: [{type: "updateNode", nodeName: "HTTP Request", updates: {"parameters.url": "https://api.example.com"}}]})',
'// Validate before applying\nn8n_update_partial_workflow({id: "bcd", operations: [{type: "removeNode", nodeName: "Old Process"}], validateOnly: true})',
'\n// ============ AI CONNECTION EXAMPLES ============',
'// Connect language model to AI Agent\nn8n_update_partial_workflow({id: "ai1", operations: [{type: "addConnection", source: "OpenAI Chat Model", target: "AI Agent", sourceOutput: "ai_languageModel"}]})',
'// Connect tool to AI Agent\nn8n_update_partial_workflow({id: "ai2", operations: [{type: "addConnection", source: "HTTP Request Tool", target: "AI Agent", sourceOutput: "ai_tool"}]})',
'// Connect memory to AI Agent\nn8n_update_partial_workflow({id: "ai3", operations: [{type: "addConnection", source: "Window Buffer Memory", target: "AI Agent", sourceOutput: "ai_memory"}]})',
'// Connect output parser to AI Agent\nn8n_update_partial_workflow({id: "ai4", operations: [{type: "addConnection", source: "Structured Output Parser", target: "AI Agent", sourceOutput: "ai_outputParser"}]})',
'// Complete AI Agent setup: Add language model, tools, and memory\nn8n_update_partial_workflow({id: "ai5", operations: [\n  {type: "addConnection", source: "OpenAI Chat Model", target: "AI Agent", sourceOutput: "ai_languageModel"},\n  {type: "addConnection", source: "HTTP Request Tool", target: "AI Agent", sourceOutput: "ai_tool"},\n  {type: "addConnection", source: "Code Tool", target: "AI Agent", sourceOutput: "ai_tool"},\n  {type: "addConnection", source: "Window Buffer Memory", target: "AI Agent", sourceOutput: "ai_memory"}\n]})',
'// Add fallback model to AI Agent for reliability\nn8n_update_partial_workflow({id: "ai6", operations: [\n  {type: "addConnection", source: "OpenAI Chat Model", target: "AI Agent", sourceOutput: "ai_languageModel", targetIndex: 0},\n  {type: "addConnection", source: "Anthropic Chat Model", target: "AI Agent", sourceOutput: "ai_languageModel", targetIndex: 1}\n]})',
'// Vector Store setup: Connect embeddings and documents\nn8n_update_partial_workflow({id: "ai7", operations: [\n  {type: "addConnection", source: "Embeddings OpenAI", target: "Pinecone Vector Store", sourceOutput: "ai_embedding"},\n  {type: "addConnection", source: "Default Data Loader", target: "Pinecone Vector Store", sourceOutput: "ai_document"}\n]})',
'// Connect Vector Store Tool to AI Agent (retrieval setup)\nn8n_update_partial_workflow({id: "ai8", operations: [\n  {type: "addConnection", source: "Pinecone Vector Store", target: "Vector Store Tool", sourceOutput: "ai_vectorStore"},\n  {type: "addConnection", source: "Vector Store Tool", target: "AI Agent", sourceOutput: "ai_tool"}\n]})',
'// Rewire AI Agent to use different language model\nn8n_update_partial_workflow({id: "ai9", operations: [{type: "rewireConnection", source: "AI Agent", from: "OpenAI Chat Model", to: "Anthropic Chat Model", sourceOutput: "ai_languageModel"}]})',
'// Replace all AI tools for an agent\nn8n_update_partial_workflow({id: "ai10", operations: [\n  {type: "removeConnection", source: "Old Tool 1", target: "AI Agent", sourceOutput: "ai_tool"},\n  {type: "removeConnection", source: "Old Tool 2", target: "AI Agent", sourceOutput: "ai_tool"},\n  {type: "addConnection", source: "New HTTP Tool", target: "AI Agent", sourceOutput: "ai_tool"},\n  {type: "addConnection", source: "New Code Tool", target: "AI Agent", sourceOutput: "ai_tool"}\n]})',
'\n// ============ REMOVING PROPERTIES EXAMPLES ============',
'// Remove a simple property\nn8n_update_partial_workflow({id: "rm1", operations: [{type: "updateNode", nodeName: "HTTP Request", updates: {onError: undefined}}]})',
'// Migrate from deprecated continueOnFail to onError\nn8n_update_partial_workflow({id: "rm2", operations: [{type: "updateNode", nodeName: "HTTP Request", updates: {continueOnFail: undefined, onError: "continueErrorOutput"}}]})',
'// Remove nested property\nn8n_update_partial_workflow({id: "rm3", operations: [{type: "updateNode", nodeName: "API Request", updates: {"parameters.authentication": undefined}}]})',
'// Remove multiple properties\nn8n_update_partial_workflow({id: "rm4", operations: [{type: "updateNode", nodeName: "Data Processor", updates: {continueOnFail: undefined, alwaysOutputData: undefined, "parameters.legacy_option": undefined}}]})',
'// Remove entire array property\nn8n_update_partial_workflow({id: "rm5", operations: [{type: "updateNode", nodeName: "HTTP Request", updates: {"parameters.headers": undefined}}]})'
],
useCases: [
'Rewire connections when replacing nodes',
'Route IF/Switch node outputs with semantic parameters',
'Clean up broken workflows after node renames/deletions',
'Bulk connection cleanup with best-effort mode',
'Update single node parameters',
@@ -82,17 +349,35 @@ Add **ignoreErrors: true** to removeConnection operations to prevent failures wh
'Graceful cleanup operations that don\'t fail',
'Enable/disable nodes',
'Rename workflows or nodes',
'Manage tags efficiently'
'Manage tags efficiently',
'Connect AI components (language models, tools, memory, parsers)',
'Set up AI Agent workflows with multiple tools',
'Add fallback language models to AI Agents',
'Configure Vector Store retrieval systems',
'Swap language models in existing AI workflows',
'Batch-update AI tool connections'
],
performance: 'Very fast - typically 50-200ms. Much faster than full updates as only changes are processed.',
bestPractices: [
'Use rewireConnection instead of remove+add for changing targets',
'Use branch="true"/"false" for IF nodes instead of sourceIndex',
'Use case=N for Switch nodes instead of sourceIndex',
'Use cleanStaleConnections after renaming/removing nodes',
'Use continueOnError for bulk cleanup operations',
'Set ignoreErrors:true on removeConnection for graceful cleanup',
'Use validateOnly to test operations before applying',
'Group related changes in one call',
'Check operation order for dependencies',
'Use atomic mode (default) for critical updates'
'Use atomic mode (default) for critical updates',
'For AI connections, always specify sourceOutput (ai_languageModel, ai_tool, ai_memory, etc.)',
'Connect language model BEFORE adding AI Agent to ensure validation passes',
'Use targetIndex for fallback models (primary=0, fallback=1)',
'Batch AI component connections in a single operation for atomicity',
'Validate AI workflows after connection changes to catch configuration errors',
'To remove properties, set them to undefined (not null) in the updates object',
'When migrating from deprecated properties, remove the old property and add the new one in the same operation',
'Use undefined to resolve mutual exclusivity validation errors between properties',
'Batch multiple property removals in a single updateNode operation for efficiency'
],
pitfalls: [
'**REQUIRES N8N_API_URL and N8N_API_KEY environment variables** - will not work without n8n API access',
@@ -100,9 +385,24 @@ Add **ignoreErrors: true** to removeConnection operations to prevent failures wh
'continueOnError breaks atomic guarantees - use with caution',
'Order matters for dependent operations (e.g., must add node before connecting to it)',
'Node references accept ID or name, but name must be unique',
'Node names with special characters (apostrophes, quotes) work correctly',
'For best compatibility, prefer node IDs over names when dealing with special characters',
'Use "updates" property for updateNode operations: {type: "updateNode", updates: {...}}',
'Smart parameters (branch, case) only work with IF and Switch nodes - ignored for other node types',
'Explicit sourceIndex overrides smart parameters (branch, case) if both provided',
'**CRITICAL**: For If nodes, ALWAYS use branch="true"/"false" instead of sourceIndex. Using sourceIndex=0 for multiple connections will put them ALL on the TRUE branch (main[0]), breaking your workflow logic!',
'**CRITICAL**: For Switch nodes, ALWAYS use case=N instead of sourceIndex. Using same sourceIndex for multiple connections will put them on the same case output.',
'cleanStaleConnections removes ALL broken connections - cannot be selective',
'replaceConnections overwrites entire connections object - all previous connections lost'
'replaceConnections overwrites entire connections object - all previous connections lost',
'**Auto-sanitization behavior**: Binary operators (equals, contains) automatically have singleValue removed; unary operators (isEmpty, isNotEmpty) automatically get singleValue:true added',
'**Auto-sanitization runs on ALL nodes**: When ANY update is made, ALL nodes in the workflow are sanitized (not just modified ones)',
'**Auto-sanitization cannot fix everything**: It fixes operator structures and missing metadata, but cannot fix broken connections or branch mismatches',
'**Corrupted workflows beyond repair**: Workflows in paradoxical states (API returns corrupt, API rejects updates) cannot be fixed via API - must be recreated',
'Setting a property to null does NOT remove it - use undefined instead',
'When properties are mutually exclusive (e.g., continueOnFail and onError), setting only the new property will fail - you must remove the old one with undefined',
'Removing a required property may cause validation errors - check node documentation first',
'Nested property removal with dot notation only removes the specific nested field, not the entire parent object',
'Array index notation (e.g., "parameters.headers[0]") is not supported - remove the entire array property instead'
],
relatedTools: ['n8n_update_full_workflow', 'n8n_get_workflow', 'validate_workflow', 'tools_documentation']
}

@@ -293,7 +293,7 @@ export const n8nManagementTools: ToolDefinition[] = [
description: 'Types of fixes to apply (default: all)',
items: {
type: 'string',
enum: ['expression-format', 'typeversion-correction', 'error-output-config', 'node-type-correction', 'webhook-missing-path']
enum: ['expression-format', 'typeversion-correction', 'error-output-config', 'node-type-correction', 'webhook-missing-path', 'typeversion-upgrade', 'version-migration']
}
},
confidenceThreshold: {
@@ -462,5 +462,59 @@ Examples:
}
}
}
},
{
name: 'n8n_workflow_versions',
description: `Manage workflow version history, rollback, and cleanup. Six modes:
- list: Show version history for a workflow
- get: Get details of specific version
- rollback: Restore workflow to previous version (creates backup first)
- delete: Delete specific version or all versions for a workflow
- prune: Manually trigger pruning to keep N most recent versions
- truncate: Delete ALL versions for ALL workflows (requires confirmation)`,
inputSchema: {
type: 'object',
properties: {
mode: {
type: 'string',
enum: ['list', 'get', 'rollback', 'delete', 'prune', 'truncate'],
description: 'Operation mode'
},
workflowId: {
type: 'string',
description: 'Workflow ID (required for list, rollback, delete, prune)'
},
versionId: {
type: 'number',
description: 'Version ID (required for get mode and single version delete, optional for rollback)'
},
limit: {
type: 'number',
default: 10,
description: 'Max versions to return in list mode'
},
validateBefore: {
type: 'boolean',
default: true,
description: 'Validate workflow structure before rollback'
},
deleteAll: {
type: 'boolean',
default: false,
description: 'Delete all versions for workflow (delete mode only)'
},
maxVersions: {
type: 'number',
default: 10,
description: 'Keep N most recent versions (prune mode only)'
},
confirmTruncate: {
type: 'boolean',
default: false,
description: 'REQUIRED: Must be true to truncate all versions (truncate mode only)'
}
},
required: ['mode']
}
}
];
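
// Illustrative n8n_workflow_versions invocations for the six modes above,
// matching the schema's per-mode requirements (workflow/version IDs are hypothetical):
//   n8n_workflow_versions({mode: "list", workflowId: "wf_123", limit: 5})
//   n8n_workflow_versions({mode: "get", versionId: 42})
//   n8n_workflow_versions({mode: "rollback", workflowId: "wf_123", versionId: 42, validateBefore: true})
//   n8n_workflow_versions({mode: "delete", workflowId: "wf_123", deleteAll: true})
//   n8n_workflow_versions({mode: "prune", workflowId: "wf_123", maxVersions: 10})
//   n8n_workflow_versions({mode: "truncate", confirmTruncate: true})
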
@@ -1,4 +1,14 @@
import { PropertyExtractor } from './property-extractor';
import type {
NodeClass,
VersionedNodeInstance
} from '../types/node-types';
import {
isVersionedNodeInstance,
isVersionedNodeClass,
getNodeDescription as getNodeDescriptionHelper
} from '../types/node-types';
import type { INodeTypeBaseDescription, INodeTypeDescription } from 'n8n-workflow';

export interface ParsedNode {
style: 'declarative' | 'programmatic';
@@ -22,9 +32,9 @@ export interface ParsedNode {

export class NodeParser {
private propertyExtractor = new PropertyExtractor();
private currentNodeClass: any = null;

parse(nodeClass: any, packageName: string): ParsedNode {
private currentNodeClass: NodeClass | null = null;

parse(nodeClass: NodeClass, packageName: string): ParsedNode {
this.currentNodeClass = nodeClass;
// Get base description (handles versioned nodes)
const description = this.getNodeDescription(nodeClass);
@@ -50,46 +60,64 @@ export class NodeParser {
};
}

private getNodeDescription(nodeClass: any): any {
private getNodeDescription(nodeClass: NodeClass): INodeTypeBaseDescription | INodeTypeDescription {
// Try to get description from the class first
let description: any;

// Check if it's a versioned node (has baseDescription and nodeVersions)
if (typeof nodeClass === 'function' && nodeClass.prototype &&
nodeClass.prototype.constructor &&
nodeClass.prototype.constructor.name === 'VersionedNodeType') {
let description: INodeTypeBaseDescription | INodeTypeDescription | undefined;

// Check if it's a versioned node using type guard
if (isVersionedNodeClass(nodeClass)) {
// This is a VersionedNodeType class - instantiate it
const instance = new nodeClass();
description = instance.baseDescription || {};
try {
const instance = new (nodeClass as new () => VersionedNodeInstance)();
// Strategic any assertion for accessing both description and baseDescription
const inst = instance as any;
// Try description first (real VersionedNodeType with getter)
// Only fallback to baseDescription if nodeVersions exists (complete VersionedNodeType mock)
// This prevents using baseDescription for incomplete mocks that test edge cases
description = inst.description || (inst.nodeVersions ? inst.baseDescription : undefined);

// If still undefined (incomplete mock), leave as undefined to use catch block fallback
} catch (e) {
// Some nodes might require parameters to instantiate
}
} else if (typeof nodeClass === 'function') {
// Try to instantiate to get description
try {
const instance = new nodeClass();
description = instance.description || {};

// For versioned nodes, we might need to look deeper
if (!description.name && instance.baseDescription) {
description = instance.baseDescription;
description = instance.description;
// If description is empty or missing name, check for baseDescription fallback
if (!description || !description.name) {
const inst = instance as any;
if (inst.baseDescription?.name) {
description = inst.baseDescription;
}
}
} catch (e) {
// Some nodes might require parameters to instantiate
// Try to access static properties
description = nodeClass.description || {};
description = (nodeClass as any).description;
}
} else {
// Maybe it's already an instance
description = nodeClass.description || {};
description = nodeClass.description;
// If description is empty or missing name, check for baseDescription fallback
if (!description || !description.name) {
const inst = nodeClass as any;
if (inst.baseDescription?.name) {
description = inst.baseDescription;
}
}
}

return description;

return description || ({} as any);
}

private detectStyle(nodeClass: any): 'declarative' | 'programmatic' {
private detectStyle(nodeClass: NodeClass): 'declarative' | 'programmatic' {
const desc = this.getNodeDescription(nodeClass);
return desc.routing ? 'declarative' : 'programmatic';
return (desc as any).routing ? 'declarative' : 'programmatic';
}

private extractNodeType(description: any, packageName: string): string {

private extractNodeType(description: INodeTypeBaseDescription | INodeTypeDescription, packageName: string): string {
// Ensure we have the full node type including package prefix
const name = description.name;

@@ -106,57 +134,97 @@ export class NodeParser {
return `${packagePrefix}.${name}`;
}

private extractCategory(description: any): string {
return description.group?.[0] ||
description.categories?.[0] ||
description.category ||
private extractCategory(description: INodeTypeBaseDescription | INodeTypeDescription): string {
return description.group?.[0] ||
(description as any).categories?.[0] ||
(description as any).category ||
'misc';
}

private detectTrigger(description: any): boolean {

private detectTrigger(description: INodeTypeBaseDescription | INodeTypeDescription): boolean {
// Strategic any assertion for properties that only exist on INodeTypeDescription
const desc = description as any;

// Primary check: group includes 'trigger'
if (description.group && Array.isArray(description.group)) {
if (description.group.includes('trigger')) {
return true;
}
}

// Fallback checks for edge cases
return description.polling === true ||
description.trigger === true ||
description.eventTrigger === true ||
return desc.polling === true ||
desc.trigger === true ||
desc.eventTrigger === true ||
description.name?.toLowerCase().includes('trigger');
}

private detectWebhook(description: any): boolean {
return (description.webhooks?.length > 0) ||
description.webhook === true ||
private detectWebhook(description: INodeTypeBaseDescription | INodeTypeDescription): boolean {
const desc = description as any; // INodeTypeDescription has webhooks, but INodeTypeBaseDescription doesn't
return (desc.webhooks?.length > 0) ||
desc.webhook === true ||
description.name?.toLowerCase().includes('webhook');
}

private extractVersion(nodeClass: any): string {
// Check instance for baseDescription first
/**
* Extracts the version from a node class.
*
* Priority Chain:
* 1. Instance currentVersion (VersionedNodeType's computed property)
* 2. Instance description.defaultVersion (explicit default)
* 3. Instance nodeVersions (fallback to max available version)
* 4. Description version array (legacy nodes)
* 5. Description version scalar (simple versioning)
* 6. Class-level properties (if instantiation fails)
* 7. Default to "1"
*
* Critical Fix (v2.17.4): Removed check for non-existent instance.baseDescription.defaultVersion
* which caused AI Agent to incorrectly return version "3" instead of "2.2"
*
* @param nodeClass - The node class or instance to extract version from
* @returns The version as a string
*/
private extractVersion(nodeClass: NodeClass): string {
// Check instance properties first
try {
const instance = typeof nodeClass === 'function' ? new nodeClass() : nodeClass;

// Handle instance-level baseDescription
if (instance?.baseDescription?.defaultVersion) {
return instance.baseDescription.defaultVersion.toString();
// Strategic any assertion - instance could be INodeType or IVersionedNodeType
const inst = instance as any;

// PRIORITY 1: Check currentVersion (what VersionedNodeType actually uses)
// For VersionedNodeType, currentVersion = defaultVersion ?? max(nodeVersions)
if (inst?.currentVersion !== undefined) {
return inst.currentVersion.toString();
}

// Handle instance-level nodeVersions
if (instance?.nodeVersions) {
const versions = Object.keys(instance.nodeVersions);
return Math.max(...versions.map(Number)).toString();

// PRIORITY 2: Handle instance-level description.defaultVersion
// VersionedNodeType stores baseDescription as 'description', not 'baseDescription'
if (inst?.description?.defaultVersion) {
return inst.description.defaultVersion.toString();
}

// PRIORITY 3: Handle instance-level nodeVersions (fallback to max)
if (inst?.nodeVersions) {
const versions = Object.keys(inst.nodeVersions).map(Number);
if (versions.length > 0) {
const maxVersion = Math.max(...versions);
if (!isNaN(maxVersion)) {
return maxVersion.toString();
}
}
}

// Handle version array in description (e.g., [1, 1.1, 1.2])
if (instance?.description?.version) {
const version = instance.description.version;
if (inst?.description?.version) {
const version = inst.description.version;
if (Array.isArray(version)) {
// Find the maximum version from the array
const maxVersion = Math.max(...version.map((v: any) => parseFloat(v.toString())));
return maxVersion.toString();
const numericVersions = version.map((v: any) => parseFloat(v.toString()));
if (numericVersions.length > 0) {
const maxVersion = Math.max(...numericVersions);
if (!isNaN(maxVersion)) {
return maxVersion.toString();
}
}
} else if (typeof version === 'number' || typeof version === 'string') {
return version.toString();
}
@@ -165,94 +233,119 @@ export class NodeParser {
// Some nodes might require parameters to instantiate
// Try class-level properties
}

// Handle class-level VersionedNodeType with defaultVersion
if (nodeClass.baseDescription?.defaultVersion) {
return nodeClass.baseDescription.defaultVersion.toString();
// Note: Most VersionedNodeType classes don't have static properties
// Strategic any assertion for class-level property access
const nodeClassAny = nodeClass as any;
if (nodeClassAny.description?.defaultVersion) {
return nodeClassAny.description.defaultVersion.toString();
}

// Handle class-level VersionedNodeType with nodeVersions
if (nodeClass.nodeVersions) {
const versions = Object.keys(nodeClass.nodeVersions);
return Math.max(...versions.map(Number)).toString();
}

// Also check class-level description for version array
const description = this.getNodeDescription(nodeClass);
if (description?.version) {
if (Array.isArray(description.version)) {
const maxVersion = Math.max(...description.version.map((v: any) => parseFloat(v.toString())));
return maxVersion.toString();
} else if (typeof description.version === 'number' || typeof description.version === 'string') {
return description.version.toString();
if (nodeClassAny.nodeVersions) {
const versions = Object.keys(nodeClassAny.nodeVersions).map(Number);
if (versions.length > 0) {
const maxVersion = Math.max(...versions);
if (!isNaN(maxVersion)) {
return maxVersion.toString();
}
}
}

// Also check class-level description for version array
const description = this.getNodeDescription(nodeClass);
const desc = description as any; // Strategic assertion for version property
if (desc?.version) {
if (Array.isArray(desc.version)) {
const numericVersions = desc.version.map((v: any) => parseFloat(v.toString()));
if (numericVersions.length > 0) {
const maxVersion = Math.max(...numericVersions);
if (!isNaN(maxVersion)) {
return maxVersion.toString();
}
}
} else if (typeof desc.version === 'number' || typeof desc.version === 'string') {
return desc.version.toString();
}
}

// Default to version 1
return '1';
}

private detectVersioned(nodeClass: any): boolean {
private detectVersioned(nodeClass: NodeClass): boolean {
// Check instance-level properties first
try {
const instance = typeof nodeClass === 'function' ? new nodeClass() : nodeClass;

// Strategic any assertion - instance could be INodeType or IVersionedNodeType
const inst = instance as any;

// Check for instance baseDescription with defaultVersion
if (instance?.baseDescription?.defaultVersion) {
if (inst?.baseDescription?.defaultVersion) {
return true;
}

// Check for nodeVersions
if (instance?.nodeVersions) {
if (inst?.nodeVersions) {
return true;
}

// Check for version array in description
if (instance?.description?.version && Array.isArray(instance.description.version)) {
if (inst?.description?.version && Array.isArray(inst.description.version)) {
return true;
}
} catch (e) {
// Some nodes might require parameters to instantiate
// Try class-level checks
}

// Check class-level nodeVersions
if (nodeClass.nodeVersions || nodeClass.baseDescription?.defaultVersion) {
// Strategic any assertion for class-level property access
const nodeClassAny = nodeClass as any;
if (nodeClassAny.nodeVersions || nodeClassAny.baseDescription?.defaultVersion) {
return true;
}

// Also check class-level description for version array
const description = this.getNodeDescription(nodeClass);
if (description?.version && Array.isArray(description.version)) {
const desc = description as any; // Strategic assertion for version property
if (desc?.version && Array.isArray(desc.version)) {
return true;
}

return false;
}

private extractOutputs(description: any): { outputs?: any[], outputNames?: string[] } {
private extractOutputs(description: INodeTypeBaseDescription | INodeTypeDescription): { outputs?: any[], outputNames?: string[] } {
const result: { outputs?: any[], outputNames?: string[] } = {};

// Strategic any assertion for outputs/outputNames properties
const desc = description as any;

// First check the base description
if (description.outputs) {
result.outputs = Array.isArray(description.outputs) ? description.outputs : [description.outputs];
if (desc.outputs) {
result.outputs = Array.isArray(desc.outputs) ? desc.outputs : [desc.outputs];
}

if (description.outputNames) {
result.outputNames = Array.isArray(description.outputNames) ? description.outputNames : [description.outputNames];

if (desc.outputNames) {
result.outputNames = Array.isArray(desc.outputNames) ? desc.outputNames : [desc.outputNames];
}

// If no outputs found and this is a versioned node, check the latest version
if (!result.outputs && !result.outputNames) {
const nodeClass = this.currentNodeClass; // We'll need to track this
if (nodeClass) {
try {
const instance = new nodeClass();
if (instance.nodeVersions) {
const instance = typeof nodeClass === 'function' ? new nodeClass() : nodeClass;
// Strategic any assertion for instance properties
const inst = instance as any;
if (inst.nodeVersions) {
// Get the latest version
const versions = Object.keys(instance.nodeVersions).map(Number);
const latestVersion = Math.max(...versions);
const versionedDescription = instance.nodeVersions[latestVersion]?.description;
const versions = Object.keys(inst.nodeVersions).map(Number);
if (versions.length > 0) {
const latestVersion = Math.max(...versions);
if (!isNaN(latestVersion)) {
const versionedDescription = inst.nodeVersions[latestVersion]?.description;

if (versionedDescription) {
if (versionedDescription.outputs) {
@@ -262,11 +355,13 @@ export class NodeParser {
}

if (versionedDescription.outputNames) {
result.outputNames = Array.isArray(versionedDescription.outputNames)
? versionedDescription.outputNames
result.outputNames = Array.isArray(versionedDescription.outputNames)
? versionedDescription.outputNames
: [versionedDescription.outputNames];
}
}
}
}
}
} catch (e) {
// Ignore errors from instantiating node

@@ -1,8 +1,10 @@
import type { NodeClass } from '../types/node-types';

export class PropertyExtractor {
/**
* Extract properties with proper handling of n8n's complex structures
*/
extractProperties(nodeClass: any): any[] {
extractProperties(nodeClass: NodeClass): any[] {
const properties: any[] = [];

// First try to get instance-level properties
@@ -15,12 +17,16 @@ export class PropertyExtractor {

// Handle versioned nodes - check instance for nodeVersions
if (instance?.nodeVersions) {
const versions = Object.keys(instance.nodeVersions);
const latestVersion = Math.max(...versions.map(Number));
const versionedNode = instance.nodeVersions[latestVersion];

if (versionedNode?.description?.properties) {
return this.normalizeProperties(versionedNode.description.properties);
const versions = Object.keys(instance.nodeVersions).map(Number);
if (versions.length > 0) {
const latestVersion = Math.max(...versions);
if (!isNaN(latestVersion)) {
const versionedNode = instance.nodeVersions[latestVersion];

if (versionedNode?.description?.properties) {
return this.normalizeProperties(versionedNode.description.properties);
}
}
}
}

@@ -35,30 +41,36 @@ export class PropertyExtractor {
return properties;
}

private getNodeDescription(nodeClass: any): any {
private getNodeDescription(nodeClass: NodeClass): any {
// Try to get description from the class first
let description: any;

if (typeof nodeClass === 'function') {
// Try to instantiate to get description
try {
const instance = new nodeClass();
description = instance.description || instance.baseDescription || {};
// Strategic any assertion for instance properties
const inst = instance as any;
description = inst.description || inst.baseDescription || {};
} catch (e) {
// Some nodes might require parameters to instantiate
description = nodeClass.description || {};
// Strategic any assertion for class-level properties
const nodeClassAny = nodeClass as any;
description = nodeClassAny.description || {};
}
} else {
description = nodeClass.description || {};
// Strategic any assertion for instance properties
const inst = nodeClass as any;
description = inst.description || {};
}

return description;
}

/**
* Extract operations from both declarative and programmatic nodes
*/
extractOperations(nodeClass: any): any[] {
extractOperations(nodeClass: NodeClass): any[] {
const operations: any[] = [];

// First try to get instance-level data
@@ -71,12 +83,16 @@ export class PropertyExtractor {

// Handle versioned nodes
if (instance?.nodeVersions) {
const versions = Object.keys(instance.nodeVersions);
const latestVersion = Math.max(...versions.map(Number));
const versionedNode = instance.nodeVersions[latestVersion];

if (versionedNode?.description) {
return this.extractOperationsFromDescription(versionedNode.description);
const versions = Object.keys(instance.nodeVersions).map(Number);
if (versions.length > 0) {
const latestVersion = Math.max(...versions);
if (!isNaN(latestVersion)) {
const versionedNode = instance.nodeVersions[latestVersion];

if (versionedNode?.description) {
return this.extractOperationsFromDescription(versionedNode.description);
}
}
}
}

@@ -138,33 +154,35 @@ export class PropertyExtractor {
/**
* Deep search for AI tool capability
*/
detectAIToolCapability(nodeClass: any): boolean {
detectAIToolCapability(nodeClass: NodeClass): boolean {
const description = this.getNodeDescription(nodeClass);

// Direct property check
if (description?.usableAsTool === true) return true;

// Check in actions for declarative nodes
if (description?.actions?.some((a: any) => a.usableAsTool === true)) return true;

// Check versioned nodes
if (nodeClass.nodeVersions) {
for (const version of Object.values(nodeClass.nodeVersions)) {
// Strategic any assertion for nodeVersions property
const nodeClassAny = nodeClass as any;
if (nodeClassAny.nodeVersions) {
for (const version of Object.values(nodeClassAny.nodeVersions)) {
if ((version as any).description?.usableAsTool === true) return true;
}
}

// Check for specific AI-related properties
const aiIndicators = ['openai', 'anthropic', 'huggingface', 'cohere', 'ai'];
const nodeName = description?.name?.toLowerCase() || '';

return aiIndicators.some(indicator => nodeName.includes(indicator));
}

/**
* Extract credential requirements with proper structure
*/
extractCredentials(nodeClass: any): any[] {
extractCredentials(nodeClass: NodeClass): any[] {
const credentials: any[] = [];

// First try to get instance-level data
@@ -177,12 +195,16 @@ export class PropertyExtractor {

// Handle versioned nodes
if (instance?.nodeVersions) {
const versions = Object.keys(instance.nodeVersions);
const latestVersion = Math.max(...versions.map(Number));
const versionedNode = instance.nodeVersions[latestVersion];

if (versionedNode?.description?.credentials) {
return versionedNode.description.credentials;
const versions = Object.keys(instance.nodeVersions).map(Number);
if (versions.length > 0) {
const latestVersion = Math.max(...versions);
if (!isNaN(latestVersion)) {
const versionedNode = instance.nodeVersions[latestVersion];

if (versionedNode?.description?.credentials) {
return versionedNode.description.credentials;
}
}
}
}

@@ -209,6 +231,7 @@ export class PropertyExtractor {
required: prop.required,
displayOptions: prop.displayOptions,
typeOptions: prop.typeOptions,
modes: prop.modes, // For resourceLocator type properties - modes are at top level
noDataExpression: prop.noDataExpression
}));
}

@@ -1,3 +1,13 @@
|
||||
import type {
|
||||
NodeClass,
|
||||
VersionedNodeInstance
|
||||
} from '../types/node-types';
|
||||
import {
|
||||
isVersionedNodeInstance,
|
||||
isVersionedNodeClass
|
||||
} from '../types/node-types';
|
||||
import type { INodeTypeBaseDescription, INodeTypeDescription } from 'n8n-workflow';
|
||||
|
||||
export interface ParsedNode {
|
||||
style: 'declarative' | 'programmatic';
|
||||
nodeType: string;
|
||||
@@ -15,24 +25,32 @@ export interface ParsedNode {
|
||||
}
|
||||
|
||||
export class SimpleParser {
|
||||
parse(nodeClass: any): ParsedNode {
|
||||
let description: any;
|
||||
parse(nodeClass: NodeClass): ParsedNode {
|
||||
let description: INodeTypeBaseDescription | INodeTypeDescription;
|
||||
let isVersioned = false;
|
||||
|
||||
|
||||
// Try to get description from the class
|
||||
try {
|
||||
// Check if it's a versioned node (has baseDescription and nodeVersions)
|
||||
if (typeof nodeClass === 'function' && nodeClass.prototype &&
|
||||
nodeClass.prototype.constructor &&
|
||||
nodeClass.prototype.constructor.name === 'VersionedNodeType') {
|
||||
// Check if it's a versioned node using type guard
|
||||
if (isVersionedNodeClass(nodeClass)) {
|
||||
// This is a VersionedNodeType class - instantiate it
|
||||
const instance = new nodeClass();
|
||||
description = instance.baseDescription || {};
|
||||
const instance = new (nodeClass as new () => VersionedNodeInstance)();
|
||||
// Strategic any assertion for accessing both description and baseDescription
|
||||
const inst = instance as any;
|
||||
// Try description first (real VersionedNodeType with getter)
|
||||
// Only fallback to baseDescription if nodeVersions exists (complete VersionedNodeType mock)
|
||||
// This prevents using baseDescription for incomplete mocks that test edge cases
|
||||
description = inst.description || (inst.nodeVersions ? inst.baseDescription : undefined);
|
||||
|
||||
// If still undefined (incomplete mock), use empty object to allow graceful failure later
|
||||
if (!description) {
|
||||
description = {} as any;
|
||||
}
|
||||
isVersioned = true;
|
||||
|
||||
|
||||
// For versioned nodes, try to get properties from the current version
|
||||
if (instance.nodeVersions && instance.currentVersion) {
|
||||
const currentVersionNode = instance.nodeVersions[instance.currentVersion];
|
||||
if (inst.nodeVersions && inst.currentVersion) {
|
||||
const currentVersionNode = inst.nodeVersions[inst.currentVersion];
|
||||
if (currentVersionNode && currentVersionNode.description) {
|
||||
// Merge baseDescription with version-specific description
|
||||
description = { ...description, ...currentVersionNode.description };
|
||||
@@ -42,63 +60,76 @@ export class SimpleParser {
|
||||
// Try to instantiate to get description
|
||||
try {
|
||||
const instance = new nodeClass();
|
||||
description = instance.description || {};
|
||||
|
||||
// For versioned nodes, we might need to look deeper
|
||||
if (!description.name && instance.baseDescription) {
|
||||
description = instance.baseDescription;
|
||||
isVersioned = true;
|
||||
description = instance.description;
|
||||
// If description is empty or missing name, check for baseDescription fallback
|
||||
if (!description || !description.name) {
|
||||
const inst = instance as any;
|
||||
if (inst.baseDescription?.name) {
|
||||
description = inst.baseDescription;
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
// Some nodes might require parameters to instantiate
|
||||
// Try to access static properties or look for common patterns
|
||||
description = {};
|
||||
description = {} as any;
|
||||
}
|
||||
} else {
|
||||
// Maybe it's already an instance
|
||||
description = nodeClass.description || {};
|
||||
description = nodeClass.description;
|
||||
// If description is empty or missing name, check for baseDescription fallback
|
||||
if (!description || !description.name) {
|
||||
const inst = nodeClass as any;
|
||||
if (inst.baseDescription?.name) {
|
||||
description = inst.baseDescription;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// If instantiation fails, try to get static description
|
||||
description = nodeClass.description || {};
|
||||
description = (nodeClass as any).description || ({} as any);
|
||||
}
|
||||
|
||||
const isDeclarative = !!description.routing;
|
||||
|
||||
|
||||
// Strategic any assertion for properties that don't exist on both union sides
|
||||
const desc = description as any;
|
||||
const isDeclarative = !!desc.routing;
|
||||
|
||||
// Ensure we have a valid nodeType
|
||||
if (!description.name) {
|
||||
throw new Error('Node is missing name property');
|
||||
}
|
||||
|
||||
|
||||
return {
|
||||
style: isDeclarative ? 'declarative' : 'programmatic',
|
||||
nodeType: description.name,
|
||||
displayName: description.displayName || description.name,
|
||||
description: description.description,
|
||||
category: description.group?.[0] || description.categories?.[0],
|
||||
properties: description.properties || [],
|
||||
credentials: description.credentials || [],
|
||||
isAITool: description.usableAsTool === true,
|
||||
category: description.group?.[0] || desc.categories?.[0],
|
||||
properties: desc.properties || [],
|
||||
credentials: desc.credentials || [],
|
||||
isAITool: desc.usableAsTool === true,
|
||||
isTrigger: this.detectTrigger(description),
|
||||
isWebhook: description.webhooks?.length > 0,
|
||||
operations: isDeclarative ? this.extractOperations(description.routing) : this.extractProgrammaticOperations(description),
|
||||
isWebhook: desc.webhooks?.length > 0,
|
||||
operations: isDeclarative ? this.extractOperations(desc.routing) : this.extractProgrammaticOperations(desc),
|
||||
version: this.extractVersion(nodeClass),
|
||||
isVersioned: isVersioned || this.isVersionedNode(nodeClass) || Array.isArray(description.version) || description.defaultVersion !== undefined
|
||||
isVersioned: isVersioned || this.isVersionedNode(nodeClass) || Array.isArray(desc.version) || desc.defaultVersion !== undefined
|
||||
};
|
||||
}
|
||||
|
||||
private detectTrigger(description: any): boolean {
|
||||
private detectTrigger(description: INodeTypeBaseDescription | INodeTypeDescription): boolean {
|
||||
// Primary check: group includes 'trigger'
|
||||
if (description.group && Array.isArray(description.group)) {
|
||||
if (description.group.includes('trigger')) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Strategic any assertion for properties that only exist on INodeTypeDescription
|
||||
const desc = description as any;
|
||||
|
||||
// Fallback checks for edge cases
|
||||
return description.polling === true ||
|
||||
description.trigger === true ||
|
||||
description.eventTrigger === true ||
|
||||
return desc.polling === true ||
|
||||
desc.trigger === true ||
|
||||
desc.eventTrigger === true ||
|
||||
description.name?.toLowerCase().includes('trigger');
|
||||
}
|
||||
|
||||
@@ -186,48 +217,109 @@ export class SimpleParser {
|
||||
return operations;
|
||||
}
|
||||
|
||||
private extractVersion(nodeClass: any): string {
|
||||
/**
|
||||
* Extracts the version from a node class.
|
||||
*
|
||||
* Priority Chain (same as node-parser.ts):
|
||||
* 1. Instance currentVersion (VersionedNodeType's computed property)
|
||||
* 2. Instance description.defaultVersion (explicit default)
|
||||
* 3. Instance nodeVersions (fallback to max available version)
|
||||
* 4. Instance description.version (simple versioning)
|
||||
* 5. Class-level properties (if instantiation fails)
|
||||
* 6. Default to "1"
|
||||
*
|
||||
* Critical Fix (v2.17.4): Removed check for non-existent instance.baseDescription.defaultVersion
|
||||
* which caused AI Agent and other VersionedNodeType nodes to return wrong versions.
|
||||
*
|
||||
* @param nodeClass - The node class or instance to extract version from
|
||||
* @returns The version as a string
|
||||
*/
|
||||
private extractVersion(nodeClass: NodeClass): string {
|
||||
// Try to get version from instance first
|
||||
try {
|
||||
const instance = typeof nodeClass === 'function' ? new nodeClass() : nodeClass;
|
||||
|
||||
// Check instance baseDescription
|
||||
if (instance?.baseDescription?.defaultVersion) {
|
||||
return instance.baseDescription.defaultVersion.toString();
|
||||
// Strategic any assertion for instance properties
|
||||
const inst = instance as any;
|
||||
|
||||
// PRIORITY 1: Check currentVersion (what VersionedNodeType actually uses)
|
||||
// For VersionedNodeType, currentVersion = defaultVersion ?? max(nodeVersions)
|
||||
if (inst?.currentVersion !== undefined) {
|
||||
return inst.currentVersion.toString();
|
||||
}
|
||||
|
||||
// Check instance description version
|
||||
if (instance?.description?.version) {
|
||||
return instance.description.version.toString();
|
||||
|
||||
// PRIORITY 2: Handle instance-level description.defaultVersion
|
||||
// VersionedNodeType stores baseDescription as 'description', not 'baseDescription'
|
||||
if (inst?.description?.defaultVersion) {
|
||||
return inst.description.defaultVersion.toString();
|
||||
}
|
||||
|
||||
// PRIORITY 3: Handle instance-level nodeVersions (fallback to max)
|
||||
if (inst?.nodeVersions) {
|
||||
const versions = Object.keys(inst.nodeVersions).map(Number);
|
||||
if (versions.length > 0) {
|
||||
const maxVersion = Math.max(...versions);
|
||||
if (!isNaN(maxVersion)) {
|
||||
return maxVersion.toString();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PRIORITY 4: Check instance description version
|
||||
if (inst?.description?.version) {
|
||||
return inst.description.version.toString();
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore instantiation errors
|
||||
}
|
||||
|
||||
// Check class-level properties
|
||||
if (nodeClass.baseDescription?.defaultVersion) {
|
||||
return nodeClass.baseDescription.defaultVersion.toString();
|
||||
|
||||
// PRIORITY 5: Check class-level properties (if instantiation failed)
|
||||
// Strategic any assertion for class-level properties
|
||||
const nodeClassAny = nodeClass as any;
|
||||
if (nodeClassAny.description?.defaultVersion) {
|
||||
return nodeClassAny.description.defaultVersion.toString();
|
||||
}
|
||||
|
||||
return nodeClass.description?.version || '1';
|
||||
|
||||
if (nodeClassAny.nodeVersions) {
|
||||
const versions = Object.keys(nodeClassAny.nodeVersions).map(Number);
|
||||
if (versions.length > 0) {
|
||||
const maxVersion = Math.max(...versions);
|
||||
if (!isNaN(maxVersion)) {
|
||||
return maxVersion.toString();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PRIORITY 6: Default to version 1
|
||||
return nodeClassAny.description?.version || '1';
|
||||
}
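// --- Editor's illustrative sketch (not part of the diff): how the priority chain
// documented above resolves for a minimal VersionedNodeType-like object. The mock
// shape is assumed from the code shown; it is not a real n8n node.
const mockVersionedInstance = {
  currentVersion: 2,                      // PRIORITY 1: returned first
  nodeVersions: { 1: {}, 2: {}, 3: {} },  // PRIORITY 3 fallback would pick 3
  description: { name: 'mockNode', defaultVersion: 2 }
};
// extractVersion(mockVersionedInstance) returns '2' via currentVersion and never
// reaches the nodeVersions fallback that would have returned '3'.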
  private isVersionedNode(nodeClass: any): boolean {
    // Check for VersionedNodeType pattern
    if (nodeClass.baseDescription && nodeClass.nodeVersions) {
  private isVersionedNode(nodeClass: NodeClass): boolean {
    // Strategic any assertion for class-level properties
    const nodeClassAny = nodeClass as any;

    // Check for VersionedNodeType pattern at class level
    if (nodeClassAny.baseDescription && nodeClassAny.nodeVersions) {
      return true;
    }

    // Check for inline versioning pattern (like Code node)
    try {
      const instance = typeof nodeClass === 'function' ? new nodeClass() : nodeClass;
      const description = instance.description || {};

      // Strategic any assertion for instance properties
      const inst = instance as any;

      // Check for VersionedNodeType pattern at instance level
      if (inst.baseDescription && inst.nodeVersions) {
        return true;
      }

      const description = inst.description || {};

      // If version is an array, it's versioned
      if (Array.isArray(description.version)) {
        return true;
      }

      // If it has defaultVersion, it's likely versioned
      if (description.defaultVersion !== undefined) {
        return true;
@@ -235,7 +327,7 @@ export class SimpleParser {
    } catch (e) {
      // Ignore instantiation errors
    }

    return false;
  }
}
@@ -417,12 +417,28 @@ async function generateTemplateMetadata(db: any, service: TemplateService) {
    } catch (error) {
      console.warn(`Failed to parse workflow for template ${t.id}:`, error);
    }

    // Parse nodes_used safely
    let nodes: string[] = [];
    try {
      if (t.nodes_used) {
        nodes = JSON.parse(t.nodes_used);
        // Ensure it's an array
        if (!Array.isArray(nodes)) {
          console.warn(`Template ${t.id} has invalid nodes_used (not an array), using empty array`);
          nodes = [];
        }
      }
    } catch (error) {
      console.warn(`Failed to parse nodes_used for template ${t.id}:`, error);
      nodes = [];
    }

    return {
      templateId: t.id,
      name: t.name,
      description: t.description,
      nodes: JSON.parse(t.nodes_used),
      nodes: nodes,
      workflow
    };
  });
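// --- Editor's illustrative sketch (not part of the diff): the parse/validate/
// fallback pattern above, extracted as a reusable helper. The helper name is
// hypothetical; the metadata script inlines this logic instead.
function parseJsonArraySafe(raw: string | null | undefined, fallback: string[] = []): string[] {
  if (!raw) return fallback;
  try {
    const parsed = JSON.parse(raw);
    return Array.isArray(parsed) ? parsed : fallback; // reject non-array JSON
  } catch {
    return fallback; // reject malformed JSON
  }
}
// e.g. parseJsonArraySafe(t.nodes_used) replaces the bare JSON.parse(t.nodes_used)
// that previously threw on malformed rows.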
@@ -167,29 +167,81 @@ async function rebuild() {

function validateDatabase(repository: NodeRepository): { passed: boolean; issues: string[] } {
  const issues = [];

  // Check critical nodes
  const criticalNodes = ['nodes-base.httpRequest', 'nodes-base.code', 'nodes-base.webhook', 'nodes-base.slack'];

  for (const nodeType of criticalNodes) {
    const node = repository.getNode(nodeType);

    if (!node) {
      issues.push(`Critical node ${nodeType} not found`);
      continue;

  try {
    const db = (repository as any).db;

    // CRITICAL: Check if database has any nodes at all
    const nodeCount = db.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number };
    if (nodeCount.count === 0) {
      issues.push('CRITICAL: Database is empty - no nodes found! Rebuild failed or was interrupted.');
      return { passed: false, issues };
    }

    if (node.properties.length === 0) {
      issues.push(`Node ${nodeType} has no properties`);

    // Check minimum expected node count (should have at least 500 nodes from both packages)
    if (nodeCount.count < 500) {
      issues.push(`WARNING: Only ${nodeCount.count} nodes found - expected at least 500 (both n8n packages)`);
    }

    // Check critical nodes
    const criticalNodes = ['nodes-base.httpRequest', 'nodes-base.code', 'nodes-base.webhook', 'nodes-base.slack'];

    for (const nodeType of criticalNodes) {
      const node = repository.getNode(nodeType);

      if (!node) {
        issues.push(`Critical node ${nodeType} not found`);
        continue;
      }

      if (node.properties.length === 0) {
        issues.push(`Node ${nodeType} has no properties`);
      }
    }

    // Check AI tools
    const aiTools = repository.getAITools();
    if (aiTools.length === 0) {
      issues.push('No AI tools found - check detection logic');
    }

    // Check FTS5 table existence and population
    const ftsTableCheck = db.prepare(`
      SELECT name FROM sqlite_master
      WHERE type='table' AND name='nodes_fts'
    `).get();

    if (!ftsTableCheck) {
      issues.push('CRITICAL: FTS5 table (nodes_fts) does not exist - searches will fail or be very slow');
    } else {
      // Check if FTS5 table is properly populated
      const ftsCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get() as { count: number };

      if (ftsCount.count === 0) {
        issues.push('CRITICAL: FTS5 index is empty - searches will return zero results');
      } else if (nodeCount.count !== ftsCount.count) {
        issues.push(`FTS5 index out of sync: ${nodeCount.count} nodes but ${ftsCount.count} FTS5 entries`);
      }

      // Verify critical nodes are searchable via FTS5
      const searchableNodes = ['webhook', 'merge', 'split'];
      for (const searchTerm of searchableNodes) {
        const searchResult = db.prepare(`
          SELECT COUNT(*) as count FROM nodes_fts
          WHERE nodes_fts MATCH ?
        `).get(searchTerm);

        if (searchResult.count === 0) {
          issues.push(`CRITICAL: Search for "${searchTerm}" returns zero results in FTS5 index`);
        }
      }
    }
  } catch (error) {
    // Catch any validation errors
    const errorMessage = (error as Error).message;
    issues.push(`Validation error: ${errorMessage}`);
  }

  // Check AI tools
  const aiTools = repository.getAITools();
  if (aiTools.length === 0) {
    issues.push('No AI tools found - check detection logic');
  }

  return {
    passed: issues.length === 0,
    issues

src/scripts/seed-canonical-ai-examples.ts (new file, 161 lines)
@@ -0,0 +1,161 @@
#!/usr/bin/env node
/**
 * Seed canonical AI tool examples into the database
 *
 * These hand-crafted examples demonstrate best practices for critical AI tools
 * that are missing from the template database.
 */

import * as fs from 'fs';
import * as path from 'path';
import { createDatabaseAdapter } from '../database/database-adapter';
import { logger } from '../utils/logger';

interface CanonicalExample {
  name: string;
  use_case: string;
  complexity: 'simple' | 'medium' | 'complex';
  parameters: Record<string, any>;
  credentials?: Record<string, any>;
  connections?: Record<string, any>;
  notes: string;
}

interface CanonicalToolExamples {
  node_type: string;
  display_name: string;
  examples: CanonicalExample[];
}

interface CanonicalExamplesFile {
  description: string;
  version: string;
  examples: CanonicalToolExamples[];
}

async function seedCanonicalExamples() {
  try {
    // Load canonical examples file
    const examplesPath = path.join(__dirname, '../data/canonical-ai-tool-examples.json');
    const examplesData = fs.readFileSync(examplesPath, 'utf-8');
    const canonicalExamples: CanonicalExamplesFile = JSON.parse(examplesData);

    logger.info('Loading canonical AI tool examples', {
      version: canonicalExamples.version,
      tools: canonicalExamples.examples.length
    });

    // Initialize database
    const db = await createDatabaseAdapter('./data/nodes.db');

    // First, ensure we have placeholder templates for canonical examples
    const templateStmt = db.prepare(`
      INSERT OR IGNORE INTO templates (
        id,
        workflow_id,
        name,
        description,
        views,
        created_at,
        updated_at
      ) VALUES (?, ?, ?, ?, ?, datetime('now'), datetime('now'))
    `);

    // Create one placeholder template for canonical examples
    const canonicalTemplateId = -1000;
    templateStmt.run(
      canonicalTemplateId,
      canonicalTemplateId, // workflow_id must be unique
      'Canonical AI Tool Examples',
      'Hand-crafted examples demonstrating best practices for AI tools',
      99999 // High view count
    );

    // Prepare insert statement for node configs
    const stmt = db.prepare(`
      INSERT OR REPLACE INTO template_node_configs (
        node_type,
        template_id,
        template_name,
        template_views,
        node_name,
        parameters_json,
        credentials_json,
        has_credentials,
        has_expressions,
        complexity,
        use_cases
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `);

    let totalInserted = 0;

    // Seed each tool's examples
    for (const toolExamples of canonicalExamples.examples) {
      const { node_type, display_name, examples } = toolExamples;

      logger.info(`Seeding examples for ${display_name}`, {
        nodeType: node_type,
        exampleCount: examples.length
      });

      for (let i = 0; i < examples.length; i++) {
        const example = examples[i];

        // All canonical examples use the same template ID
        const templateId = canonicalTemplateId;
        const templateName = `Canonical: ${display_name} - ${example.name}`;

        // Check for expressions in parameters
        const paramsStr = JSON.stringify(example.parameters);
        const hasExpressions = paramsStr.includes('={{') || paramsStr.includes('$json') || paramsStr.includes('$node') ? 1 : 0;

        // Insert into database
        stmt.run(
          node_type,
          templateId,
          templateName,
          99999, // High view count for canonical examples
          example.name,
          JSON.stringify(example.parameters),
          example.credentials ? JSON.stringify(example.credentials) : null,
          example.credentials ? 1 : 0,
          hasExpressions,
          example.complexity,
          example.use_case
        );

        totalInserted++;
        logger.info(`  ✓ Seeded: ${example.name}`, {
          complexity: example.complexity,
          hasCredentials: !!example.credentials,
          hasExpressions: hasExpressions === 1
        });
      }
    }

    db.close();

    logger.info('Canonical examples seeding complete', {
      totalExamples: totalInserted,
      tools: canonicalExamples.examples.length
    });

    console.log('\n✅ Successfully seeded', totalInserted, 'canonical AI tool examples');
    console.log('\nExamples are now available via:');
    console.log('  • search_nodes({query: "HTTP Request Tool", includeExamples: true})');
    console.log('  • get_node_essentials({nodeType: "nodes-langchain.toolCode", includeExamples: true})');

  } catch (error) {
    logger.error('Failed to seed canonical examples', { error });
    console.error('❌ Error:', error);
    process.exit(1);
  }
}

// Run if called directly
if (require.main === module) {
  seedCanonicalExamples().catch(console.error);
}

export { seedCanonicalExamples };
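// --- Editor's illustrative sketch (not part of the diff): one way to spot-check
// the seeding, assuming the template_node_configs layout used by the INSERT above
// and a freshly opened adapter (the script closes its own handle before exiting).
// Template id -1000 is the canonical placeholder created by this script.
const check = db.prepare(
  'SELECT COUNT(*) as count FROM template_node_configs WHERE template_id = ?'
).get(-1000) as { count: number };
console.log(`canonical configs present: ${check.count}`);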
@@ -164,7 +164,7 @@ async function testAutofix() {
  // Step 3: Generate fixes in preview mode
  logger.info('\nStep 3: Generating fixes (preview mode)...');
  const autoFixer = new WorkflowAutoFixer();
  const previewResult = autoFixer.generateFixes(
  const previewResult = await autoFixer.generateFixes(
    testWorkflow as any,
    validationResult,
    allFormatIssues,
@@ -210,7 +210,7 @@ async function testAutofix() {
  logger.info('\n\n=== Testing Different Confidence Thresholds ===');

  for (const threshold of ['high', 'medium', 'low'] as const) {
    const result = autoFixer.generateFixes(
    const result = await autoFixer.generateFixes(
      testWorkflow as any,
      validationResult,
      allFormatIssues,
@@ -227,7 +227,7 @@ async function testAutofix() {

  const fixTypes = ['expression-format', 'typeversion-correction', 'error-output-config'] as const;
  for (const fixType of fixTypes) {
    const result = autoFixer.generateFixes(
    const result = await autoFixer.generateFixes(
      testWorkflow as any,
      validationResult,
      allFormatIssues,

@@ -173,7 +173,7 @@ async function testNodeSimilarity() {
  console.log('='.repeat(60));

  const autoFixer = new WorkflowAutoFixer(repository);
  const fixResult = autoFixer.generateFixes(
  const fixResult = await autoFixer.generateFixes(
    testWorkflow as any,
    validationResult,
    [],

@@ -87,7 +87,7 @@ async function testWebhookAutofix() {
  // Step 2: Generate fixes (preview mode)
  logger.info('\nStep 2: Generating fixes in preview mode...');

  const fixResult = autoFixer.generateFixes(
  const fixResult = await autoFixer.generateFixes(
    testWorkflow,
    validationResult,
    [], // No expression format issues to pass

src/services/ai-node-validator.ts (new file, 634 lines)
@@ -0,0 +1,634 @@
/**
 * AI Node Validator
 *
 * Implements validation logic for AI Agent, Chat Trigger, and Basic LLM Chain nodes
 * from docs/FINAL_AI_VALIDATION_SPEC.md
 *
 * Key Features:
 * - Reverse connection mapping (AI connections flow TO the consumer)
 * - AI Agent comprehensive validation (prompt types, fallback models, streaming mode)
 * - Chat Trigger validation (streaming mode constraints)
 * - Integration with AI tool validators
 */

import { NodeTypeNormalizer } from '../utils/node-type-normalizer';
import {
  WorkflowNode,
  WorkflowJson,
  ReverseConnection,
  ValidationIssue,
  isAIToolSubNode,
  validateAIToolSubNode
} from './ai-tool-validators';

// Re-export types for test files
export type {
  WorkflowNode,
  WorkflowJson,
  ReverseConnection,
  ValidationIssue
} from './ai-tool-validators';

// Validation constants
const MIN_SYSTEM_MESSAGE_LENGTH = 20;
const MAX_ITERATIONS_WARNING_THRESHOLD = 50;

/**
 * AI Connection Types
 * From spec lines 551-596
 */
export const AI_CONNECTION_TYPES = [
  'ai_languageModel',
  'ai_memory',
  'ai_tool',
  'ai_embedding',
  'ai_vectorStore',
  'ai_document',
  'ai_textSplitter',
  'ai_outputParser'
] as const;

/**
 * Build Reverse Connection Map
 *
 * CRITICAL: AI connections flow TO the consumer node (reversed from standard n8n pattern)
 * This utility maps which nodes connect TO each node, essential for AI validation.
 *
 * From spec lines 551-596
 *
 * @example
 * Standard n8n: [Source] --main--> [Target]
 * workflow.connections["Source"]["main"] = [[{node: "Target", ...}]]
 *
 * AI pattern: [Language Model] --ai_languageModel--> [AI Agent]
 * workflow.connections["Language Model"]["ai_languageModel"] = [[{node: "AI Agent", ...}]]
 *
 * Reverse map: reverseMap.get("AI Agent") = [{sourceName: "Language Model", type: "ai_languageModel", ...}]
 */
export function buildReverseConnectionMap(
  workflow: WorkflowJson
): Map<string, ReverseConnection[]> {
  const map = new Map<string, ReverseConnection[]>();

  // Iterate through all connections
  for (const [sourceName, outputs] of Object.entries(workflow.connections)) {
    // Validate source name is not empty
    if (!sourceName || typeof sourceName !== 'string' || sourceName.trim() === '') {
      continue;
    }

    if (!outputs || typeof outputs !== 'object') continue;

    // Iterate through all output types (main, error, ai_tool, ai_languageModel, etc.)
    for (const [outputType, connections] of Object.entries(outputs)) {
      if (!Array.isArray(connections)) continue;

      // Flatten nested arrays and process each connection
      const connArray = connections.flat().filter(c => c);

      for (const conn of connArray) {
        if (!conn || !conn.node) continue;

        // Validate target node name is not empty
        if (typeof conn.node !== 'string' || conn.node.trim() === '') {
          continue;
        }

        // Initialize array for target node if not exists
        if (!map.has(conn.node)) {
          map.set(conn.node, []);
        }

        // Add reverse connection entry
        map.get(conn.node)!.push({
          sourceName: sourceName,
          sourceType: outputType,
          type: outputType,
          index: conn.index ?? 0
        });
      }
    }
  }

  return map;
}
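// --- Editor's illustrative sketch (not part of the diff): the docstring's example
// fed through buildReverseConnectionMap. The workflow literal is minimal and
// assumed; only the fields the function actually reads are populated.
const demoWorkflow: WorkflowJson = {
  nodes: [
    { id: '1', name: 'OpenAI Chat Model', type: '@n8n/n8n-nodes-langchain.lmChatOpenAi', position: [0, 0], parameters: {} },
    { id: '2', name: 'AI Agent', type: '@n8n/n8n-nodes-langchain.agent', position: [200, 0], parameters: {} }
  ],
  connections: {
    'OpenAI Chat Model': { ai_languageModel: [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]] }
  }
};
const reverseMap = buildReverseConnectionMap(demoWorkflow);
// reverseMap.get('AI Agent') -> [{ sourceName: 'OpenAI Chat Model', type: 'ai_languageModel', index: 0, ... }]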
/**
 * Get AI connections TO a specific node
 */
export function getAIConnections(
  nodeName: string,
  reverseConnections: Map<string, ReverseConnection[]>,
  connectionType?: string
): ReverseConnection[] {
  const incoming = reverseConnections.get(nodeName) || [];

  if (connectionType) {
    return incoming.filter(c => c.type === connectionType);
  }

  return incoming.filter(c => AI_CONNECTION_TYPES.includes(c.type as any));
}

/**
 * Validate AI Agent Node
 * From spec lines 3-549
 *
 * Validates:
 * - Language model connections (1 or 2 if fallback)
 * - Output parser connection + hasOutputParser flag
 * - Prompt type configuration (auto vs define)
 * - System message recommendations
 * - Streaming mode constraints (CRITICAL)
 * - Memory connections (0-1)
 * - Tool connections
 * - maxIterations validation
 */
export function validateAIAgent(
  node: WorkflowNode,
  reverseConnections: Map<string, ReverseConnection[]>,
  workflow: WorkflowJson
): ValidationIssue[] {
  const issues: ValidationIssue[] = [];
  const incoming = reverseConnections.get(node.name) || [];

  // 1. Validate language model connections (REQUIRED: 1 or 2 if fallback)
  const languageModelConnections = incoming.filter(c => c.type === 'ai_languageModel');

  if (languageModelConnections.length === 0) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" requires an ai_languageModel connection. Connect a language model node (e.g., OpenAI Chat Model, Anthropic Chat Model).`,
      code: 'MISSING_LANGUAGE_MODEL'
    });
  } else if (languageModelConnections.length > 2) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" has ${languageModelConnections.length} ai_languageModel connections. Maximum is 2 (for fallback model support).`,
      code: 'TOO_MANY_LANGUAGE_MODELS'
    });
  } else if (languageModelConnections.length === 2) {
    // Check if fallback is enabled
    if (!node.parameters.needsFallback) {
      issues.push({
        severity: 'warning',
        nodeId: node.id,
        nodeName: node.name,
        message: `AI Agent "${node.name}" has 2 language models but needsFallback is not enabled. Set needsFallback=true or remove the second model.`
      });
    }
  } else if (languageModelConnections.length === 1 && node.parameters.needsFallback === true) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" has needsFallback=true but only 1 language model connected. Connect a second model for fallback or disable needsFallback.`,
      code: 'FALLBACK_MISSING_SECOND_MODEL'
    });
  }

  // 2. Validate output parser configuration
  const outputParserConnections = incoming.filter(c => c.type === 'ai_outputParser');

  if (node.parameters.hasOutputParser === true) {
    if (outputParserConnections.length === 0) {
      issues.push({
        severity: 'error',
        nodeId: node.id,
        nodeName: node.name,
        message: `AI Agent "${node.name}" has hasOutputParser=true but no ai_outputParser connection. Connect an output parser or set hasOutputParser=false.`,
        code: 'MISSING_OUTPUT_PARSER'
      });
    }
  } else if (outputParserConnections.length > 0) {
    issues.push({
      severity: 'warning',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" has an output parser connected but hasOutputParser is not true. Set hasOutputParser=true to enable output parsing.`
    });
  }

  if (outputParserConnections.length > 1) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" has ${outputParserConnections.length} output parsers. Only 1 is allowed.`,
      code: 'MULTIPLE_OUTPUT_PARSERS'
    });
  }

  // 3. Validate prompt type configuration
  if (node.parameters.promptType === 'define') {
    if (!node.parameters.text || node.parameters.text.trim() === '') {
      issues.push({
        severity: 'error',
        nodeId: node.id,
        nodeName: node.name,
        message: `AI Agent "${node.name}" has promptType="define" but the text field is empty. Provide a custom prompt or switch to promptType="auto".`,
        code: 'MISSING_PROMPT_TEXT'
      });
    }
  }

  // 4. Check system message (RECOMMENDED)
  if (!node.parameters.systemMessage) {
    issues.push({
      severity: 'info',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" has no systemMessage. Consider adding one to define the agent's role, capabilities, and constraints.`
    });
  } else if (node.parameters.systemMessage.trim().length < MIN_SYSTEM_MESSAGE_LENGTH) {
    issues.push({
      severity: 'info',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" systemMessage is very short (minimum ${MIN_SYSTEM_MESSAGE_LENGTH} characters recommended). Provide more detail about the agent's role and capabilities.`
    });
  }

  // 5. Validate streaming mode constraints (CRITICAL)
  // From spec lines 753-879: AI Agent with streaming MUST NOT have main output connections
  const isStreamingTarget = checkIfStreamingTarget(node, workflow, reverseConnections);
  const hasOwnStreamingEnabled = node.parameters?.options?.streamResponse === true;

  if (isStreamingTarget || hasOwnStreamingEnabled) {
    // Check if AI Agent has any main output connections
    const agentMainOutput = workflow.connections[node.name]?.main;
    if (agentMainOutput && agentMainOutput.flat().some((c: any) => c)) {
      const streamSource = isStreamingTarget
        ? 'connected from Chat Trigger with responseMode="streaming"'
        : 'has streamResponse=true in options';
      issues.push({
        severity: 'error',
        nodeId: node.id,
        nodeName: node.name,
        message: `AI Agent "${node.name}" is in streaming mode (${streamSource}) but has outgoing main connections. Remove all main output connections - streaming responses flow back through the Chat Trigger.`,
        code: 'STREAMING_WITH_MAIN_OUTPUT'
      });
    }
  }

  // 6. Validate memory connections (0-1 allowed)
  const memoryConnections = incoming.filter(c => c.type === 'ai_memory');

  if (memoryConnections.length > 1) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" has ${memoryConnections.length} ai_memory connections. Only 1 memory is allowed.`,
      code: 'MULTIPLE_MEMORY_CONNECTIONS'
    });
  }

  // 7. Validate tool connections
  const toolConnections = incoming.filter(c => c.type === 'ai_tool');

  if (toolConnections.length === 0) {
    issues.push({
      severity: 'info',
      nodeId: node.id,
      nodeName: node.name,
      message: `AI Agent "${node.name}" has no ai_tool connections. Consider adding tools to enhance the agent's capabilities.`
    });
  }

  // 8. Validate maxIterations if specified
  if (node.parameters.maxIterations !== undefined) {
    if (typeof node.parameters.maxIterations !== 'number') {
      issues.push({
        severity: 'error',
        nodeId: node.id,
        nodeName: node.name,
        message: `AI Agent "${node.name}" has invalid maxIterations type. Must be a number.`,
        code: 'INVALID_MAX_ITERATIONS_TYPE'
      });
    } else if (node.parameters.maxIterations < 1) {
      issues.push({
        severity: 'error',
        nodeId: node.id,
        nodeName: node.name,
        message: `AI Agent "${node.name}" has maxIterations=${node.parameters.maxIterations}. Must be at least 1.`,
        code: 'MAX_ITERATIONS_TOO_LOW'
      });
    } else if (node.parameters.maxIterations > MAX_ITERATIONS_WARNING_THRESHOLD) {
      issues.push({
        severity: 'warning',
        nodeId: node.id,
        nodeName: node.name,
        message: `AI Agent "${node.name}" has maxIterations=${node.parameters.maxIterations}. Very high iteration counts (>${MAX_ITERATIONS_WARNING_THRESHOLD}) may cause long execution times and high costs.`
      });
    }
  }

  return issues;
}

/**
 * Check if AI Agent is a streaming target
 * Helper function to determine if an AI Agent is receiving streaming input from Chat Trigger
 */
function checkIfStreamingTarget(
  node: WorkflowNode,
  workflow: WorkflowJson,
  reverseConnections: Map<string, ReverseConnection[]>
): boolean {
  const incoming = reverseConnections.get(node.name) || [];

  // Check if any incoming main connection is from a Chat Trigger with streaming enabled
  const mainConnections = incoming.filter(c => c.type === 'main');

  for (const conn of mainConnections) {
    const sourceNode = workflow.nodes.find(n => n.name === conn.sourceName);
    if (!sourceNode) continue;

    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(sourceNode.type);
    if (normalizedType === 'nodes-langchain.chatTrigger') {
      const responseMode = sourceNode.parameters?.options?.responseMode || 'lastNode';
      if (responseMode === 'streaming') {
        return true;
      }
    }
  }

  return false;
}

/**
 * Validate Chat Trigger Node
 * From spec lines 753-879
 *
 * Critical validations:
 * - responseMode="streaming" requires AI Agent target
 * - AI Agent with streaming MUST NOT have main output connections
 * - responseMode="lastNode" validation
 */
export function validateChatTrigger(
  node: WorkflowNode,
  workflow: WorkflowJson,
  reverseConnections: Map<string, ReverseConnection[]>
): ValidationIssue[] {
  const issues: ValidationIssue[] = [];

  const responseMode = node.parameters?.options?.responseMode || 'lastNode';

  // Get outgoing main connections from Chat Trigger
  const outgoingMain = workflow.connections[node.name]?.main;
  if (!outgoingMain || outgoingMain.length === 0 || !outgoingMain[0] || outgoingMain[0].length === 0) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `Chat Trigger "${node.name}" has no outgoing connections. Connect it to an AI Agent or workflow.`,
      code: 'MISSING_CONNECTIONS'
    });
    return issues;
  }

  const firstConnection = outgoingMain[0][0];
  if (!firstConnection) {
    return issues;
  }

  const targetNode = workflow.nodes.find(n => n.name === firstConnection.node);
  if (!targetNode) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `Chat Trigger "${node.name}" connects to non-existent node "${firstConnection.node}".`,
      code: 'INVALID_TARGET_NODE'
    });
    return issues;
  }

  const targetType = NodeTypeNormalizer.normalizeToFullForm(targetNode.type);

  // Validate streaming mode
  if (responseMode === 'streaming') {
    // CRITICAL: Streaming mode only works with AI Agent
    if (targetType !== 'nodes-langchain.agent') {
      issues.push({
        severity: 'error',
        nodeId: node.id,
        nodeName: node.name,
        message: `Chat Trigger "${node.name}" has responseMode="streaming" but connects to "${targetNode.name}" (${targetType}). Streaming mode only works with AI Agent. Change responseMode to "lastNode" or connect to an AI Agent.`,
        code: 'STREAMING_WRONG_TARGET'
      });
    } else {
      // CRITICAL: Check AI Agent has NO main output connections
      const agentMainOutput = workflow.connections[targetNode.name]?.main;
      if (agentMainOutput && agentMainOutput.flat().some((c: any) => c)) {
        issues.push({
          severity: 'error',
          nodeId: targetNode.id,
          nodeName: targetNode.name,
          message: `AI Agent "${targetNode.name}" is in streaming mode but has outgoing main connections. In streaming mode, the AI Agent must NOT have main output connections - responses stream back through the Chat Trigger.`,
          code: 'STREAMING_AGENT_HAS_OUTPUT'
        });
      }
    }
  }

  // Validate lastNode mode
  if (responseMode === 'lastNode') {
    // lastNode mode requires a workflow that ends somewhere
    // Just informational - this is the default and works with any workflow
    if (targetType === 'nodes-langchain.agent') {
      issues.push({
        severity: 'info',
        nodeId: node.id,
        nodeName: node.name,
        message: `Chat Trigger "${node.name}" uses responseMode="lastNode" with AI Agent. Consider using responseMode="streaming" for better user experience with real-time responses.`
      });
    }
  }

  return issues;
}
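// --- Editor's illustrative sketch (not part of the diff): the connection shape
// that trips STREAMING_AGENT_HAS_OUTPUT above, assuming the Chat Trigger has
// parameters.options.responseMode === 'streaming'. Node names are invented.
const badStreamingConnections = {
  'Chat Trigger': { main: [[{ node: 'AI Agent', type: 'main', index: 0 }]] },
  // Offending edge: in streaming mode the agent must have no main output.
  'AI Agent': { main: [[{ node: 'Send Reply', type: 'main', index: 0 }]] }
};
// Removing the 'AI Agent' main output (responses stream back through the
// Chat Trigger) makes validateChatTrigger pass.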
/**
 * Validate Basic LLM Chain Node
 * From spec - simplified AI chain without agent loop
 *
 * Similar to AI Agent but simpler:
 * - Requires exactly 1 language model
 * - Can have 0-1 memory
 * - No tools (not an agent)
 * - No fallback model support
 */
export function validateBasicLLMChain(
  node: WorkflowNode,
  reverseConnections: Map<string, ReverseConnection[]>
): ValidationIssue[] {
  const issues: ValidationIssue[] = [];
  const incoming = reverseConnections.get(node.name) || [];

  // 1. Validate language model connection (REQUIRED: exactly 1)
  const languageModelConnections = incoming.filter(c => c.type === 'ai_languageModel');

  if (languageModelConnections.length === 0) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `Basic LLM Chain "${node.name}" requires an ai_languageModel connection. Connect a language model node.`,
      code: 'MISSING_LANGUAGE_MODEL'
    });
  } else if (languageModelConnections.length > 1) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `Basic LLM Chain "${node.name}" has ${languageModelConnections.length} ai_languageModel connections. Basic LLM Chain only supports 1 language model (no fallback).`,
      code: 'MULTIPLE_LANGUAGE_MODELS'
    });
  }

  // 2. Validate memory connections (0-1 allowed)
  const memoryConnections = incoming.filter(c => c.type === 'ai_memory');

  if (memoryConnections.length > 1) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `Basic LLM Chain "${node.name}" has ${memoryConnections.length} ai_memory connections. Only 1 memory is allowed.`,
      code: 'MULTIPLE_MEMORY_CONNECTIONS'
    });
  }

  // 3. Check for tool connections (not supported)
  const toolConnections = incoming.filter(c => c.type === 'ai_tool');

  if (toolConnections.length > 0) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `Basic LLM Chain "${node.name}" has ai_tool connections. Basic LLM Chain does not support tools. Use AI Agent if you need tool support.`,
      code: 'TOOLS_NOT_SUPPORTED'
    });
  }

  // 4. Validate prompt configuration
  if (node.parameters.promptType === 'define') {
    if (!node.parameters.text || node.parameters.text.trim() === '') {
      issues.push({
        severity: 'error',
        nodeId: node.id,
        nodeName: node.name,
        message: `Basic LLM Chain "${node.name}" has promptType="define" but the text field is empty.`,
        code: 'MISSING_PROMPT_TEXT'
      });
    }
  }

  return issues;
}

/**
 * Validate all AI-specific nodes in a workflow
 *
 * This is the main entry point called by WorkflowValidator
 */
export function validateAISpecificNodes(
  workflow: WorkflowJson
): ValidationIssue[] {
  const issues: ValidationIssue[] = [];

  // Build reverse connection map (critical for AI validation)
  const reverseConnectionMap = buildReverseConnectionMap(workflow);

  for (const node of workflow.nodes) {
    if (node.disabled) continue;

    const normalizedType = NodeTypeNormalizer.normalizeToFullForm(node.type);

    // Validate AI Agent nodes
    if (normalizedType === 'nodes-langchain.agent') {
      const nodeIssues = validateAIAgent(node, reverseConnectionMap, workflow);
      issues.push(...nodeIssues);
    }

    // Validate Chat Trigger nodes
    if (normalizedType === 'nodes-langchain.chatTrigger') {
      const nodeIssues = validateChatTrigger(node, workflow, reverseConnectionMap);
      issues.push(...nodeIssues);
    }

    // Validate Basic LLM Chain nodes
    if (normalizedType === 'nodes-langchain.chainLlm') {
      const nodeIssues = validateBasicLLMChain(node, reverseConnectionMap);
      issues.push(...nodeIssues);
    }

    // Validate AI tool sub-nodes (13 types)
    if (isAIToolSubNode(normalizedType)) {
      const nodeIssues = validateAIToolSubNode(
        node,
        normalizedType,
        reverseConnectionMap,
        workflow
      );
      issues.push(...nodeIssues);
    }
  }

  return issues;
}
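// --- Editor's illustrative sketch (not part of the diff): running the entry
// point on an agent with no connections at all. The expected codes follow from
// the validators above; the workflow literal is assumed.
const lonelyAgent: WorkflowJson = {
  nodes: [{ id: '1', name: 'AI Agent', type: '@n8n/n8n-nodes-langchain.agent', position: [0, 0], parameters: {} }],
  connections: {}
};
const found = validateAISpecificNodes(lonelyAgent);
// found includes an 'error' with code MISSING_LANGUAGE_MODEL, plus info-level
// hints about the missing systemMessage and absent ai_tool connections.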
/**
 * Check if a workflow contains any AI nodes
 * Useful for skipping AI validation when not needed
 */
export function hasAINodes(workflow: WorkflowJson): boolean {
  const aiNodeTypes = [
    'nodes-langchain.agent',
    'nodes-langchain.chatTrigger',
    'nodes-langchain.chainLlm',
  ];

  return workflow.nodes.some(node => {
    const normalized = NodeTypeNormalizer.normalizeToFullForm(node.type);
    return aiNodeTypes.includes(normalized) || isAIToolSubNode(normalized);
  });
}

/**
 * Helper: Get AI node type category
 */
export function getAINodeCategory(nodeType: string): string | null {
  const normalized = NodeTypeNormalizer.normalizeToFullForm(nodeType);

  if (normalized === 'nodes-langchain.agent') return 'AI Agent';
  if (normalized === 'nodes-langchain.chatTrigger') return 'Chat Trigger';
  if (normalized === 'nodes-langchain.chainLlm') return 'Basic LLM Chain';
  if (isAIToolSubNode(normalized)) return 'AI Tool';

  // Check for AI component nodes
  if (normalized.startsWith('nodes-langchain.')) {
    if (normalized.includes('openAi') || normalized.includes('anthropic') || normalized.includes('googleGemini')) {
      return 'Language Model';
    }
    if (normalized.includes('memory') || normalized.includes('buffer')) {
      return 'Memory';
    }
    if (normalized.includes('vectorStore') || normalized.includes('pinecone') || normalized.includes('qdrant')) {
      return 'Vector Store';
    }
    if (normalized.includes('embedding')) {
      return 'Embeddings';
    }
    return 'AI Component';
  }

  return null;
}

src/services/ai-tool-validators.ts (new file, 607 lines)
@@ -0,0 +1,607 @@
/**
 * AI Tool Sub-Node Validators
 *
 * Implements validation logic for all 13 AI tool sub-nodes from
 * docs/FINAL_AI_VALIDATION_SPEC.md
 *
 * Each validator checks configuration requirements, connections, and
 * parameters specific to that tool type.
 */

import { NodeTypeNormalizer } from '../utils/node-type-normalizer';

// Validation constants
const MIN_DESCRIPTION_LENGTH_SHORT = 10;
const MIN_DESCRIPTION_LENGTH_MEDIUM = 15;
const MIN_DESCRIPTION_LENGTH_LONG = 20;
const MAX_ITERATIONS_WARNING_THRESHOLD = 50;
const MAX_TOPK_WARNING_THRESHOLD = 20;

export interface WorkflowNode {
  id: string;
  name: string;
  type: string;
  position: [number, number];
  parameters: any;
  credentials?: any;
  disabled?: boolean;
  typeVersion?: number;
}

export interface WorkflowJson {
  name?: string;
  nodes: WorkflowNode[];
  connections: Record<string, any>;
  settings?: any;
}

export interface ReverseConnection {
  sourceName: string;
  sourceType: string;
  type: string; // main, ai_tool, ai_languageModel, etc.
  index: number;
}

export interface ValidationIssue {
  severity: 'error' | 'warning' | 'info';
  nodeId?: string;
  nodeName?: string;
  message: string;
  code?: string;
}

/**
 * 1. HTTP Request Tool Validator
 * From spec lines 883-1123
 */
export function validateHTTPRequestTool(node: WorkflowNode): ValidationIssue[] {
  const issues: ValidationIssue[] = [];

  // 1. Check toolDescription (REQUIRED)
  if (!node.parameters.toolDescription) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `HTTP Request Tool "${node.name}" has no toolDescription. Add a clear description to help the LLM know when to use this API.`,
      code: 'MISSING_TOOL_DESCRIPTION'
    });
  } else if (node.parameters.toolDescription.trim().length < MIN_DESCRIPTION_LENGTH_MEDIUM) {
    issues.push({
      severity: 'warning',
      nodeId: node.id,
      nodeName: node.name,
      message: `HTTP Request Tool "${node.name}" toolDescription is too short (minimum ${MIN_DESCRIPTION_LENGTH_MEDIUM} characters). Explain what API this calls and when to use it.`
    });
  }

  // 2. Check URL (REQUIRED)
  if (!node.parameters.url) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `HTTP Request Tool "${node.name}" has no URL. Add the API endpoint URL.`,
      code: 'MISSING_URL'
    });
  } else {
    // Validate URL protocol (must be http or https)
    try {
      const urlObj = new URL(node.parameters.url);
      if (urlObj.protocol !== 'http:' && urlObj.protocol !== 'https:') {
        issues.push({
          severity: 'error',
          nodeId: node.id,
          nodeName: node.name,
          message: `HTTP Request Tool "${node.name}" has invalid URL protocol "${urlObj.protocol}". Use http:// or https:// only.`,
          code: 'INVALID_URL_PROTOCOL'
        });
      }
    } catch (e) {
      // URL parsing failed - invalid format
      // Only warn if it's not an n8n expression
      if (!node.parameters.url.includes('{{')) {
        issues.push({
          severity: 'warning',
          nodeId: node.id,
          nodeName: node.name,
          message: `HTTP Request Tool "${node.name}" has potentially invalid URL format. Ensure it's a valid URL or n8n expression.`
        });
      }
    }
  }

  // 3. Validate placeholders match definitions
  if (node.parameters.url || node.parameters.body || node.parameters.headers) {
    const placeholderRegex = /\{([^}]+)\}/g;
    const placeholders = new Set<string>();

    // Extract placeholders from URL, body, headers
    [node.parameters.url, node.parameters.body, JSON.stringify(node.parameters.headers || {})].forEach(text => {
      if (text) {
        let match;
        while ((match = placeholderRegex.exec(text)) !== null) {
          placeholders.add(match[1]);
        }
      }
    });

    // If placeholders exist in URL/body/headers
    if (placeholders.size > 0) {
      const definitions = node.parameters.placeholderDefinitions?.values || [];
      const definedNames = new Set(definitions.map((d: any) => d.name));

      // If no placeholderDefinitions at all, warn
      if (!node.parameters.placeholderDefinitions) {
        issues.push({
          severity: 'warning',
          nodeId: node.id,
          nodeName: node.name,
          message: `HTTP Request Tool "${node.name}" uses placeholders but has no placeholderDefinitions. Add definitions to describe the expected inputs.`
        });
      } else {
        // Has placeholderDefinitions, check each placeholder
        for (const placeholder of placeholders) {
          if (!definedNames.has(placeholder)) {
            issues.push({
              severity: 'error',
              nodeId: node.id,
              nodeName: node.name,
              message: `HTTP Request Tool "${node.name}" uses placeholder "${placeholder}" in the URL but it's not defined in placeholderDefinitions.`,
              code: 'UNDEFINED_PLACEHOLDER'
            });
          }
        }

        // Check for defined but unused placeholders
        for (const def of definitions) {
          if (!placeholders.has(def.name)) {
            issues.push({
              severity: 'warning',
              nodeId: node.id,
              nodeName: node.name,
              message: `HTTP Request Tool "${node.name}" defines placeholder "${def.name}" but doesn't use it.`
            });
          }
        }
      }
    }
  }

  // 4. Validate authentication
  if (node.parameters.authentication === 'predefinedCredentialType' &&
      (!node.credentials || Object.keys(node.credentials).length === 0)) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `HTTP Request Tool "${node.name}" requires credentials but none are configured.`,
      code: 'MISSING_CREDENTIALS'
    });
  }

  // 5. Validate HTTP method
  const validMethods = ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS'];
  if (node.parameters.method && !validMethods.includes(node.parameters.method.toUpperCase())) {
    issues.push({
      severity: 'error',
      nodeId: node.id,
      nodeName: node.name,
      message: `HTTP Request Tool "${node.name}" has invalid HTTP method "${node.parameters.method}". Use one of: ${validMethods.join(', ')}.`,
      code: 'INVALID_HTTP_METHOD'
    });
  }

  // 6. Validate body for POST/PUT/PATCH
  if (['POST', 'PUT', 'PATCH'].includes(node.parameters.method?.toUpperCase())) {
    if (!node.parameters.body && !node.parameters.jsonBody) {
      issues.push({
        severity: 'warning',
        nodeId: node.id,
        nodeName: node.name,
        message: `HTTP Request Tool "${node.name}" uses ${node.parameters.method} but has no body. Consider adding a body or using GET instead.`
      });
    }
  }

  return issues;
}
|
||||
|
||||
/**
|
||||
* 2. Code Tool Validator
|
||||
* From spec lines 1125-1393
|
||||
*/
|
||||
export function validateCodeTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Code Tool "${node.name}" has no toolDescription. Add one to help the LLM understand the tool's purpose.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Check jsCode exists (REQUIRED)
|
||||
if (!node.parameters.jsCode || node.parameters.jsCode.trim().length === 0) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Code Tool "${node.name}" code is empty. Add the JavaScript code to execute.`,
|
||||
code: 'MISSING_CODE'
|
||||
});
|
||||
}
|
||||
|
||||
// 3. Recommend input/output schema
|
||||
if (!node.parameters.inputSchema && !node.parameters.specifyInputSchema) {
|
||||
issues.push({
|
||||
severity: 'warning',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Code Tool "${node.name}" has no input schema. Consider adding one to validate LLM inputs.`
|
||||
});
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* 3. Vector Store Tool Validator
|
||||
* From spec lines 1395-1620
|
||||
*/
|
||||
export function validateVectorStoreTool(
|
||||
node: WorkflowNode,
|
||||
reverseConnections: Map<string, ReverseConnection[]>,
|
||||
workflow: WorkflowJson
|
||||
): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Vector Store Tool "${node.name}" has no toolDescription. Add one to explain what data it searches.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Validate topK parameter if specified
|
||||
if (node.parameters.topK !== undefined) {
|
||||
if (typeof node.parameters.topK !== 'number' || node.parameters.topK < 1) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Vector Store Tool "${node.name}" has invalid topK value. Must be a positive number.`,
|
||||
code: 'INVALID_TOPK'
|
||||
});
|
||||
} else if (node.parameters.topK > MAX_TOPK_WARNING_THRESHOLD) {
|
||||
issues.push({
|
||||
severity: 'warning',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Vector Store Tool "${node.name}" has topK=${node.parameters.topK}. Large values (>${MAX_TOPK_WARNING_THRESHOLD}) may overwhelm the LLM context. Consider reducing to 10 or less.`
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* 4. Workflow Tool Validator
|
||||
* From spec lines 1622-1831 (already complete in spec)
|
||||
*/
|
||||
export function validateWorkflowTool(node: WorkflowNode, reverseConnections?: Map<string, ReverseConnection[]>): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Workflow Tool "${node.name}" has no toolDescription. Add one to help the LLM know when to use this tool.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Check workflowId (REQUIRED)
|
||||
if (!node.parameters.workflowId) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Workflow Tool "${node.name}" has no workflowId. Select a workflow to execute.`,
|
||||
code: 'MISSING_WORKFLOW_ID'
|
||||
});
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* 5. AI Agent Tool Validator
|
||||
* From spec lines 1882-2122
|
||||
*/
|
||||
export function validateAIAgentTool(
|
||||
node: WorkflowNode,
|
||||
reverseConnections: Map<string, ReverseConnection[]>
|
||||
): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `AI Agent Tool "${node.name}" has no toolDescription. Add one to help the LLM know when to use this tool.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Validate maxIterations if specified
|
||||
if (node.parameters.maxIterations !== undefined) {
|
||||
if (typeof node.parameters.maxIterations !== 'number' || node.parameters.maxIterations < 1) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `AI Agent Tool "${node.name}" has invalid maxIterations. Must be a positive number.`,
|
||||
code: 'INVALID_MAX_ITERATIONS'
|
||||
});
|
||||
} else if (node.parameters.maxIterations > MAX_ITERATIONS_WARNING_THRESHOLD) {
|
||||
issues.push({
|
||||
severity: 'warning',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `AI Agent Tool "${node.name}" has maxIterations=${node.parameters.maxIterations}. Large values (>${MAX_ITERATIONS_WARNING_THRESHOLD}) may lead to long execution times.`
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* 6. MCP Client Tool Validator
|
||||
* From spec lines 2124-2534 (already complete in spec)
|
||||
*/
|
||||
export function validateMCPClientTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `MCP Client Tool "${node.name}" has no toolDescription. Add one to help the LLM know when to use this tool.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Check serverUrl (REQUIRED)
|
||||
if (!node.parameters.serverUrl) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `MCP Client Tool "${node.name}" has no serverUrl. Configure the MCP server URL.`,
|
||||
code: 'MISSING_SERVER_URL'
|
||||
});
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* 7-8. Simple Tools (Calculator, Think) Validators
|
||||
* From spec lines 1868-2009
|
||||
*/
|
||||
export function validateCalculatorTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// Calculator Tool has a built-in description and is self-explanatory
|
||||
// toolDescription is optional - no validation needed
|
||||
return issues;
|
||||
}
|
||||
|
||||
export function validateThinkTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// Think Tool has a built-in description and is self-explanatory
|
||||
// toolDescription is optional - no validation needed
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* 9-12. Search Tools Validators
|
||||
* From spec lines 1833-2139
|
||||
*/
|
||||
export function validateSerpApiTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `SerpApi Tool "${node.name}" has no toolDescription. Add one to explain when to use Google search.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Check credentials (RECOMMENDED)
|
||||
if (!node.credentials || !node.credentials.serpApiApi) {
|
||||
issues.push({
|
||||
severity: 'warning',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `SerpApi Tool "${node.name}" requires SerpApi credentials. Configure your API key.`
|
||||
});
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
export function validateWikipediaTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Wikipedia Tool "${node.name}" has no toolDescription. Add one to explain when to use Wikipedia.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Validate language if specified
|
||||
if (node.parameters.language) {
|
||||
const validLanguageCodes = /^[a-z]{2,3}$/; // ISO 639 codes
|
||||
if (!validLanguageCodes.test(node.parameters.language)) {
|
||||
issues.push({
|
||||
severity: 'warning',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `Wikipedia Tool "${node.name}" has potentially invalid language code "${node.parameters.language}". Use ISO 639 codes (e.g., "en", "es", "fr").`
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
export function validateSearXngTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check toolDescription (REQUIRED)
|
||||
if (!node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `SearXNG Tool "${node.name}" has no toolDescription. Add one to explain when to use SearXNG.`,
|
||||
code: 'MISSING_TOOL_DESCRIPTION'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Check baseUrl (REQUIRED)
|
||||
if (!node.parameters.baseUrl) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `SearXNG Tool "${node.name}" has no baseUrl. Configure your SearXNG instance URL.`,
|
||||
code: 'MISSING_BASE_URL'
|
||||
});
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
export function validateWolframAlphaTool(node: WorkflowNode): ValidationIssue[] {
|
||||
const issues: ValidationIssue[] = [];
|
||||
|
||||
// 1. Check credentials (REQUIRED)
|
||||
if (!node.credentials || (!node.credentials.wolframAlpha && !node.credentials.wolframAlphaApi)) {
|
||||
issues.push({
|
||||
severity: 'error',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `WolframAlpha Tool "${node.name}" requires Wolfram|Alpha API credentials. Configure your App ID.`,
|
||||
code: 'MISSING_CREDENTIALS'
|
||||
});
|
||||
}
|
||||
|
||||
// 2. Check description (INFO)
|
||||
if (!node.parameters.description && !node.parameters.toolDescription) {
|
||||
issues.push({
|
||||
severity: 'info',
|
||||
nodeId: node.id,
|
||||
nodeName: node.name,
|
||||
message: `WolframAlpha Tool "${node.name}" has no custom description. Add one to explain when to use Wolfram|Alpha for computational queries.`
|
||||
});
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper: Map node types to validator functions
|
||||
*/
|
||||
export const AI_TOOL_VALIDATORS = {
|
||||
'nodes-langchain.toolHttpRequest': validateHTTPRequestTool,
|
||||
'nodes-langchain.toolCode': validateCodeTool,
|
||||
'nodes-langchain.toolVectorStore': validateVectorStoreTool,
|
||||
'nodes-langchain.toolWorkflow': validateWorkflowTool,
|
||||
'nodes-langchain.agentTool': validateAIAgentTool,
|
||||
'nodes-langchain.mcpClientTool': validateMCPClientTool,
|
||||
'nodes-langchain.toolCalculator': validateCalculatorTool,
|
||||
'nodes-langchain.toolThink': validateThinkTool,
|
||||
'nodes-langchain.toolSerpApi': validateSerpApiTool,
|
||||
'nodes-langchain.toolWikipedia': validateWikipediaTool,
|
||||
'nodes-langchain.toolSearXng': validateSearXngTool,
|
||||
'nodes-langchain.toolWolframAlpha': validateWolframAlphaTool,
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* Check if a node type is an AI tool sub-node
|
||||
*/
|
||||
export function isAIToolSubNode(nodeType: string): boolean {
|
||||
const normalized = NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
return normalized in AI_TOOL_VALIDATORS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate an AI tool sub-node with the appropriate validator
|
||||
*/
|
||||
export function validateAIToolSubNode(
|
||||
node: WorkflowNode,
|
||||
nodeType: string,
|
||||
reverseConnections: Map<string, ReverseConnection[]>,
|
||||
workflow: WorkflowJson
|
||||
): ValidationIssue[] {
|
||||
const normalized = NodeTypeNormalizer.normalizeToFullForm(nodeType);
|
||||
|
||||
// Route to appropriate validator based on node type
|
||||
switch (normalized) {
|
||||
case 'nodes-langchain.toolHttpRequest':
|
||||
return validateHTTPRequestTool(node);
|
||||
case 'nodes-langchain.toolCode':
|
||||
return validateCodeTool(node);
|
||||
case 'nodes-langchain.toolVectorStore':
|
||||
return validateVectorStoreTool(node, reverseConnections, workflow);
|
||||
case 'nodes-langchain.toolWorkflow':
|
||||
return validateWorkflowTool(node);
|
||||
case 'nodes-langchain.agentTool':
|
||||
return validateAIAgentTool(node, reverseConnections);
|
||||
case 'nodes-langchain.mcpClientTool':
|
||||
return validateMCPClientTool(node);
|
||||
case 'nodes-langchain.toolCalculator':
|
||||
return validateCalculatorTool(node);
|
||||
case 'nodes-langchain.toolThink':
|
||||
return validateThinkTool(node);
|
||||
case 'nodes-langchain.toolSerpApi':
|
||||
return validateSerpApiTool(node);
|
||||
case 'nodes-langchain.toolWikipedia':
|
||||
return validateWikipediaTool(node);
|
||||
case 'nodes-langchain.toolSearXng':
|
||||
return validateSearXngTool(node);
|
||||
case 'nodes-langchain.toolWolframAlpha':
|
||||
return validateWolframAlphaTool(node);
|
||||
default:
|
||||
return [];
|
||||
}
|
||||
}
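/**
 * Usage sketch for the dispatcher above. The node literal, the empty
 * reverse-connection map, and the workflow stub are hypothetical
 * illustrations, not fixtures from this repository.
 *
 * @example
 * const node = {
 *   id: '1',
 *   name: 'Fetch Weather',
 *   type: 'nodes-langchain.toolHttpRequest',
 *   parameters: { toolDescription: 'Looks up current weather', url: 'https://api.example.com/weather' }
 * } as WorkflowNode;
 *
 * if (isAIToolSubNode(node.type)) {
 *   const issues = validateAIToolSubNode(node, node.type, new Map(), {} as WorkflowJson);
 *   issues.forEach(i => console.log(`[${i.severity}] ${i.message}`));
 * }
 */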
321	src/services/breaking-change-detector.ts	Normal file
@@ -0,0 +1,321 @@
/**
 * Breaking Change Detector
 *
 * Detects breaking changes between node versions by:
 * 1. Consulting the hardcoded breaking changes registry
 * 2. Dynamically comparing property schemas between versions
 * 3. Analyzing property requirement changes
 *
 * Used by the autofixer to intelligently upgrade node versions.
 */

import { NodeRepository } from '../database/node-repository';
import {
  BREAKING_CHANGES_REGISTRY,
  BreakingChange,
  getBreakingChangesForNode,
  getAllChangesForNode
} from './breaking-changes-registry';

export interface DetectedChange {
  propertyName: string;
  changeType: 'added' | 'removed' | 'renamed' | 'type_changed' | 'requirement_changed' | 'default_changed';
  isBreaking: boolean;
  oldValue?: any;
  newValue?: any;
  migrationHint: string;
  autoMigratable: boolean;
  migrationStrategy?: any;
  severity: 'LOW' | 'MEDIUM' | 'HIGH';
  source: 'registry' | 'dynamic'; // Where this change was detected
}

export interface VersionUpgradeAnalysis {
  nodeType: string;
  fromVersion: string;
  toVersion: string;
  hasBreakingChanges: boolean;
  changes: DetectedChange[];
  autoMigratableCount: number;
  manualRequiredCount: number;
  overallSeverity: 'LOW' | 'MEDIUM' | 'HIGH';
  recommendations: string[];
}

export class BreakingChangeDetector {
  constructor(private nodeRepository: NodeRepository) {}

  /**
   * Analyze a version upgrade and detect all changes
   */
  async analyzeVersionUpgrade(
    nodeType: string,
    fromVersion: string,
    toVersion: string
  ): Promise<VersionUpgradeAnalysis> {
    // Get changes from registry
    const registryChanges = this.getRegistryChanges(nodeType, fromVersion, toVersion);

    // Get dynamic changes by comparing schemas
    const dynamicChanges = this.detectDynamicChanges(nodeType, fromVersion, toVersion);

    // Merge and deduplicate changes
    const allChanges = this.mergeChanges(registryChanges, dynamicChanges);

    // Calculate statistics
    const hasBreakingChanges = allChanges.some(c => c.isBreaking);
    const autoMigratableCount = allChanges.filter(c => c.autoMigratable).length;
    const manualRequiredCount = allChanges.filter(c => !c.autoMigratable).length;

    // Determine overall severity
    const overallSeverity = this.calculateOverallSeverity(allChanges);

    // Generate recommendations
    const recommendations = this.generateRecommendations(allChanges);

    return {
      nodeType,
      fromVersion,
      toVersion,
      hasBreakingChanges,
      changes: allChanges,
      autoMigratableCount,
      manualRequiredCount,
      overallSeverity,
      recommendations
    };
  }

  /**
   * Get changes from the hardcoded registry
   */
  private getRegistryChanges(
    nodeType: string,
    fromVersion: string,
    toVersion: string
  ): DetectedChange[] {
    const registryChanges = getAllChangesForNode(nodeType, fromVersion, toVersion);

    return registryChanges.map(change => ({
      propertyName: change.propertyName,
      changeType: change.changeType,
      isBreaking: change.isBreaking,
      oldValue: change.oldValue,
      newValue: change.newValue,
      migrationHint: change.migrationHint,
      autoMigratable: change.autoMigratable,
      migrationStrategy: change.migrationStrategy,
      severity: change.severity,
      source: 'registry' as const
    }));
  }

  /**
   * Dynamically detect changes by comparing property schemas
   */
  private detectDynamicChanges(
    nodeType: string,
    fromVersion: string,
    toVersion: string
  ): DetectedChange[] {
    // Get both versions from the database
    const oldVersionData = this.nodeRepository.getNodeVersion(nodeType, fromVersion);
    const newVersionData = this.nodeRepository.getNodeVersion(nodeType, toVersion);

    if (!oldVersionData || !newVersionData) {
      return []; // Can't detect dynamic changes without version data
    }

    const changes: DetectedChange[] = [];

    // Compare properties schemas
    const oldProps = this.flattenProperties(oldVersionData.propertiesSchema || []);
    const newProps = this.flattenProperties(newVersionData.propertiesSchema || []);

    // Detect added properties
    for (const propName of Object.keys(newProps)) {
      if (!oldProps[propName]) {
        const prop = newProps[propName];
        const isRequired = prop.required === true;

        changes.push({
          propertyName: propName,
          changeType: 'added',
          isBreaking: isRequired, // Breaking if required
          newValue: prop.type || 'unknown',
          migrationHint: isRequired
            ? `Property "${propName}" is now required in v${toVersion}. Provide a value to prevent validation errors.`
            : `Property "${propName}" was added in v${toVersion}. Optional parameter, safe to ignore if not needed.`,
          autoMigratable: !isRequired, // Can auto-add with default if not required
          migrationStrategy: !isRequired
            ? {
                type: 'add_property',
                defaultValue: prop.default || null
              }
            : undefined,
          severity: isRequired ? 'HIGH' : 'LOW',
          source: 'dynamic'
        });
      }
    }

    // Detect removed properties
    for (const propName of Object.keys(oldProps)) {
      if (!newProps[propName]) {
        changes.push({
          propertyName: propName,
          changeType: 'removed',
          isBreaking: true, // Removal is always breaking
          oldValue: oldProps[propName].type || 'unknown',
          migrationHint: `Property "${propName}" was removed in v${toVersion}. Remove this property from your configuration.`,
          autoMigratable: true, // Can auto-remove
          migrationStrategy: {
            type: 'remove_property'
          },
          severity: 'MEDIUM',
          source: 'dynamic'
        });
      }
    }

    // Detect requirement changes
    for (const propName of Object.keys(newProps)) {
      if (oldProps[propName]) {
        const oldRequired = oldProps[propName].required === true;
        const newRequired = newProps[propName].required === true;

        if (oldRequired !== newRequired) {
          changes.push({
            propertyName: propName,
            changeType: 'requirement_changed',
            isBreaking: newRequired && !oldRequired, // Breaking if it became required
            oldValue: oldRequired ? 'required' : 'optional',
            newValue: newRequired ? 'required' : 'optional',
            migrationHint: newRequired
              ? `Property "${propName}" is now required in v${toVersion}. Ensure a value is provided.`
              : `Property "${propName}" is now optional in v${toVersion}.`,
            autoMigratable: false, // Requirement changes need manual review
            severity: newRequired ? 'HIGH' : 'LOW',
            source: 'dynamic'
          });
        }
      }
    }

    return changes;
  }

  /**
   * Flatten nested properties into a map for easy comparison
   */
  private flattenProperties(properties: any[], prefix: string = ''): Record<string, any> {
    const flat: Record<string, any> = {};

    for (const prop of properties) {
      if (!prop.name && !prop.displayName) continue;

      const propName = prop.name || prop.displayName;
      const fullPath = prefix ? `${prefix}.${propName}` : propName;

      flat[fullPath] = prop;

      // Recursively flatten nested options
      if (prop.options && Array.isArray(prop.options)) {
        Object.assign(flat, this.flattenProperties(prop.options, fullPath));
      }
    }

    return flat;
  }

  /**
   * Merge registry and dynamic changes, avoiding duplicates
   */
  private mergeChanges(
    registryChanges: DetectedChange[],
    dynamicChanges: DetectedChange[]
  ): DetectedChange[] {
    const merged = [...registryChanges];

    // Add dynamic changes that aren't already in the registry
    for (const dynamicChange of dynamicChanges) {
      const existsInRegistry = registryChanges.some(
        rc => rc.propertyName === dynamicChange.propertyName &&
              rc.changeType === dynamicChange.changeType
      );

      if (!existsInRegistry) {
        merged.push(dynamicChange);
      }
    }

    // Sort by severity (HIGH -> MEDIUM -> LOW)
    const severityOrder = { HIGH: 0, MEDIUM: 1, LOW: 2 };
    merged.sort((a, b) => severityOrder[a.severity] - severityOrder[b.severity]);

    return merged;
  }

  /**
   * Calculate overall severity of the upgrade
   */
  private calculateOverallSeverity(changes: DetectedChange[]): 'LOW' | 'MEDIUM' | 'HIGH' {
    if (changes.some(c => c.severity === 'HIGH')) return 'HIGH';
    if (changes.some(c => c.severity === 'MEDIUM')) return 'MEDIUM';
    return 'LOW';
  }

  /**
   * Generate actionable recommendations for the upgrade
   */
  private generateRecommendations(changes: DetectedChange[]): string[] {
    const recommendations: string[] = [];

    const breakingChanges = changes.filter(c => c.isBreaking);
    const autoMigratable = changes.filter(c => c.autoMigratable);
    const manualRequired = changes.filter(c => !c.autoMigratable);

    if (breakingChanges.length === 0) {
      recommendations.push('✓ No breaking changes detected. This upgrade should be safe.');
    } else {
      recommendations.push(
        `⚠ ${breakingChanges.length} breaking change(s) detected. Review carefully before applying.`
      );
    }

    if (autoMigratable.length > 0) {
      recommendations.push(
        `✓ ${autoMigratable.length} change(s) can be automatically migrated.`
      );
    }

    if (manualRequired.length > 0) {
      recommendations.push(
        `✋ ${manualRequired.length} change(s) require manual intervention.`
      );

      // List specific manual changes
      for (const change of manualRequired) {
        recommendations.push(`  - ${change.propertyName}: ${change.migrationHint}`);
      }
    }

    return recommendations;
  }

  /**
   * Quick check: does this upgrade have breaking changes?
   */
  hasBreakingChanges(nodeType: string, fromVersion: string, toVersion: string): boolean {
    const registryChanges = getBreakingChangesForNode(nodeType, fromVersion, toVersion);
    return registryChanges.length > 0;
  }

  /**
   * Get simple list of property names that changed
   */
  getChangedProperties(nodeType: string, fromVersion: string, toVersion: string): string[] {
    const registryChanges = getAllChangesForNode(nodeType, fromVersion, toVersion);
    return registryChanges.map(c => c.propertyName);
  }
}
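/**
 * Usage sketch (illustrative only; assumes a NodeRepository instance is
 * already wired up elsewhere and that this runs inside an async context):
 *
 * @example
 * const detector = new BreakingChangeDetector(repository);
 * const analysis = await detector.analyzeVersionUpgrade('n8n-nodes-base.webhook', '1.0', '2.0');
 * if (analysis.hasBreakingChanges) {
 *   console.warn(`Upgrade severity: ${analysis.overallSeverity}`);
 *   analysis.recommendations.forEach(r => console.warn(r));
 * }
 */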
315	src/services/breaking-changes-registry.ts	Normal file
@@ -0,0 +1,315 @@
/**
 * Breaking Changes Registry
 *
 * Central registry of known breaking changes between node versions.
 * Used by the autofixer to detect and migrate version upgrades intelligently.
 *
 * Each entry defines:
 * - Which versions are affected
 * - What properties changed
 * - Whether it's auto-migratable
 * - Migration strategies and hints
 */

export interface BreakingChange {
  nodeType: string;
  fromVersion: string;
  toVersion: string;
  propertyName: string;
  changeType: 'added' | 'removed' | 'renamed' | 'type_changed' | 'requirement_changed' | 'default_changed';
  isBreaking: boolean;
  oldValue?: string;
  newValue?: string;
  migrationHint: string;
  autoMigratable: boolean;
  migrationStrategy?: {
    type: 'add_property' | 'remove_property' | 'rename_property' | 'set_default';
    defaultValue?: any;
    sourceProperty?: string;
    targetProperty?: string;
  };
  severity: 'LOW' | 'MEDIUM' | 'HIGH';
}

/**
 * Registry of known breaking changes across all n8n nodes
 */
export const BREAKING_CHANGES_REGISTRY: BreakingChange[] = [
  // ==========================================
  // Execute Workflow Node
  // ==========================================
  {
    nodeType: 'n8n-nodes-base.executeWorkflow',
    fromVersion: '1.0',
    toVersion: '1.1',
    propertyName: 'parameters.inputFieldMapping',
    changeType: 'added',
    isBreaking: true,
    migrationHint: 'In v1.1+, the Execute Workflow node requires explicit field mapping to pass data to sub-workflows. Add an "inputFieldMapping" object with "mappings" array defining how to map fields from parent to child workflow.',
    autoMigratable: true,
    migrationStrategy: {
      type: 'add_property',
      defaultValue: {
        mappings: []
      }
    },
    severity: 'HIGH'
  },
  {
    nodeType: 'n8n-nodes-base.executeWorkflow',
    fromVersion: '1.0',
    toVersion: '1.1',
    propertyName: 'parameters.mode',
    changeType: 'requirement_changed',
    isBreaking: false,
    migrationHint: 'The "mode" parameter behavior changed in v1.1. Default is now "static" instead of "list". Ensure your workflow ID specification matches the selected mode.',
    autoMigratable: false,
    severity: 'MEDIUM'
  },

  // ==========================================
  // Webhook Node
  // ==========================================
  {
    nodeType: 'n8n-nodes-base.webhook',
    fromVersion: '2.0',
    toVersion: '2.1',
    propertyName: 'webhookId',
    changeType: 'added',
    isBreaking: true,
    migrationHint: 'In v2.1+, webhooks require a unique "webhookId" field in addition to the path. This ensures webhook persistence across workflow updates. A UUID will be auto-generated if not provided.',
    autoMigratable: true,
    migrationStrategy: {
      type: 'add_property',
      defaultValue: null // Will be generated as UUID at runtime
    },
    severity: 'HIGH'
  },
  {
    nodeType: 'n8n-nodes-base.webhook',
    fromVersion: '1.0',
    toVersion: '2.0',
    propertyName: 'parameters.path',
    changeType: 'requirement_changed',
    isBreaking: true,
    migrationHint: 'In v2.0+, the webhook path must be explicitly defined and cannot be empty. Ensure a valid path is set.',
    autoMigratable: false,
    severity: 'HIGH'
  },
  {
    nodeType: 'n8n-nodes-base.webhook',
    fromVersion: '1.0',
    toVersion: '2.0',
    propertyName: 'parameters.responseMode',
    changeType: 'added',
    isBreaking: false,
    migrationHint: 'v2.0 introduces a "responseMode" parameter to control how the webhook responds. Default is "onReceived" (immediate response). Use "lastNode" to wait for workflow completion.',
    autoMigratable: true,
    migrationStrategy: {
      type: 'add_property',
      defaultValue: 'onReceived'
    },
    severity: 'LOW'
  },

  // ==========================================
  // HTTP Request Node
  // ==========================================
  {
    nodeType: 'n8n-nodes-base.httpRequest',
    fromVersion: '4.1',
    toVersion: '4.2',
    propertyName: 'parameters.sendBody',
    changeType: 'requirement_changed',
    isBreaking: false,
    migrationHint: 'In v4.2+, "sendBody" must be explicitly set to true for POST/PUT/PATCH requests to include a body. Previous versions had implicit body sending.',
    autoMigratable: true,
    migrationStrategy: {
      type: 'add_property',
      defaultValue: true
    },
    severity: 'MEDIUM'
  },

  // ==========================================
  // Code Node (JavaScript)
  // ==========================================
  {
    nodeType: 'n8n-nodes-base.code',
    fromVersion: '1.0',
    toVersion: '2.0',
    propertyName: 'parameters.mode',
    changeType: 'added',
    isBreaking: false,
    migrationHint: 'v2.0 introduces execution modes: "runOnceForAllItems" (default) and "runOnceForEachItem". The default mode processes all items at once, which may differ from v1.0 behavior.',
    autoMigratable: true,
    migrationStrategy: {
      type: 'add_property',
      defaultValue: 'runOnceForAllItems'
    },
    severity: 'MEDIUM'
  },

  // ==========================================
  // Schedule Trigger Node
  // ==========================================
  {
    nodeType: 'n8n-nodes-base.scheduleTrigger',
    fromVersion: '1.0',
    toVersion: '1.1',
    propertyName: 'parameters.rule.interval',
    changeType: 'type_changed',
    isBreaking: true,
    oldValue: 'string',
    newValue: 'array',
    migrationHint: 'In v1.1+, the interval parameter changed from a single string to an array of interval objects. Convert your single interval to an array format: [{field: "hours", value: 1}]',
    autoMigratable: false,
    severity: 'HIGH'
  },

  // ==========================================
  // Error Handling (Global Change)
  // ==========================================
  {
    nodeType: '*', // Applies to all nodes
    fromVersion: '1.0',
    toVersion: '2.0',
    propertyName: 'continueOnFail',
    changeType: 'removed',
    isBreaking: false,
    migrationHint: 'The "continueOnFail" property is deprecated. Use "onError" instead with value "continueErrorOutput" or "continueRegularOutput".',
    autoMigratable: true,
    migrationStrategy: {
      type: 'rename_property',
      sourceProperty: 'continueOnFail',
      targetProperty: 'onError',
      defaultValue: 'continueErrorOutput'
    },
    severity: 'MEDIUM'
  }
];

/**
 * Get breaking changes for a specific node type and version upgrade
 */
export function getBreakingChangesForNode(
  nodeType: string,
  fromVersion: string,
  toVersion: string
): BreakingChange[] {
  return BREAKING_CHANGES_REGISTRY.filter(change => {
    // Match exact node type or wildcard (*)
    const nodeMatches = change.nodeType === nodeType || change.nodeType === '*';

    // Check if version range matches
    const versionMatches =
      compareVersions(fromVersion, change.fromVersion) >= 0 &&
      compareVersions(toVersion, change.toVersion) <= 0;

    return nodeMatches && versionMatches && change.isBreaking;
  });
}
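
// Matching semantics sketch (values refer to the registry above): an entry is
// returned only when the requested upgrade starts at or after the entry's
// fromVersion, ends at or before its toVersion, and the entry is breaking.
//
//   getBreakingChangesForNode('n8n-nodes-base.webhook', '1.0', '2.0')
//   -> [ parameters.path ]   // breaking, and its 1.0 -> 2.0 range fits exactly
//   Excluded: 'webhookId' (its 2.0 -> 2.1 range lies outside the requested
//   range) and 'parameters.responseMode' (matches the range but isBreaking is false).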

/**
 * Get all changes (breaking and non-breaking) for a version upgrade
 */
export function getAllChangesForNode(
  nodeType: string,
  fromVersion: string,
  toVersion: string
): BreakingChange[] {
  return BREAKING_CHANGES_REGISTRY.filter(change => {
    const nodeMatches = change.nodeType === nodeType || change.nodeType === '*';
    const versionMatches =
      compareVersions(fromVersion, change.fromVersion) >= 0 &&
      compareVersions(toVersion, change.toVersion) <= 0;

    return nodeMatches && versionMatches;
  });
}

/**
 * Get auto-migratable changes for a version upgrade
 */
export function getAutoMigratableChanges(
  nodeType: string,
  fromVersion: string,
  toVersion: string
): BreakingChange[] {
  return getAllChangesForNode(nodeType, fromVersion, toVersion).filter(
    change => change.autoMigratable
  );
}

/**
 * Check if a specific node has known breaking changes for a version upgrade
 */
export function hasBreakingChanges(
  nodeType: string,
  fromVersion: string,
  toVersion: string
): boolean {
  return getBreakingChangesForNode(nodeType, fromVersion, toVersion).length > 0;
}

/**
 * Get migration hints for a version upgrade
 */
export function getMigrationHints(
  nodeType: string,
  fromVersion: string,
  toVersion: string
): string[] {
  const changes = getAllChangesForNode(nodeType, fromVersion, toVersion);
  return changes.map(change => change.migrationHint);
}

/**
 * Simple version comparison
 * Returns: -1 if v1 < v2, 0 if equal, 1 if v1 > v2
 */
function compareVersions(v1: string, v2: string): number {
  const parts1 = v1.split('.').map(Number);
  const parts2 = v2.split('.').map(Number);

  for (let i = 0; i < Math.max(parts1.length, parts2.length); i++) {
    const p1 = parts1[i] || 0;
    const p2 = parts2[i] || 0;

    if (p1 < p2) return -1;
    if (p1 > p2) return 1;
  }

  return 0;
}
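
// Worked examples for compareVersions (illustrative):
//   compareVersions('1.1', '1.0')  -> 1
//   compareVersions('2', '2.0')    -> 0   (missing segments default to 0)
//   compareVersions('4.2', '4.10') -> -1  (segments compare numerically, not lexically)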

/**
 * Get nodes with known version migrations
 */
export function getNodesWithVersionMigrations(): string[] {
  const nodeTypes = new Set<string>();

  BREAKING_CHANGES_REGISTRY.forEach(change => {
    if (change.nodeType !== '*') {
      nodeTypes.add(change.nodeType);
    }
  });

  return Array.from(nodeTypes);
}

/**
 * Get all versions tracked for a specific node
 */
export function getTrackedVersionsForNode(nodeType: string): string[] {
  const versions = new Set<string>();

  BREAKING_CHANGES_REGISTRY
    .filter(change => change.nodeType === nodeType || change.nodeType === '*')
    .forEach(change => {
      versions.add(change.fromVersion);
      versions.add(change.toVersion);
    });

  return Array.from(versions).sort((a, b) => compareVersions(a, b));
}
@@ -1,10 +1,12 @@
 /**
  * Configuration Validator Service
- *
+ *
  * Validates node configurations to catch errors before execution.
 * Provides helpful suggestions and identifies missing or misconfigured properties.
  */
 
+import { shouldSkipLiteralValidation } from '../utils/expression-utils.js';
+
 export interface ValidationResult {
   valid: boolean;
   errors: ValidationError[];
@@ -31,13 +33,19 @@ export interface ValidationWarning {
 }
 
 export class ConfigValidator {
+  /**
+   * UI-only property types that should not be validated as configuration
+   */
+  private static readonly UI_ONLY_TYPES = ['notice', 'callout', 'infoBox', 'info'];
+
   /**
    * Validate a node configuration
    */
   static validate(
-    nodeType: string,
-    config: Record<string, any>,
-    properties: any[]
+    nodeType: string,
+    config: Record<string, any>,
+    properties: any[],
+    userProvidedKeys?: Set<string> // NEW: Track user-provided properties to avoid warning about defaults
  ): ValidationResult {
     // Input validation
     if (!config || typeof config !== 'object') {
@@ -46,7 +54,7 @@
     if (!properties || !Array.isArray(properties)) {
       throw new TypeError('Properties must be a non-null array');
     }
-
+
     const errors: ValidationError[] = [];
     const warnings: ValidationWarning[] = [];
     const suggestions: string[] = [];
@@ -69,8 +77,8 @@
     this.performNodeSpecificValidation(nodeType, config, errors, warnings, suggestions, autofix);
 
     // Check for common issues
-    this.checkCommonIssues(nodeType, config, properties, warnings, suggestions);
+    this.checkCommonIssues(nodeType, config, properties, warnings, suggestions, userProvidedKeys);
 
     // Security checks
     this.performSecurityChecks(nodeType, config, warnings);
 
@@ -234,8 +242,86 @@
           message: `Property '${key}' must be a boolean, got ${typeof value}`,
           fix: `Change ${key} to true or false`
         });
-      }
+      } else if (prop.type === 'resourceLocator') {
+        // resourceLocator validation: Used by AI model nodes (OpenAI, Anthropic, etc.)
+        // Must be an object with required properties:
+        // - mode: string ('list' | 'id' | 'url')
+        // - value: any (the actual model/resource identifier)
+        // Common mistake: passing string directly instead of object structure
+        if (typeof value !== 'object' || value === null || Array.isArray(value)) {
+          const fixValue = typeof value === 'string' ? value : JSON.stringify(value);
+          errors.push({
+            type: 'invalid_type',
+            property: key,
+            message: `Property '${key}' is a resourceLocator and must be an object with 'mode' and 'value' properties, got ${typeof value}`,
+            fix: `Change ${key} to { mode: "list", value: ${JSON.stringify(fixValue)} } or { mode: "id", value: ${JSON.stringify(fixValue)} }`
+          });
+        } else {
+          // Check required properties
+          if (!value.mode) {
+            errors.push({
+              type: 'missing_required',
+              property: `${key}.mode`,
+              message: `resourceLocator '${key}' is missing required property 'mode'`,
+              fix: `Add mode property: { mode: "list", value: ${JSON.stringify(value.value || '')} }`
+            });
+          } else if (typeof value.mode !== 'string') {
+            errors.push({
+              type: 'invalid_type',
+              property: `${key}.mode`,
+              message: `resourceLocator '${key}.mode' must be a string, got ${typeof value.mode}`,
+              fix: `Set mode to a valid string value`
+            });
+          } else if (prop.modes) {
+            // Schema-based validation: check if mode exists in the modes definition.
+            // In n8n, modes are defined at the top level of resourceLocator properties.
+            // Modes can be defined in different ways:
+            // 1. Array of mode objects: [{name: 'list', ...}, {name: 'id', ...}, {name: 'name', ...}]
+            // 2. Object with mode keys: { list: {...}, id: {...}, url: {...}, name: {...} }
+            const modes = prop.modes;
+
+            // Validate modes structure before processing to prevent crashes
+            if (!modes || typeof modes !== 'object') {
+              // Invalid schema structure - skip validation to prevent false positives
+              continue;
+            }
+
+            let allowedModes: string[] = [];
+
+            if (Array.isArray(modes)) {
+              // Array format (most common in n8n): extract name property from each mode object
+              allowedModes = modes
+                .map(m => (typeof m === 'object' && m !== null) ? m.name : m)
+                .filter(m => typeof m === 'string' && m.length > 0);
+            } else {
+              // Object format: extract keys as mode names
+              allowedModes = Object.keys(modes).filter(k => k.length > 0);
+            }
+
+            // Only validate if we successfully extracted modes
+            if (allowedModes.length > 0 && !allowedModes.includes(value.mode)) {
+              errors.push({
+                type: 'invalid_value',
+                property: `${key}.mode`,
+                message: `resourceLocator '${key}.mode' must be one of [${allowedModes.join(', ')}], got '${value.mode}'`,
+                fix: `Change mode to one of: ${allowedModes.join(', ')}`
+              });
+            }
+          }
+          // If no modes are defined at property level, skip mode validation.
+          // This prevents false positives for nodes with dynamic/runtime-determined modes.
+
+          if (value.value === undefined) {
+            errors.push({
+              type: 'missing_required',
+              property: `${key}.value`,
+              message: `resourceLocator '${key}' is missing required property 'value'`,
+              fix: `Add value property to specify the ${prop.displayName || key}`
+            });
+          }
+        }
+      }
 
       // Options validation
       if (prop.type === 'options' && prop.options) {
         const validValues = prop.options.map((opt: any) =>
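To make the accepted resourceLocator shape concrete, a minimal sketch (the property name 'model' and the values are arbitrary illustrations, not taken from a real node schema):

    // Passes the checks above: an object with a string 'mode' and a defined 'value'.
    const ok = { model: { mode: 'list', value: 'gpt-4o' } };

    // Rejected with invalid_type: a bare string where the object structure is required.
    const bad = { model: 'gpt-4o' };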
@@ -297,13 +383,16 @@
   ): void {
     // URL validation
     if (config.url && typeof config.url === 'string') {
-      if (!config.url.startsWith('http://') && !config.url.startsWith('https://')) {
-        errors.push({
-          type: 'invalid_value',
-          property: 'url',
-          message: 'URL must start with http:// or https://',
-          fix: 'Add https:// to the beginning of your URL'
-        });
-      }
+      // Skip validation for expressions - they will be evaluated at runtime
+      if (!shouldSkipLiteralValidation(config.url)) {
+        if (!config.url.startsWith('http://') && !config.url.startsWith('https://')) {
+          errors.push({
+            type: 'invalid_value',
+            property: 'url',
+            message: 'URL must start with http:// or https://',
+            fix: 'Add https:// to the beginning of your URL'
+          });
+        }
+      }
     }
 
@@ -333,15 +422,19 @@
 
     // JSON body validation
     if (config.sendBody && config.contentType === 'json' && config.jsonBody) {
-      try {
-        JSON.parse(config.jsonBody);
-      } catch (e) {
-        errors.push({
-          type: 'invalid_value',
-          property: 'jsonBody',
-          message: 'jsonBody contains invalid JSON',
-          fix: 'Ensure jsonBody contains valid JSON syntax'
-        });
-      }
+      // Skip validation for expressions - they will be evaluated at runtime
+      if (!shouldSkipLiteralValidation(config.jsonBody)) {
+        try {
+          JSON.parse(config.jsonBody);
+        } catch (e) {
+          const errorMsg = e instanceof Error ? e.message : 'Unknown parsing error';
+          errors.push({
+            type: 'invalid_value',
+            property: 'jsonBody',
+            message: `jsonBody contains invalid JSON: ${errorMsg}`,
+            fix: 'Fix JSON syntax error and ensure valid JSON format'
+          });
+        }
+      }
     }
   }
@@ -445,30 +538,48 @@
     config: Record<string, any>,
     properties: any[],
     warnings: ValidationWarning[],
-    suggestions: string[]
+    suggestions: string[],
+    userProvidedKeys?: Set<string> // NEW: Only warn about user-provided properties
   ): void {
     // Skip visibility checks for Code nodes as they have simple property structure
     if (nodeType === 'nodes-base.code') {
       // Code nodes don't have complex displayOptions, so skip visibility warnings
       return;
     }
-
+
     // Check for properties that won't be used
     const visibleProps = properties.filter(p => this.isPropertyVisible(p, config));
     const configuredKeys = Object.keys(config);
-
+
     for (const key of configuredKeys) {
       // Skip internal properties that are always present
       if (key === '@version' || key.startsWith('_')) {
         continue;
       }
-
+
+      // CRITICAL FIX: Only warn about properties the user actually provided, not defaults
+      if (userProvidedKeys && !userProvidedKeys.has(key)) {
+        continue; // Skip properties that were added as defaults
+      }
+
+      // Find the property definition
+      const prop = properties.find(p => p.name === key);
+
+      // Skip UI-only properties (notice, callout, etc.) - they're not configuration
+      if (prop && this.UI_ONLY_TYPES.includes(prop.type)) {
+        continue;
+      }
+
       // Check if property is visible with current settings
       if (!visibleProps.find(p => p.name === key)) {
+        // Get visibility requirements for a better error message
+        const visibilityReq = this.getVisibilityRequirement(prop, config);
+
         warnings.push({
           type: 'inefficient',
           property: key,
-          message: `Property '${key}' is configured but won't be used due to current settings`,
-          suggestion: 'Remove this property or adjust other settings to make it visible'
+          message: `Property '${prop?.displayName || key}' won't be used - not visible with current settings`,
+          suggestion: visibilityReq || 'Remove this property or adjust other settings to make it visible'
         });
       }
     }
@@ -517,6 +628,36 @@
     }
   }
 
+  /**
+   * Get visibility requirement for a property.
+   * Explains what needs to be set for the property to be visible.
+   */
+  private static getVisibilityRequirement(prop: any, config: Record<string, any>): string | undefined {
+    if (!prop || !prop.displayOptions?.show) {
+      return undefined;
+    }
+
+    const requirements: string[] = [];
+    for (const [field, values] of Object.entries(prop.displayOptions.show)) {
+      const expectedValues = Array.isArray(values) ? values : [values];
+      const currentValue = config[field];
+
+      // Only include if the current value doesn't match
+      if (!expectedValues.includes(currentValue)) {
+        const valueStr = expectedValues.length === 1
+          ? `"${expectedValues[0]}"`
+          : expectedValues.map(v => `"${v}"`).join(' or ');
+        requirements.push(`${field}=${valueStr}`);
+      }
+    }
+
+    if (requirements.length === 0) {
+      return undefined;
+    }
+
+    return `Requires: ${requirements.join(', ')}`;
+  }
+
   /**
    * Basic JavaScript syntax validation
    */
@@ -78,6 +78,9 @@ export class EnhancedConfigValidator extends ConfigValidator {
|
||||
// Extract operation context from config
|
||||
const operationContext = this.extractOperationContext(config);
|
||||
|
||||
// Extract user-provided keys before applying defaults (CRITICAL FIX for warning system)
|
||||
const userProvidedKeys = new Set(Object.keys(config));
|
||||
|
||||
// Filter properties based on mode and operation, and get config with defaults
|
||||
const { properties: filteredProperties, configWithDefaults } = this.filterPropertiesByMode(
|
||||
properties,
|
||||
@@ -87,7 +90,8 @@ export class EnhancedConfigValidator extends ConfigValidator {
|
||||
);
|
||||
|
||||
// Perform base validation on filtered properties with defaults applied
|
||||
const baseResult = super.validate(nodeType, configWithDefaults, filteredProperties);
|
||||
// Pass userProvidedKeys to prevent warnings about default values
|
||||
const baseResult = super.validate(nodeType, configWithDefaults, filteredProperties, userProvidedKeys);
|
||||
|
||||
// Enhance the result
|
||||
const enhancedResult: EnhancedValidationResult = {
|
||||
@@ -314,7 +318,11 @@ export class EnhancedConfigValidator extends ConfigValidator {
|
||||
case 'nodes-base.mysql':
|
||||
NodeSpecificValidators.validateMySQL(context);
|
||||
break;
|
||||
|
||||
|
||||
case 'nodes-base.set':
|
||||
NodeSpecificValidators.validateSet(context);
|
||||
break;
|
||||
|
||||
case 'nodes-base.switch':
|
||||
this.validateSwitchNodeStructure(config, result);
|
||||
break;
|
||||
@@ -393,7 +401,59 @@ export class EnhancedConfigValidator extends ConfigValidator {
|
||||
config: Record<string, any>,
|
||||
result: EnhancedValidationResult
|
||||
): void {
|
||||
// Examples removed - validation provides error messages and fixes instead
|
||||
const url = String(config.url || '');
|
||||
const options = config.options || {};
|
||||
|
||||
// 1. Suggest alwaysOutputData for better error handling (node-level property)
|
||||
// Note: We can't check if it exists (it's node-level, not in parameters),
|
||||
// but we can suggest it as a best practice
|
||||
if (!result.suggestions.some(s => typeof s === 'string' && s.includes('alwaysOutputData'))) {
|
||||
result.suggestions.push(
|
||||
'Consider adding alwaysOutputData: true at node level (not in parameters) for better error handling. ' +
|
||||
'This ensures the node produces output even when HTTP requests fail, allowing downstream error handling.'
|
||||
);
|
||||
}
|
||||
|
||||
// 2. Suggest responseFormat for API endpoints
|
||||
const lowerUrl = url.toLowerCase();
|
||||
const isApiEndpoint =
|
||||
// Subdomain patterns (api.example.com)
|
||||
/^https?:\/\/api\./i.test(url) ||
|
||||
// Path patterns with word boundaries to prevent false positives like "therapist", "restaurant"
|
||||
/\/api[\/\?]|\/api$/i.test(url) ||
|
||||
/\/rest[\/\?]|\/rest$/i.test(url) ||
|
||||
// Known API service domains
|
||||
lowerUrl.includes('supabase.co') ||
|
||||
lowerUrl.includes('firebase') ||
|
||||
lowerUrl.includes('googleapis.com') ||
|
||||
// Versioned API paths (e.g., example.com/v1, example.com/v2)
|
||||
/\.com\/v\d+/i.test(url);
|
||||
|
||||
if (isApiEndpoint && !options.response?.response?.responseFormat) {
|
||||
result.suggestions.push(
|
||||
'API endpoints should explicitly set options.response.response.responseFormat to "json" or "text" ' +
|
||||
'to prevent confusion about response parsing. Example: ' +
|
||||
'{ "options": { "response": { "response": { "responseFormat": "json" } } } }'
|
||||
);
|
||||
}
|
||||
|
||||
// 3. Enhanced URL protocol validation for expressions
|
||||
if (url && url.startsWith('=')) {
|
||||
// Expression-based URL - check for common protocol issues
|
||||
const expressionContent = url.slice(1); // Remove = prefix
|
||||
const lowerExpression = expressionContent.toLowerCase();
|
||||
|
||||
// Check for missing protocol in expression (case-insensitive)
|
||||
if (expressionContent.startsWith('www.') ||
|
||||
(expressionContent.includes('{{') && !lowerExpression.includes('http'))) {
|
||||
result.warnings.push({
|
||||
type: 'invalid_value',
|
||||
property: 'url',
|
||||
message: 'URL expression appears to be missing http:// or https:// protocol',
|
||||
suggestion: 'Include protocol in your expression. Example: ={{ "https://" + $json.domain + ".com" }}'
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  /**
@@ -458,6 +518,15 @@ export class EnhancedConfigValidator extends ConfigValidator {
    return Array.from(seen.values());
  }

+ /**
+  * Check if a warning should be filtered out (hardcoded credentials shown only in strict mode)
+  */
+ private static shouldFilterCredentialWarning(warning: ValidationWarning): boolean {
+   return warning.type === 'security' &&
+          warning.message !== undefined &&
+          warning.message.includes('Hardcoded nodeCredentialType');
+ }
+
  /**
   * Apply profile-based filtering to validation results
   */
@@ -469,22 +538,40 @@ export class EnhancedConfigValidator extends ConfigValidator {
      case 'minimal':
        // Only keep missing required errors
        result.errors = result.errors.filter(e => e.type === 'missing_required');
-       result.warnings = [];
+       // Keep ONLY critical warnings (security and deprecated)
+       // But filter out hardcoded credential type warnings (only show in strict mode)
+       result.warnings = result.warnings.filter(w => {
+         if (this.shouldFilterCredentialWarning(w)) {
+           return false;
+         }
+         return w.type === 'security' || w.type === 'deprecated';
+       });
        result.suggestions = [];
        break;

      case 'runtime':
        // Keep critical runtime errors only
        result.errors = result.errors.filter(e =>
          e.type === 'missing_required' ||
          e.type === 'invalid_value' ||
          (e.type === 'invalid_type' && e.message.includes('undefined'))
        );
-       // Keep only security warnings
-       result.warnings = result.warnings.filter(w => w.type === 'security');
+       // Keep security and deprecated warnings, REMOVE property visibility warnings
+       result.warnings = result.warnings.filter(w => {
+         // Filter out hardcoded credential type warnings (only show in strict mode)
+         if (this.shouldFilterCredentialWarning(w)) {
+           return false;
+         }
+         if (w.type === 'security' || w.type === 'deprecated') return true;
+         // FILTER OUT property visibility warnings (too noisy)
+         if (w.type === 'inefficient' && w.message && w.message.includes('not visible')) {
+           return false;
+         }
+         return false;
+       });
        result.suggestions = [];
        break;

      case 'strict':
        // Keep everything, add more suggestions
        if (result.warnings.length === 0 && result.errors.length === 0) {
@@ -494,14 +581,32 @@ export class EnhancedConfigValidator extends ConfigValidator {
        // Require error handling for external service nodes
        this.enforceErrorHandlingForProfile(result, profile);
        break;

      case 'ai-friendly':
      default:
        // Current behavior - balanced for AI agents
        // Filter out noise but keep helpful warnings
-       result.warnings = result.warnings.filter(w =>
-         w.type !== 'inefficient' || !w.property?.startsWith('_')
-       );
+       result.warnings = result.warnings.filter(w => {
+         // Filter out hardcoded credential type warnings (only show in strict mode)
+         if (this.shouldFilterCredentialWarning(w)) {
+           return false;
+         }
+         // Keep security and deprecated warnings
+         if (w.type === 'security' || w.type === 'deprecated') return true;
+         // Keep missing common properties
+         if (w.type === 'missing_common') return true;
+         // Keep best practice warnings
+         if (w.type === 'best_practice') return true;
+         // FILTER OUT inefficient warnings about property visibility (now fixed at source)
+         if (w.type === 'inefficient' && w.message && w.message.includes('not visible')) {
+           return false; // These are now rare due to userProvidedKeys fix
+         }
+         // Filter out internal property warnings
+         if (w.type === 'inefficient' && w.property?.startsWith('_')) {
+           return false;
+         }
+         return true;
+       });
        // Add error handling suggestions for AI-friendly profile
        this.addErrorHandlingSuggestions(result);
        break;

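A rough summary of what each profile now lets through (derived from the filters above; shorthand, not an API):

// minimal:     missing_required errors; security/deprecated warnings (minus hardcoded-credential ones)
// runtime:     + invalid_value and undefined invalid_type errors; security/deprecated warnings only
// ai-friendly: also keeps missing_common and best_practice warnings, drops 'inefficient' noise
// strict:      keeps everything, including hardcoded-credential warnings
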
@@ -207,8 +207,14 @@ export class ExpressionValidator {
    expr: string,
    result: ExpressionValidationResult
  ): void {
-   // Check for missing $ prefix - but exclude cases where $ is already present
-   const missingPrefixPattern = /(?<!\$)\b(json|node|input|items|workflow|execution)\b(?!\s*:)/;
+   // Check for missing $ prefix - but exclude cases where $ is already present OR it's property access (e.g., .json)
+   // The pattern now excludes matches that are:
+   // - Immediately preceded by $ (e.g., $json) - handled by the $ in the lookbehind class
+   // - Preceded by a dot (e.g., .json in $('Node').item.json.field) - handled by the . in the lookbehind class
+   // - Inside word characters (e.g., myJson) - handled by the \w in the lookbehind class
+   // - Inside bracket notation (e.g., ['json']) - handled by the [ and ' in the lookbehind class
+   // - Followed by a colon or quote (e.g., object keys like json: or the string 'json')
+   const missingPrefixPattern = /(?<![.$\w['])\b(json|node|input|items|workflow|execution)\b(?!\s*[:'"])/;
    if (expr.match(missingPrefixPattern)) {
      result.warnings.push(
        'Possible missing $ prefix for variable (e.g., use $json instead of json)'
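Illustrative behavior of the tightened pattern (examples assumed, not from the test suite):

// "{{ json.name }}"                 -> warns: bare `json` needs the $ prefix
// "{{ $json.name }}"                -> no warning ($ prefix present)
// "{{ $('Node').item.json.field }}" -> no warning (.json is property access)
// "{{ $input.item.json['json'] }}"  -> no warning (bracket/quote context)
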
@@ -170,10 +170,23 @@ export class N8nApiClient {
    }
  }

+ /**
+  * Lists workflows from n8n instance.
+  *
+  * @param params - Query parameters for filtering and pagination
+  * @returns Paginated list of workflows
+  *
+  * @remarks
+  * This method handles two response formats for backwards compatibility:
+  * - Modern (n8n v0.200.0+): {data: Workflow[], nextCursor?: string}
+  * - Legacy (older versions): Workflow[] (wrapped automatically)
+  *
+  * @see https://github.com/czlonkowski/n8n-mcp/issues/349
+  */
  async listWorkflows(params: WorkflowListParams = {}): Promise<WorkflowListResponse> {
    try {
      const response = await this.client.get('/workflows', { params });
-     return response.data;
+     return this.validateListResponse<Workflow>(response.data, 'workflows');
    } catch (error) {
      throw handleN8nApiError(error);
    }
@@ -191,10 +204,23 @@ export class N8nApiClient {
    }
  }

+ /**
+  * Lists executions from n8n instance.
+  *
+  * @param params - Query parameters for filtering and pagination
+  * @returns Paginated list of executions
+  *
+  * @remarks
+  * This method handles two response formats for backwards compatibility:
+  * - Modern (n8n v0.200.0+): {data: Execution[], nextCursor?: string}
+  * - Legacy (older versions): Execution[] (wrapped automatically)
+  *
+  * @see https://github.com/czlonkowski/n8n-mcp/issues/349
+  */
  async listExecutions(params: ExecutionListParams = {}): Promise<ExecutionListResponse> {
    try {
      const response = await this.client.get('/executions', { params });
-     return response.data;
+     return this.validateListResponse<Execution>(response.data, 'executions');
    } catch (error) {
      throw handleN8nApiError(error);
    }
@@ -212,7 +238,16 @@ export class N8nApiClient {
  async triggerWebhook(request: WebhookRequest): Promise<any> {
    try {
      const { webhookUrl, httpMethod, data, headers, waitForResponse = true } = request;

+     // SECURITY: Validate URL for SSRF protection (includes DNS resolution)
+     // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (HIGH-03)
+     const { SSRFProtection } = await import('../utils/ssrf-protection');
+     const validation = await SSRFProtection.validateWebhookUrl(webhookUrl);
+
+     if (!validation.valid) {
+       throw new Error(`SSRF protection: ${validation.reason}`);
+     }
+
      // Extract path from webhook URL
      const url = new URL(webhookUrl);
      const webhookPath = url.pathname;
@@ -252,10 +287,23 @@ export class N8nApiClient {
    }

  // Credential Management
+ /**
+  * Lists credentials from n8n instance.
+  *
+  * @param params - Query parameters for filtering and pagination
+  * @returns Paginated list of credentials
+  *
+  * @remarks
+  * This method handles two response formats for backwards compatibility:
+  * - Modern (n8n v0.200.0+): {data: Credential[], nextCursor?: string}
+  * - Legacy (older versions): Credential[] (wrapped automatically)
+  *
+  * @see https://github.com/czlonkowski/n8n-mcp/issues/349
+  */
  async listCredentials(params: CredentialListParams = {}): Promise<CredentialListResponse> {
    try {
      const response = await this.client.get('/credentials', { params });
-     return response.data;
+     return this.validateListResponse<Credential>(response.data, 'credentials');
    } catch (error) {
      throw handleN8nApiError(error);
    }
@@ -297,10 +345,23 @@ export class N8nApiClient {
    }

  // Tag Management
+ /**
+  * Lists tags from n8n instance.
+  *
+  * @param params - Query parameters for filtering and pagination
+  * @returns Paginated list of tags
+  *
+  * @remarks
+  * This method handles two response formats for backwards compatibility:
+  * - Modern (n8n v0.200.0+): {data: Tag[], nextCursor?: string}
+  * - Legacy (older versions): Tag[] (wrapped automatically)
+  *
+  * @see https://github.com/czlonkowski/n8n-mcp/issues/349
+  */
  async listTags(params: TagListParams = {}): Promise<TagListResponse> {
    try {
      const response = await this.client.get('/tags', { params });
-     return response.data;
+     return this.validateListResponse<Tag>(response.data, 'tags');
    } catch (error) {
      throw handleN8nApiError(error);
    }
@@ -403,4 +464,49 @@ export class N8nApiClient {
      throw handleN8nApiError(error);
    }
  }
+
+ /**
+  * Validates and normalizes n8n API list responses.
+  * Handles both modern format {data: [], nextCursor?: string} and legacy array format.
+  *
+  * @param responseData - Raw response data from n8n API
+  * @param resourceType - Resource type for error messages (e.g., 'workflows', 'executions')
+  * @returns Normalized response in modern format
+  * @throws Error if response structure is invalid
+  */
+ private validateListResponse<T>(
+   responseData: any,
+   resourceType: string
+ ): { data: T[]; nextCursor?: string | null } {
+   // Validate response structure
+   if (!responseData || typeof responseData !== 'object') {
+     throw new Error(`Invalid response from n8n API for ${resourceType}: response is not an object`);
+   }
+
+   // Handle legacy case where API returns array directly (older n8n versions)
+   if (Array.isArray(responseData)) {
+     logger.warn(
+       `n8n API returned array directly instead of {data, nextCursor} object for ${resourceType}. ` +
+       'Wrapping in expected format for backwards compatibility.'
+     );
+     return {
+       data: responseData,
+       nextCursor: null
+     };
+   }
+
+   // Validate expected format {data: [], nextCursor?: string}
+   if (!Array.isArray(responseData.data)) {
+     const keys = Object.keys(responseData).slice(0, 5);
+     const keysPreview = keys.length < Object.keys(responseData).length
+       ? `${keys.join(', ')}...`
+       : keys.join(', ');
+     throw new Error(
+       `Invalid response from n8n API for ${resourceType}: expected {data: [], nextCursor?: string}, ` +
+       `got object with keys: [${keysPreview}]`
+     );
+   }
+
+   return responseData;
+ }
}
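Sketch of the normalization this helper performs (illustrative values):

// Legacy array input is wrapped (and a warning logged):
//   validateListResponse<Workflow>([{ id: '1' }], 'workflows')
//     -> { data: [{ id: '1' }], nextCursor: null }
// Modern input passes through unchanged:
//   validateListResponse<Workflow>({ data: [], nextCursor: 'abc' }, 'workflows')
//     -> { data: [], nextCursor: 'abc' }
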
@@ -1,5 +1,7 @@
  import { z } from 'zod';
  import { WorkflowNode, WorkflowConnection, Workflow } from '../types/n8n-api';
+ import { isTriggerNode, isActivatableTrigger } from '../utils/node-type-utils';
+ import { isNonExecutableNode } from '../utils/node-classification';

  // Zod schemas for n8n API validation

@@ -22,17 +24,31 @@ export const workflowNodeSchema = z.object({
    executeOnce: z.boolean().optional(),
  });

+ // Connection array schema used by all connection types
+ const connectionArraySchema = z.array(
+   z.array(
+     z.object({
+       node: z.string(),
+       type: z.string(),
+       index: z.number(),
+     })
+   )
+ );
+
+ /**
+  * Workflow connection schema supporting all connection types.
+  * Note: 'main' is optional because AI nodes exclusively use AI-specific
+  * connection types (ai_languageModel, ai_memory, etc.) without main connections.
+  */
  export const workflowConnectionSchema = z.record(
    z.object({
-     main: z.array(
-       z.array(
-         z.object({
-           node: z.string(),
-           type: z.string(),
-           index: z.number(),
-         })
-       )
-     ),
+     main: connectionArraySchema.optional(),
+     error: connectionArraySchema.optional(),
+     ai_tool: connectionArraySchema.optional(),
+     ai_languageModel: connectionArraySchema.optional(),
+     ai_memory: connectionArraySchema.optional(),
+     ai_embedding: connectionArraySchema.optional(),
+     ai_vectorStore: connectionArraySchema.optional(),
    })
  );

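Illustratively, a pure-AI connection entry now validates without a 'main' key (example assumed):

// workflowConnectionSchema.parse({
//   'OpenAI Chat Model': {
//     ai_languageModel: [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]],
//   },
// }); // passes
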
@@ -194,6 +210,14 @@ export function validateWorkflowStructure(workflow: Partial<Workflow>): string[]
    errors.push('Workflow must have at least one node');
  }

+ // Check if workflow has only non-executable nodes (sticky notes)
+ if (workflow.nodes && workflow.nodes.length > 0) {
+   const hasExecutableNodes = workflow.nodes.some(node => !isNonExecutableNode(node.type));
+   if (!hasExecutableNodes) {
+     errors.push('Workflow must have at least one executable node. Sticky notes alone cannot form a valid workflow.');
+   }
+ }
+
  if (!workflow.connections) {
    errors.push('Workflow connections are required');
  }
@@ -201,20 +225,71 @@ export function validateWorkflowStructure(workflow: Partial<Workflow>): string[]
  // Check for minimum viable workflow
  if (workflow.nodes && workflow.nodes.length === 1) {
    const singleNode = workflow.nodes[0];
    const isWebhookOnly = singleNode.type === 'n8n-nodes-base.webhook' ||
                          singleNode.type === 'n8n-nodes-base.webhookTrigger';

    if (!isWebhookOnly) {
-     errors.push('Single-node workflows are only valid for webhooks. Add at least one more node and connect them. Example: Manual Trigger → Set node');
+     errors.push(`Single non-webhook node workflow is invalid. Current node: "${singleNode.name}" (${singleNode.type}). Add another node using: {type: 'addNode', node: {name: 'Process Data', type: 'n8n-nodes-base.set', typeVersion: 3.4, position: [450, 300], parameters: {}}}`);
    }
  }

- // Check for empty connections in multi-node workflows
+ // Check for disconnected nodes in multi-node workflows
  if (workflow.nodes && workflow.nodes.length > 1 && workflow.connections) {
+   // Filter out non-executable nodes (sticky notes) when counting nodes
+   const executableNodes = workflow.nodes.filter(node => !isNonExecutableNode(node.type));
    const connectionCount = Object.keys(workflow.connections).length;

-   if (connectionCount === 0) {
-     errors.push('Multi-node workflow has empty connections. Connect nodes like this: connections: { "Node1 Name": { "main": [[{ "node": "Node2 Name", "type": "main", "index": 0 }]] } }');
+   // First check: workflow has no connections at all (only check if there are multiple executable nodes)
+   if (connectionCount === 0 && executableNodes.length > 1) {
+     const nodeNames = executableNodes.slice(0, 2).map(n => n.name);
+     errors.push(`Multi-node workflow has no connections between nodes. Add a connection using: {type: 'addConnection', source: '${nodeNames[0]}', target: '${nodeNames[1]}', sourcePort: 'main', targetPort: 'main'}`);
+   } else if (connectionCount > 0 || executableNodes.length > 1) {
+     // Second check: detect disconnected nodes (nodes with no incoming or outgoing connections)
+     const connectedNodes = new Set<string>();
+
+     // Collect all nodes that appear in connections (as source or target)
+     Object.entries(workflow.connections).forEach(([sourceName, connection]) => {
+       connectedNodes.add(sourceName); // Node has outgoing connection
+
+       if (connection.main && Array.isArray(connection.main)) {
+         connection.main.forEach((outputs) => {
+           if (Array.isArray(outputs)) {
+             outputs.forEach((target) => {
+               connectedNodes.add(target.node); // Node has incoming connection
+             });
+           }
+         });
+       }
+     });
+
+     // Find disconnected nodes (excluding non-executable nodes and triggers)
+     // Non-executable nodes (sticky notes) are UI-only and don't need connections
+     // Trigger nodes only need outgoing connections
+     const disconnectedNodes = workflow.nodes.filter(node => {
+       // Skip non-executable nodes (sticky notes, etc.) - they're UI-only annotations
+       if (isNonExecutableNode(node.type)) {
+         return false;
+       }
+
+       const isConnected = connectedNodes.has(node.name);
+       const isNodeTrigger = isTriggerNode(node.type);
+
+       // Trigger nodes only need outgoing connections
+       if (isNodeTrigger) {
+         return !workflow.connections?.[node.name]; // Disconnected if no outgoing connections
+       }
+
+       // Regular nodes need at least one connection (incoming or outgoing)
+       return !isConnected;
+     });
+
+     if (disconnectedNodes.length > 0) {
+       const disconnectedList = disconnectedNodes.map(n => `"${n.name}" (${n.type})`).join(', ');
+       const firstDisconnected = disconnectedNodes[0];
+       const suggestedSource = workflow.nodes.find(n => connectedNodes.has(n.name))?.name || workflow.nodes[0].name;
+
+       errors.push(`Disconnected nodes detected: ${disconnectedList}. Each node must have at least one connection. Add a connection: {type: 'addConnection', source: '${suggestedSource}', target: '${firstDisconnected.name}', sourcePort: 'main', targetPort: 'main'}`);
+     }
    }
  }

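For instance (illustrative node names), a two-node workflow with an empty connections map now fails the first check:

// nodes: [Manual Trigger, Set]  with  connections: {}
// -> "Multi-node workflow has no connections between nodes. Add a connection using:
//     {type: 'addConnection', source: 'Manual Trigger', target: 'Set', sourcePort: 'main', targetPort: 'main'}"
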
@@ -236,6 +311,16 @@ export function validateWorkflowStructure(workflow: Partial<Workflow>): string[]
    });
  }

+ // Validate filter-based nodes (IF v2.2+, Switch v3.2+) have complete metadata
+ if (workflow.nodes) {
+   workflow.nodes.forEach((node, index) => {
+     const filterErrors = validateFilterBasedNodeMetadata(node);
+     if (filterErrors.length > 0) {
+       errors.push(...filterErrors.map(err => `Node "${node.name}" (index ${index}): ${err}`));
+     }
+   });
+ }
+
  // Validate connections
  if (workflow.connections) {
    try {
@@ -245,12 +330,89 @@ export function validateWorkflowStructure(workflow: Partial<Workflow>): string[]
    }
  }

+ // Validate active workflows have activatable triggers
+ // Issue #351: executeWorkflowTrigger cannot activate a workflow
+ // It can only be invoked by other workflows
+ if ((workflow as any).active === true && workflow.nodes && workflow.nodes.length > 0) {
+   const activatableTriggers = workflow.nodes.filter(node =>
+     !node.disabled && isActivatableTrigger(node.type)
+   );
+
+   const executeWorkflowTriggers = workflow.nodes.filter(node =>
+     !node.disabled && node.type.toLowerCase().includes('executeworkflow')
+   );
+
+   if (activatableTriggers.length === 0 && executeWorkflowTriggers.length > 0) {
+     // Workflow is active but only has executeWorkflowTrigger nodes
+     const triggerNames = executeWorkflowTriggers.map(n => n.name).join(', ');
+     errors.push(
+       `Cannot activate workflow with only Execute Workflow Trigger nodes (${triggerNames}). ` +
+       'Execute Workflow Trigger can only be invoked by other workflows, not activated. ' +
+       'Either deactivate the workflow or add a webhook/schedule/polling trigger.'
+     );
+   }
+ }
+
+ // Validate Switch and IF node connection structures match their rules
+ if (workflow.nodes && workflow.connections) {
+   const switchNodes = workflow.nodes.filter(n => {
+     if (n.type !== 'n8n-nodes-base.switch') return false;
+     const mode = (n.parameters as any)?.mode;
+     return !mode || mode === 'rules'; // Default mode is 'rules'
+   });
+
+   for (const switchNode of switchNodes) {
+     const params = switchNode.parameters as any;
+     const rules = params?.rules?.rules || [];
+     const nodeConnections = workflow.connections[switchNode.name];
+
+     if (rules.length > 0 && nodeConnections?.main) {
+       const outputBranches = nodeConnections.main.length;
+
+       // Switch nodes in "rules" mode need output branches matching rules count
+       if (outputBranches !== rules.length) {
+         const ruleNames = rules.map((r: any, i: number) =>
+           r.outputKey ? `"${r.outputKey}" (index ${i})` : `Rule ${i}`
+         ).join(', ');
+
+         errors.push(
+           `Switch node "${switchNode.name}" has ${rules.length} rules [${ruleNames}] ` +
+           `but only ${outputBranches} output branch${outputBranches !== 1 ? 'es' : ''} in connections. ` +
+           `Each rule needs its own output branch. When connecting to Switch outputs, specify sourceIndex: ` +
+           rules.map((_: any, i: number) => i).join(', ') +
+           ` (or use case parameter for clarity).`
+         );
+       }
+
+       // Check for empty output branches (except trailing ones)
+       const nonEmptyBranches = nodeConnections.main.filter((branch: any[]) => branch.length > 0).length;
+       if (nonEmptyBranches < rules.length) {
+         const emptyIndices = nodeConnections.main
+           .map((branch: any[], i: number) => branch.length === 0 ? i : -1)
+           .filter((i: number) => i !== -1 && i < rules.length);
+
+         if (emptyIndices.length > 0) {
+           const ruleInfo = emptyIndices.map((i: number) => {
+             const rule = rules[i];
+             return rule.outputKey ? `"${rule.outputKey}" (index ${i})` : `Rule ${i}`;
+           }).join(', ');
+
+           errors.push(
+             `Switch node "${switchNode.name}" has unconnected output${emptyIndices.length !== 1 ? 's' : ''}: ${ruleInfo}. ` +
+             `Add connection${emptyIndices.length !== 1 ? 's' : ''} using sourceIndex: ${emptyIndices.join(' or ')}.`
+           );
+         }
+       }
+     }
+   }
+ }

  // Validate that all connection references exist and use node NAMES (not IDs)
  if (workflow.nodes && workflow.connections) {
    const nodeNames = new Set(workflow.nodes.map(node => node.name));
    const nodeIds = new Set(workflow.nodes.map(node => node.id));
    const nodeIdToName = new Map(workflow.nodes.map(node => [node.id, node.name]));

    Object.entries(workflow.connections).forEach(([sourceName, connection]) => {
      // Check if source exists by name (correct)
      if (!nodeNames.has(sourceName)) {
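A Switch in 'rules' mode with two rules should expose one output branch per rule, e.g. (illustrative):

// connections: {
//   'Switch': { main: [
//     [{ node: 'Handle A', type: 'main', index: 0 }],  // rule 0
//     [{ node: 'Handle B', type: 'main', index: 0 }],  // rule 1
//   ]}
// }
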
@@ -289,12 +451,177 @@ export function validateWorkflowStructure(workflow: Partial<Workflow>): string[]

// Check if workflow has webhook trigger
export function hasWebhookTrigger(workflow: Workflow): boolean {
  return workflow.nodes.some(node =>
    node.type === 'n8n-nodes-base.webhook' ||
    node.type === 'n8n-nodes-base.webhookTrigger'
  );
}

+/**
+ * Validate filter-based node metadata (IF v2.2+, Switch v3.2+)
+ * Returns array of error messages
+ */
+export function validateFilterBasedNodeMetadata(node: WorkflowNode): string[] {
+  const errors: string[] = [];
+
+  // Check if node is filter-based
+  const isIFNode = node.type === 'n8n-nodes-base.if' && node.typeVersion >= 2.2;
+  const isSwitchNode = node.type === 'n8n-nodes-base.switch' && node.typeVersion >= 3.2;
+
+  if (!isIFNode && !isSwitchNode) {
+    return errors; // Not a filter-based node
+  }
+
+  // Validate IF node
+  if (isIFNode) {
+    const conditions = (node.parameters.conditions as any);
+
+    // Check conditions.options exists
+    if (!conditions?.options) {
+      errors.push(
+        'Missing required "conditions.options". ' +
+        'IF v2.2+ requires: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}'
+      );
+    } else {
+      // Validate required fields
+      const requiredFields = {
+        version: 2,
+        leftValue: '',
+        caseSensitive: 'boolean',
+        typeValidation: 'strict'
+      };
+
+      for (const [field, expectedValue] of Object.entries(requiredFields)) {
+        if (!(field in conditions.options)) {
+          errors.push(
+            `Missing required field "conditions.options.${field}". ` +
+            `Expected value: ${typeof expectedValue === 'string' ? `"${expectedValue}"` : expectedValue}`
+          );
+        }
+      }
+    }
+
+    // Validate operators in conditions
+    if (conditions?.conditions && Array.isArray(conditions.conditions)) {
+      conditions.conditions.forEach((condition: any, i: number) => {
+        const operatorErrors = validateOperatorStructure(condition.operator, `conditions.conditions[${i}].operator`);
+        errors.push(...operatorErrors);
+      });
+    }
+  }
+
+  // Validate Switch node
+  if (isSwitchNode) {
+    const rules = (node.parameters.rules as any);
+
+    if (rules?.rules && Array.isArray(rules.rules)) {
+      rules.rules.forEach((rule: any, ruleIndex: number) => {
+        // Check rule.conditions.options
+        if (!rule.conditions?.options) {
+          errors.push(
+            `Missing required "rules.rules[${ruleIndex}].conditions.options". ` +
+            'Switch v3.2+ requires: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}'
+          );
+        } else {
+          // Validate required fields
+          const requiredFields = {
+            version: 2,
+            leftValue: '',
+            caseSensitive: 'boolean',
+            typeValidation: 'strict'
+          };
+
+          for (const [field, expectedValue] of Object.entries(requiredFields)) {
+            if (!(field in rule.conditions.options)) {
+              errors.push(
+                `Missing required field "rules.rules[${ruleIndex}].conditions.options.${field}". ` +
+                `Expected value: ${typeof expectedValue === 'string' ? `"${expectedValue}"` : expectedValue}`
+              );
+            }
+          }
+        }
+
+        // Validate operators in rule conditions
+        if (rule.conditions?.conditions && Array.isArray(rule.conditions.conditions)) {
+          rule.conditions.conditions.forEach((condition: any, condIndex: number) => {
+            const operatorErrors = validateOperatorStructure(
+              condition.operator,
+              `rules.rules[${ruleIndex}].conditions.conditions[${condIndex}].operator`
+            );
+            errors.push(...operatorErrors);
+          });
+        }
+      });
+    }
+  }
+
+  return errors;
+}
+
+/**
+ * Validate operator structure
+ * Ensures operator has correct format: {type, operation, singleValue?}
+ */
+export function validateOperatorStructure(operator: any, path: string): string[] {
+  const errors: string[] = [];
+
+  if (!operator || typeof operator !== 'object') {
+    errors.push(`${path}: operator is missing or not an object`);
+    return errors;
+  }
+
+  // Check required field: type (data type, not operation name)
+  if (!operator.type) {
+    errors.push(
+      `${path}: missing required field "type". ` +
+      'Must be a data type: "string", "number", "boolean", "dateTime", "array", or "object"'
+    );
+  } else {
+    const validTypes = ['string', 'number', 'boolean', 'dateTime', 'array', 'object'];
+    if (!validTypes.includes(operator.type)) {
+      errors.push(
+        `${path}: invalid type "${operator.type}". ` +
+        `Type must be a data type (${validTypes.join(', ')}), not an operation name. ` +
+        'Did you mean to use the "operation" field?'
+      );
+    }
+  }
+
+  // Check required field: operation
+  if (!operator.operation) {
+    errors.push(
+      `${path}: missing required field "operation". ` +
+      'Operation specifies the comparison type (e.g., "equals", "contains", "isNotEmpty")'
+    );
+  }
+
+  // Check singleValue based on operator type
+  if (operator.operation) {
+    const unaryOperators = ['isEmpty', 'isNotEmpty', 'true', 'false', 'isNumeric'];
+    const isUnary = unaryOperators.includes(operator.operation);
+
+    if (isUnary) {
+      // Unary operators MUST have singleValue: true
+      if (operator.singleValue !== true) {
+        errors.push(
+          `${path}: unary operator "${operator.operation}" requires "singleValue: true". ` +
+          'Unary operators do not use rightValue.'
+        );
+      }
+    } else {
+      // Binary operators should NOT have singleValue: true
+      if (operator.singleValue === true) {
+        errors.push(
+          `${path}: binary operator "${operator.operation}" should not have "singleValue: true". ` +
+          'Only unary operators (isEmpty, isNotEmpty, true, false, isNumeric) need this property.'
+        );
+      }
+    }
+  }
+
+  return errors;
+}
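Operator shapes that satisfy validateOperatorStructure (illustrative):

// Binary:  { type: 'string', operation: 'equals' }                        // compares against rightValue
// Unary:   { type: 'string', operation: 'isNotEmpty', singleValue: true } // no rightValue
// Invalid: { type: 'isNotEmpty' }                                         // operation name in the type field
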

// Get webhook URL from workflow
export function getWebhookUrl(workflow: Workflow): string | null {
  const webhookNode = workflow.nodes.find(node =>

src/services/node-migration-service.ts (new file, 410 lines)
@@ -0,0 +1,410 @@
/**
 * Node Migration Service
 *
 * Handles smart auto-migration of node configurations during version upgrades.
 * Applies migration strategies from the breaking changes registry and detectors.
 *
 * Migration strategies:
 * - add_property: Add new required/optional properties with defaults
 * - remove_property: Remove deprecated properties
 * - rename_property: Rename properties that changed names
 * - set_default: Set default values for properties
 */

import { v4 as uuidv4 } from 'uuid';
import { BreakingChangeDetector, DetectedChange } from './breaking-change-detector';
import { NodeVersionService } from './node-version-service';

export interface MigrationResult {
  success: boolean;
  nodeId: string;
  nodeName: string;
  fromVersion: string;
  toVersion: string;
  appliedMigrations: AppliedMigration[];
  remainingIssues: string[];
  confidence: 'HIGH' | 'MEDIUM' | 'LOW';
  updatedNode: any; // The migrated node configuration
}

export interface AppliedMigration {
  propertyName: string;
  action: string;
  oldValue?: any;
  newValue?: any;
  description: string;
}

export class NodeMigrationService {
  constructor(
    private versionService: NodeVersionService,
    private breakingChangeDetector: BreakingChangeDetector
  ) {}

  /**
   * Migrate a node from its current version to a target version
   */
  async migrateNode(
    node: any,
    fromVersion: string,
    toVersion: string
  ): Promise<MigrationResult> {
    const nodeId = node.id || 'unknown';
    const nodeName = node.name || 'Unknown Node';
    const nodeType = node.type;

    // Analyze the version upgrade
    const analysis = await this.breakingChangeDetector.analyzeVersionUpgrade(
      nodeType,
      fromVersion,
      toVersion
    );

    // Start with a copy of the node
    const migratedNode = JSON.parse(JSON.stringify(node));

    // Apply the version update
    migratedNode.typeVersion = this.parseVersion(toVersion);

    const appliedMigrations: AppliedMigration[] = [];
    const remainingIssues: string[] = [];

    // Apply auto-migratable changes
    for (const change of analysis.changes.filter(c => c.autoMigratable)) {
      const migration = this.applyMigration(migratedNode, change);

      if (migration) {
        appliedMigrations.push(migration);
      }
    }

    // Collect remaining manual issues
    for (const change of analysis.changes.filter(c => !c.autoMigratable)) {
      remainingIssues.push(
        `Manual action required for "${change.propertyName}": ${change.migrationHint}`
      );
    }

    // Determine confidence based on remaining issues
    let confidence: 'HIGH' | 'MEDIUM' | 'LOW' = 'HIGH';

    if (remainingIssues.length > 0) {
      confidence = remainingIssues.length > 3 ? 'LOW' : 'MEDIUM';
    }

    return {
      success: remainingIssues.length === 0,
      nodeId,
      nodeName,
      fromVersion,
      toVersion,
      appliedMigrations,
      remainingIssues,
      confidence,
      updatedNode: migratedNode
    };
  }

  /**
   * Apply a single migration change to a node
   */
  private applyMigration(node: any, change: DetectedChange): AppliedMigration | null {
    if (!change.migrationStrategy) return null;

    const { type, defaultValue, sourceProperty, targetProperty } = change.migrationStrategy;

    switch (type) {
      case 'add_property':
        return this.addProperty(node, change.propertyName, defaultValue, change);

      case 'remove_property':
        return this.removeProperty(node, change.propertyName, change);

      case 'rename_property':
        return this.renameProperty(node, sourceProperty!, targetProperty!, change);

      case 'set_default':
        return this.setDefault(node, change.propertyName, defaultValue, change);

      default:
        return null;
    }
  }

  /**
   * Add a new property to the node configuration
   */
  private addProperty(
    node: any,
    propertyPath: string,
    defaultValue: any,
    change: DetectedChange
  ): AppliedMigration {
    const value = this.resolveDefaultValue(propertyPath, defaultValue, node);

    // Handle nested property paths (e.g., "parameters.inputFieldMapping")
    const parts = propertyPath.split('.');
    let target = node;

    for (let i = 0; i < parts.length - 1; i++) {
      const part = parts[i];
      if (!target[part]) {
        target[part] = {};
      }
      target = target[part];
    }

    const finalKey = parts[parts.length - 1];
    target[finalKey] = value;

    return {
      propertyName: propertyPath,
      action: 'Added property',
      newValue: value,
      description: `Added "${propertyPath}" with default value`
    };
  }

  /**
   * Remove a deprecated property from the node configuration
   */
  private removeProperty(
    node: any,
    propertyPath: string,
    change: DetectedChange
  ): AppliedMigration | null {
    const parts = propertyPath.split('.');
    let target = node;

    for (let i = 0; i < parts.length - 1; i++) {
      const part = parts[i];
      if (!target[part]) return null; // Property doesn't exist
      target = target[part];
    }

    const finalKey = parts[parts.length - 1];
    const oldValue = target[finalKey];

    if (oldValue !== undefined) {
      delete target[finalKey];

      return {
        propertyName: propertyPath,
        action: 'Removed property',
        oldValue,
        description: `Removed deprecated property "${propertyPath}"`
      };
    }

    return null;
  }

  /**
   * Rename a property (move value from old name to new name)
   */
  private renameProperty(
    node: any,
    sourcePath: string,
    targetPath: string,
    change: DetectedChange
  ): AppliedMigration | null {
    // Get old value
    const sourceParts = sourcePath.split('.');
    let sourceTarget = node;

    for (let i = 0; i < sourceParts.length - 1; i++) {
      if (!sourceTarget[sourceParts[i]]) return null;
      sourceTarget = sourceTarget[sourceParts[i]];
    }

    const sourceKey = sourceParts[sourceParts.length - 1];
    const oldValue = sourceTarget[sourceKey];

    if (oldValue === undefined) return null; // Source doesn't exist

    // Set new value
    const targetParts = targetPath.split('.');
    let targetTarget = node;

    for (let i = 0; i < targetParts.length - 1; i++) {
      if (!targetTarget[targetParts[i]]) {
        targetTarget[targetParts[i]] = {};
      }
      targetTarget = targetTarget[targetParts[i]];
    }

    const targetKey = targetParts[targetParts.length - 1];
    targetTarget[targetKey] = oldValue;

    // Remove old value
    delete sourceTarget[sourceKey];

    return {
      propertyName: targetPath,
      action: 'Renamed property',
      oldValue: `${sourcePath}: ${JSON.stringify(oldValue)}`,
      newValue: `${targetPath}: ${JSON.stringify(oldValue)}`,
      description: `Renamed "${sourcePath}" to "${targetPath}"`
    };
  }

  /**
   * Set a default value for a property
   */
  private setDefault(
    node: any,
    propertyPath: string,
    defaultValue: any,
    change: DetectedChange
  ): AppliedMigration | null {
    const parts = propertyPath.split('.');
    let target = node;

    for (let i = 0; i < parts.length - 1; i++) {
      if (!target[parts[i]]) {
        target[parts[i]] = {};
      }
      target = target[parts[i]];
    }

    const finalKey = parts[parts.length - 1];

    // Only set if not already defined
    if (target[finalKey] === undefined) {
      const value = this.resolveDefaultValue(propertyPath, defaultValue, node);
      target[finalKey] = value;

      return {
        propertyName: propertyPath,
        action: 'Set default value',
        newValue: value,
        description: `Set default value for "${propertyPath}"`
      };
    }

    return null;
  }

  /**
   * Resolve default value with special handling for certain property types
   */
  private resolveDefaultValue(propertyPath: string, defaultValue: any, node: any): any {
    // Special case: webhookId needs a UUID
    if (propertyPath === 'webhookId' || propertyPath.endsWith('.webhookId')) {
      return uuidv4();
    }

    // Special case: webhook path needs a unique value
    if (propertyPath === 'path' || propertyPath.endsWith('.path')) {
      if (node.type === 'n8n-nodes-base.webhook') {
        return `/webhook-${Date.now()}`;
      }
    }

    // Return provided default or null
    return defaultValue !== null && defaultValue !== undefined ? defaultValue : null;
  }

  /**
   * Parse version string to number (for typeVersion field)
   */
  private parseVersion(version: string): number {
    const parts = version.split('.').map(Number);

    // Handle versions like "1.1" -> 1.1, "2.0" -> 2
    if (parts.length === 1) return parts[0];
    if (parts.length === 2) return parts[0] + parts[1] / 10;

    // For more complex versions, just use first number
    return parts[0];
  }

  /**
   * Validate that a migrated node is valid
   */
  async validateMigratedNode(node: any, nodeType: string): Promise<{
    valid: boolean;
    errors: string[];
    warnings: string[];
  }> {
    const errors: string[] = [];
    const warnings: string[] = [];

    // Basic validation
    if (!node.typeVersion) {
      errors.push('Missing typeVersion after migration');
    }

    if (!node.parameters) {
      errors.push('Missing parameters object');
    }

    // Check for common issues
    if (nodeType === 'n8n-nodes-base.webhook') {
      if (!node.parameters?.path) {
        errors.push('Webhook node missing required "path" parameter');
      }
      if (node.typeVersion >= 2.1 && !node.webhookId) {
        warnings.push('Webhook v2.1+ typically requires webhookId');
      }
    }

    if (nodeType === 'n8n-nodes-base.executeWorkflow') {
      if (node.typeVersion >= 1.1 && !node.parameters?.inputFieldMapping) {
        errors.push('Execute Workflow v1.1+ requires inputFieldMapping');
      }
    }

    return {
      valid: errors.length === 0,
      errors,
      warnings
    };
  }

  /**
   * Batch migrate multiple nodes in a workflow
   */
  async migrateWorkflowNodes(
    workflow: any,
    targetVersions: Record<string, string> // nodeId -> targetVersion
  ): Promise<{
    success: boolean;
    results: MigrationResult[];
    overallConfidence: 'HIGH' | 'MEDIUM' | 'LOW';
  }> {
    const results: MigrationResult[] = [];

    for (const node of workflow.nodes || []) {
      const targetVersion = targetVersions[node.id];

      if (targetVersion && node.typeVersion) {
        const currentVersion = node.typeVersion.toString();

        const result = await this.migrateNode(node, currentVersion, targetVersion);
        results.push(result);

        // Update node in place
        Object.assign(node, result.updatedNode);
      }
    }

    // Calculate overall confidence
    const confidences = results.map(r => r.confidence);
    let overallConfidence: 'HIGH' | 'MEDIUM' | 'LOW' = 'HIGH';

    if (confidences.includes('LOW')) {
      overallConfidence = 'LOW';
    } else if (confidences.includes('MEDIUM')) {
      overallConfidence = 'MEDIUM';
    }

    const success = results.every(r => r.success);

    return {
      success,
      results,
      overallConfidence
    };
  }
}
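A usage sketch (constructor dependencies assumed to be wired elsewhere; applied migrations depend on the breaking-changes registry):

// const svc = new NodeMigrationService(versionService, detector);
// const res = await svc.migrateNode(webhookNode, '1', '2.1');
// res.appliedMigrations -> e.g. [{ propertyName: 'webhookId', action: 'Added property', ... }]
// res.confidence        -> 'HIGH' when nothing needs manual follow-up
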
src/services/node-sanitizer.ts (new file, 361 lines)
@@ -0,0 +1,361 @@
/**
 * Node Sanitizer Service
 *
 * Ensures nodes have complete metadata required by n8n UI.
 * Based on n8n AI Workflow Builder patterns:
 * - Merges node type defaults with user parameters
 * - Auto-adds required metadata for filter-based nodes (IF v2.2+, Switch v3.2+)
 * - Fixes operator structure
 * - Prevents "Could not find property option" errors
 */

import { INodeParameters } from 'n8n-workflow';
import { logger } from '../utils/logger';
import { WorkflowNode } from '../types/n8n-api';

/**
 * Sanitize a single node by adding required metadata
 */
export function sanitizeNode(node: WorkflowNode): WorkflowNode {
  const sanitized = { ...node };

  // Apply node-specific sanitization
  if (isFilterBasedNode(node.type, node.typeVersion)) {
    sanitized.parameters = sanitizeFilterBasedNode(
      sanitized.parameters as INodeParameters,
      node.type,
      node.typeVersion
    );
  }

  return sanitized;
}

/**
 * Sanitize all nodes in a workflow
 */
export function sanitizeWorkflowNodes(workflow: any): any {
  if (!workflow.nodes || !Array.isArray(workflow.nodes)) {
    return workflow;
  }

  return {
    ...workflow,
    nodes: workflow.nodes.map((node: any) => sanitizeNode(node))
  };
}

/**
 * Check if node is filter-based (IF v2.2+, Switch v3.2+)
 */
function isFilterBasedNode(nodeType: string, typeVersion: number): boolean {
  if (nodeType === 'n8n-nodes-base.if') {
    return typeVersion >= 2.2;
  }
  if (nodeType === 'n8n-nodes-base.switch') {
    return typeVersion >= 3.2;
  }
  return false;
}

/**
 * Sanitize filter-based nodes (IF v2.2+, Switch v3.2+)
 * Ensures conditions.options has complete structure
 */
function sanitizeFilterBasedNode(
  parameters: INodeParameters,
  nodeType: string,
  typeVersion: number
): INodeParameters {
  const sanitized = { ...parameters };

  // Handle IF node
  if (nodeType === 'n8n-nodes-base.if' && typeVersion >= 2.2) {
    sanitized.conditions = sanitizeFilterConditions(sanitized.conditions as any);
  }

  // Handle Switch node
  if (nodeType === 'n8n-nodes-base.switch' && typeVersion >= 3.2) {
    if (sanitized.rules && typeof sanitized.rules === 'object') {
      const rules = sanitized.rules as any;
      if (rules.rules && Array.isArray(rules.rules)) {
        rules.rules = rules.rules.map((rule: any) => ({
          ...rule,
          conditions: sanitizeFilterConditions(rule.conditions)
        }));
      }
    }
  }

  return sanitized;
}

/**
 * Sanitize filter conditions structure
 */
function sanitizeFilterConditions(conditions: any): any {
  if (!conditions || typeof conditions !== 'object') {
    return conditions;
  }

  const sanitized = { ...conditions };

  // Ensure options has complete structure
  if (!sanitized.options) {
    sanitized.options = {};
  }

  // Add required filter options metadata
  const requiredOptions = {
    version: 2,
    leftValue: '',
    caseSensitive: true,
    typeValidation: 'strict'
  };

  // Merge with existing options, preserving user values
  sanitized.options = {
    ...requiredOptions,
    ...sanitized.options
  };

  // Sanitize conditions array
  if (sanitized.conditions && Array.isArray(sanitized.conditions)) {
    sanitized.conditions = sanitized.conditions.map((condition: any) =>
      sanitizeCondition(condition)
    );
  }

  return sanitized;
}

/**
 * Sanitize a single condition
 */
function sanitizeCondition(condition: any): any {
  if (!condition || typeof condition !== 'object') {
    return condition;
  }

  const sanitized = { ...condition };

  // Ensure condition has an ID
  if (!sanitized.id) {
    sanitized.id = generateConditionId();
  }

  // Sanitize operator structure
  if (sanitized.operator) {
    sanitized.operator = sanitizeOperator(sanitized.operator);
  }

  return sanitized;
}

/**
 * Sanitize operator structure
 * Ensures operator has correct format: {type, operation, singleValue?}
 */
function sanitizeOperator(operator: any): any {
  if (!operator || typeof operator !== 'object') {
    return operator;
  }

  const sanitized = { ...operator };

  // Fix common mistake: type field used for operation name
  // WRONG: {type: "isNotEmpty"}
  // RIGHT: {type: "string", operation: "isNotEmpty"}
  if (sanitized.type && !sanitized.operation) {
    // Check if type value looks like an operation (lowercase, no dots)
    const typeValue = sanitized.type as string;
    if (isOperationName(typeValue)) {
      logger.debug(`Fixing operator structure: converting type="${typeValue}" to operation`);

      // Infer data type from operation
      const dataType = inferDataType(typeValue);
      sanitized.type = dataType;
      sanitized.operation = typeValue;
    }
  }

  // Set singleValue based on operator type
  if (sanitized.operation) {
    if (isUnaryOperator(sanitized.operation)) {
      // Unary operators require singleValue: true
      sanitized.singleValue = true;
    } else {
      // Binary operators should NOT have singleValue (or it should be false/undefined)
      // Remove it to prevent UI errors
      delete sanitized.singleValue;
    }
  }

  return sanitized;
}

/**
 * Check if string looks like an operation name (not a data type)
 */
function isOperationName(value: string): boolean {
  // Operation names are lowercase and don't contain dots
  // Data types are: string, number, boolean, dateTime, array, object
  const dataTypes = ['string', 'number', 'boolean', 'dateTime', 'array', 'object'];
  return !dataTypes.includes(value) && /^[a-z][a-zA-Z]*$/.test(value);
}

/**
 * Infer data type from operation name
 */
function inferDataType(operation: string): string {
  // Boolean operations
  const booleanOps = ['true', 'false', 'isEmpty', 'isNotEmpty'];
  if (booleanOps.includes(operation)) {
    return 'boolean';
  }

  // Number operations
  const numberOps = ['isNumeric', 'gt', 'gte', 'lt', 'lte'];
  if (numberOps.some(op => operation.includes(op))) {
    return 'number';
  }

  // Date operations
  const dateOps = ['after', 'before', 'afterDate', 'beforeDate'];
  if (dateOps.some(op => operation.includes(op))) {
    return 'dateTime';
  }

  // Default to string
  return 'string';
}

/**
 * Check if operator is unary (requires singleValue: true)
 */
function isUnaryOperator(operation: string): boolean {
  const unaryOps = [
    'isEmpty',
    'isNotEmpty',
    'true',
    'false',
    'isNumeric'
  ];
  return unaryOps.includes(operation);
}

/**
 * Generate unique condition ID
 */
function generateConditionId(): string {
  return `condition-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
}

/**
 * Validate that a node has complete metadata
 * Returns array of issues found
 */
export function validateNodeMetadata(node: WorkflowNode): string[] {
  const issues: string[] = [];

  if (!isFilterBasedNode(node.type, node.typeVersion)) {
    return issues; // Not a filter-based node
  }

  // Check IF node
  if (node.type === 'n8n-nodes-base.if') {
    const conditions = (node.parameters.conditions as any);
    if (!conditions?.options) {
      issues.push('Missing conditions.options');
    } else {
      const required = ['version', 'leftValue', 'typeValidation', 'caseSensitive'];
      for (const field of required) {
        if (!(field in conditions.options)) {
          issues.push(`Missing conditions.options.${field}`);
        }
      }
    }

    // Check operators
    if (conditions?.conditions && Array.isArray(conditions.conditions)) {
      for (let i = 0; i < conditions.conditions.length; i++) {
        const condition = conditions.conditions[i];
        const operatorIssues = validateOperator(condition.operator, `conditions.conditions[${i}].operator`);
        issues.push(...operatorIssues);
      }
    }
  }

  // Check Switch node
  if (node.type === 'n8n-nodes-base.switch') {
    const rules = (node.parameters.rules as any);
    if (rules?.rules && Array.isArray(rules.rules)) {
      for (let i = 0; i < rules.rules.length; i++) {
        const rule = rules.rules[i];
        if (!rule.conditions?.options) {
          issues.push(`Missing rules.rules[${i}].conditions.options`);
        } else {
          const required = ['version', 'leftValue', 'typeValidation', 'caseSensitive'];
          for (const field of required) {
            if (!(field in rule.conditions.options)) {
              issues.push(`Missing rules.rules[${i}].conditions.options.${field}`);
            }
          }
        }

        // Check operators
        if (rule.conditions?.conditions && Array.isArray(rule.conditions.conditions)) {
          for (let j = 0; j < rule.conditions.conditions.length; j++) {
            const condition = rule.conditions.conditions[j];
            const operatorIssues = validateOperator(
              condition.operator,
              `rules.rules[${i}].conditions.conditions[${j}].operator`
            );
            issues.push(...operatorIssues);
          }
        }
      }
    }
  }

  return issues;
}

/**
 * Validate operator structure
 */
function validateOperator(operator: any, path: string): string[] {
  const issues: string[] = [];

  if (!operator || typeof operator !== 'object') {
    issues.push(`${path}: operator is missing or not an object`);
    return issues;
  }

  if (!operator.type) {
    issues.push(`${path}: missing required field 'type'`);
  } else if (!['string', 'number', 'boolean', 'dateTime', 'array', 'object'].includes(operator.type)) {
    issues.push(`${path}: invalid type "${operator.type}" (must be data type, not operation)`);
  }

  if (!operator.operation) {
    issues.push(`${path}: missing required field 'operation'`);
  }

  // Check singleValue based on operator type
  if (operator.operation) {
    if (isUnaryOperator(operator.operation)) {
      // Unary operators MUST have singleValue: true
      if (operator.singleValue !== true) {
        issues.push(`${path}: unary operator "${operator.operation}" requires singleValue: true`);
      }
    } else {
      // Binary operators should NOT have singleValue
      if (operator.singleValue === true) {
        issues.push(`${path}: binary operator "${operator.operation}" should not have singleValue: true (only unary operators need this)`);
      }
    }
  }

  return issues;
}
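Illustrative effect of sanitizeOperator (input values assumed):

// sanitizeOperator({ type: 'isNotEmpty' })
//   -> { type: 'boolean', operation: 'isNotEmpty', singleValue: true }  // type inferred, operation moved
// sanitizeOperator({ type: 'string', operation: 'equals', singleValue: true })
//   -> { type: 'string', operation: 'equals' }                          // stray singleValue removed
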
@@ -269,13 +269,15 @@ export class NodeSpecificValidators {

  private static validateGoogleSheetsAppend(context: NodeValidationContext): void {
    const { config, errors, warnings, autofix } = context;

-   if (!config.range) {
+   // In Google Sheets v4+, range is only required if NOT using the columns resourceMapper
+   // The columns parameter is a resourceMapper introduced in v4 that handles range automatically
+   if (!config.range && !config.columns) {
      errors.push({
        type: 'missing_required',
        property: 'range',
-       message: 'Range is required for append operation',
-       fix: 'Specify range like "Sheet1!A:B" or "Sheet1!A1:B10"'
+       message: 'Range or columns mapping is required for append operation',
+       fix: 'Specify range like "Sheet1!A:B" OR use columns with mappingMode'
      });
    }

@@ -1036,16 +1038,9 @@ export class NodeSpecificValidators {
      delete autofix.continueOnFail;
    }

-   // Response mode validation
-   if (responseMode === 'responseNode' && !config.onError && !config.continueOnFail) {
-     errors.push({
-       type: 'invalid_configuration',
-       property: 'responseMode',
-       message: 'responseNode mode requires onError: "continueRegularOutput"',
-       fix: 'Set onError to ensure response is always sent'
-     });
-   }
+   // Note: responseNode mode validation moved to workflow-validator.ts
+   // where it has access to node-level onError property (not just config/parameters)

    // Always output data for debugging
    if (!config.alwaysOutputData) {
      suggestions.push('Enable alwaysOutputData to debug webhook payloads');
@@ -1556,4 +1551,59 @@ export class NodeSpecificValidators {
      });
    }
  }

+ /**
+  * Validate Set node configuration
+  */
+ static validateSet(context: NodeValidationContext): void {
+   const { config, errors, warnings } = context;
+
+   // Validate jsonOutput when present (used in JSON mode or when directly setting JSON)
+   if (config.jsonOutput !== undefined && config.jsonOutput !== null && config.jsonOutput !== '') {
+     try {
+       const parsed = JSON.parse(config.jsonOutput);
+
+       // Set node with JSON input expects an OBJECT {}, not an ARRAY []
+       // This is a common mistake that n8n UI catches but our validator should too
+       if (Array.isArray(parsed)) {
+         errors.push({
+           type: 'invalid_value',
+           property: 'jsonOutput',
+           message: 'Set node expects a JSON object {}, not an array []',
+           fix: 'Either wrap array items as object properties: {"items": [...]}, OR use a different approach for multiple items'
+         });
+       }
+
+       // Warn about empty objects
+       if (typeof parsed === 'object' && !Array.isArray(parsed) && Object.keys(parsed).length === 0) {
+         warnings.push({
+           type: 'inefficient',
+           property: 'jsonOutput',
+           message: 'jsonOutput is an empty object - this node will output no data',
+           suggestion: 'Add properties to the object or remove this node if not needed'
+         });
+       }
+     } catch (e) {
+       errors.push({
+         type: 'syntax_error',
+         property: 'jsonOutput',
+         message: `Invalid JSON in jsonOutput: ${e instanceof Error ? e.message : 'Syntax error'}`,
+         fix: 'Ensure jsonOutput contains valid JSON syntax'
+       });
+     }
+   }
+
+   // Validate mode-specific requirements
+   if (config.mode === 'manual') {
+     // In manual mode, at least one field should be defined
+     const hasFields = config.values && Object.keys(config.values).length > 0;
+     if (!hasFields && !config.jsonOutput) {
+       warnings.push({
+         type: 'missing_common',
+         message: 'Set node has no fields configured - will output empty items',
+         suggestion: 'Add fields in the Values section or use JSON mode'
+       });
+     }
+   }
+ }
}
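For example (illustrative config), a Set node given an array at the top level is now rejected:

// { jsonOutput: '[{"a": 1}, {"a": 2}]' }   -> error: "Set node expects a JSON object {}, not an array []"
// { jsonOutput: '{"items": [1, 2]}' }      -> passes
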
src/services/node-version-service.ts (new file, 377 lines)
@@ -0,0 +1,377 @@
/**
 * Node Version Service
 *
 * Central service for node version discovery, comparison, and upgrade path recommendation.
 * Provides caching for performance and integrates with the database and breaking change detector.
 */

import { NodeRepository } from '../database/node-repository';
import { BreakingChangeDetector } from './breaking-change-detector';

export interface NodeVersion {
  nodeType: string;
  version: string;
  packageName: string;
  displayName: string;
  isCurrentMax: boolean;
  minimumN8nVersion?: string;
  breakingChanges: any[];
  deprecatedProperties: string[];
  addedProperties: string[];
  releasedAt?: Date;
}

export interface VersionComparison {
  nodeType: string;
  currentVersion: string;
  latestVersion: string;
  isOutdated: boolean;
  versionGap: number; // How many versions behind
  hasBreakingChanges: boolean;
  recommendUpgrade: boolean;
  confidence: 'HIGH' | 'MEDIUM' | 'LOW';
  reason: string;
}

export interface UpgradePath {
  nodeType: string;
  fromVersion: string;
  toVersion: string;
  direct: boolean; // Can upgrade directly or needs intermediate steps
  intermediateVersions: string[]; // If a multi-step upgrade is needed
  totalBreakingChanges: number;
  autoMigratableChanges: number;
  manualRequiredChanges: number;
  estimatedEffort: 'LOW' | 'MEDIUM' | 'HIGH';
  steps: UpgradeStep[];
}

export interface UpgradeStep {
  fromVersion: string;
  toVersion: string;
  breakingChanges: number;
  migrationHints: string[];
}

/**
 * Node Version Service with caching
 */
export class NodeVersionService {
  private versionCache: Map<string, NodeVersion[]> = new Map();
  private cacheTTL: number = 5 * 60 * 1000; // 5 minutes
  private cacheTimestamps: Map<string, number> = new Map();

  constructor(
    private nodeRepository: NodeRepository,
    private breakingChangeDetector: BreakingChangeDetector
  ) {}

  /**
   * Get all available versions for a node type
   */
  getAvailableVersions(nodeType: string): NodeVersion[] {
    // Check cache first
    const cached = this.getCachedVersions(nodeType);
    if (cached) return cached;

    // Query from database
    const versions = this.nodeRepository.getNodeVersions(nodeType);

    // Cache the result
    this.cacheVersions(nodeType, versions);

    return versions;
  }

  /**
   * Get the latest available version for a node type
   */
  getLatestVersion(nodeType: string): string | null {
    const versions = this.getAvailableVersions(nodeType);

    if (versions.length === 0) {
      // Fall back to the main nodes table
      const node = this.nodeRepository.getNode(nodeType);
      return node?.version || null;
    }

    // Find the version marked as current max
    const maxVersion = versions.find(v => v.isCurrentMax);
    if (maxVersion) return maxVersion.version;

    // Fallback: sort a copy (avoid mutating the cached array) and take the highest
    const sorted = [...versions].sort((a, b) => this.compareVersions(b.version, a.version));
    return sorted[0]?.version || null;
  }

  /**
   * Compare two version strings segment by segment.
   * Returns -1 if the first is lower, 1 if it is higher, 0 if equal.
   */
  compareVersions(currentVersion: string, latestVersion: string): number {
    const parts1 = currentVersion.split('.').map(Number);
    const parts2 = latestVersion.split('.').map(Number);

    for (let i = 0; i < Math.max(parts1.length, parts2.length); i++) {
      const p1 = parts1[i] || 0;
      const p2 = parts2[i] || 0;

      if (p1 < p2) return -1;
      if (p1 > p2) return 1;
    }

    return 0;
  }
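Because compareVersions parses each dot-separated segment as a number, it avoids the lexicographic trap where '1.10' would sort below '1.2'. A few illustrative calls (service is a hypothetical instance):

// Illustrative expectations for the comparator above:
service.compareVersions('1.2', '2.0');  // -1: first version is lower
service.compareVersions('1.10', '1.2'); //  1: numeric, not lexicographic
service.compareVersions('2', '2.0');    //  0: missing segments count as 0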
  /**
   * Analyze whether a node version is outdated and should be upgraded
   */
  analyzeVersion(nodeType: string, currentVersion: string): VersionComparison {
    const latestVersion = this.getLatestVersion(nodeType);

    if (!latestVersion) {
      return {
        nodeType,
        currentVersion,
        latestVersion: currentVersion,
        isOutdated: false,
        versionGap: 0,
        hasBreakingChanges: false,
        recommendUpgrade: false,
        confidence: 'HIGH',
        reason: 'No version information available. Using current version.'
      };
    }

    const comparison = this.compareVersions(currentVersion, latestVersion);
    const isOutdated = comparison < 0;

    if (!isOutdated) {
      return {
        nodeType,
        currentVersion,
        latestVersion,
        isOutdated: false,
        versionGap: 0,
        hasBreakingChanges: false,
        recommendUpgrade: false,
        confidence: 'HIGH',
        reason: 'Node is already at the latest version.'
      };
    }

    // Calculate version gap
    const versionGap = this.calculateVersionGap(currentVersion, latestVersion);

    // Check for breaking changes
    const hasBreakingChanges = this.breakingChangeDetector.hasBreakingChanges(
      nodeType,
      currentVersion,
      latestVersion
    );

    // Determine upgrade recommendation and confidence
    let recommendUpgrade = true;
    let confidence: 'HIGH' | 'MEDIUM' | 'LOW' = 'HIGH';
    let reason = `Version ${latestVersion} available. `;

    if (hasBreakingChanges) {
      confidence = 'MEDIUM';
      reason += 'Contains breaking changes. Review before upgrading.';
    } else {
      reason += 'Safe to upgrade (no breaking changes detected).';
    }

    if (versionGap > 2) {
      confidence = 'LOW';
      reason += ` Version gap is large (${versionGap} versions). Consider an incremental upgrade.`;
    }

    return {
      nodeType,
      currentVersion,
      latestVersion,
      isOutdated,
      versionGap,
      hasBreakingChanges,
      recommendUpgrade,
      confidence,
      reason
    };
  }

  /**
   * Calculate the version gap (sum of absolute per-segment differences)
   */
  private calculateVersionGap(fromVersion: string, toVersion: string): number {
    const from = fromVersion.split('.').map(Number);
    const to = toVersion.split('.').map(Number);

    // Simple gap heuristic based on version numbers
    let gap = 0;

    for (let i = 0; i < Math.max(from.length, to.length); i++) {
      const f = from[i] || 0;
      const t = to[i] || 0;
      gap += Math.abs(t - f);
    }

    return gap;
  }
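Working the heuristic through once: it sums per-segment differences rather than counting released versions, so the score can overstate gaps that cross a segment rollover.

// Worked examples of the gap heuristic above:
// '1.0' -> '1.2' : |1-1| + |2-0| = 2
// '1.0' -> '2.1' : |2-1| + |1-0| = 2
// '1.9' -> '2.0' : |2-1| + |0-9| = 10 (rollover inflates the score)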
  /**
   * Suggest the best upgrade path for a node
   */
  async suggestUpgradePath(nodeType: string, currentVersion: string): Promise<UpgradePath | null> {
    const latestVersion = this.getLatestVersion(nodeType);

    if (!latestVersion) return null;

    const comparison = this.compareVersions(currentVersion, latestVersion);
    if (comparison >= 0) return null; // Already at latest or newer

    // Get all available versions between current and latest
    const allVersions = this.getAvailableVersions(nodeType);
    const intermediateVersions = allVersions
      .filter(v =>
        this.compareVersions(v.version, currentVersion) > 0 &&
        this.compareVersions(v.version, latestVersion) < 0
      )
      .map(v => v.version)
      .sort((a, b) => this.compareVersions(a, b));

    // Analyze the upgrade
    const analysis = await this.breakingChangeDetector.analyzeVersionUpgrade(
      nodeType,
      currentVersion,
      latestVersion
    );

    // Determine whether a direct upgrade is safe
    const versionGap = this.calculateVersionGap(currentVersion, latestVersion);
    const direct = versionGap <= 1 || !analysis.hasBreakingChanges;

    // Generate upgrade steps
    const steps: UpgradeStep[] = [];

    if (direct || intermediateVersions.length === 0) {
      // Direct upgrade
      steps.push({
        fromVersion: currentVersion,
        toVersion: latestVersion,
        breakingChanges: analysis.changes.filter(c => c.isBreaking).length,
        migrationHints: analysis.recommendations
      });
    } else {
      // Multi-step upgrade through intermediate versions
      let stepFrom = currentVersion;

      for (const intermediateVersion of intermediateVersions) {
        const stepAnalysis = await this.breakingChangeDetector.analyzeVersionUpgrade(
          nodeType,
          stepFrom,
          intermediateVersion
        );

        steps.push({
          fromVersion: stepFrom,
          toVersion: intermediateVersion,
          breakingChanges: stepAnalysis.changes.filter(c => c.isBreaking).length,
          migrationHints: stepAnalysis.recommendations
        });

        stepFrom = intermediateVersion;
      }

      // Final step to latest
      const finalStepAnalysis = await this.breakingChangeDetector.analyzeVersionUpgrade(
        nodeType,
        stepFrom,
        latestVersion
      );

      steps.push({
        fromVersion: stepFrom,
        toVersion: latestVersion,
        breakingChanges: finalStepAnalysis.changes.filter(c => c.isBreaking).length,
        migrationHints: finalStepAnalysis.recommendations
      });
    }

    // Estimate effort from total breaking changes and step count
    const totalBreakingChanges = steps.reduce((sum, step) => sum + step.breakingChanges, 0);
    let estimatedEffort: 'LOW' | 'MEDIUM' | 'HIGH' = 'LOW';

    if (totalBreakingChanges > 5 || steps.length > 3) {
      estimatedEffort = 'HIGH';
    } else if (totalBreakingChanges > 2 || steps.length > 1) {
      estimatedEffort = 'MEDIUM';
    }

    return {
      nodeType,
      fromVersion: currentVersion,
      toVersion: latestVersion,
      direct,
      intermediateVersions,
      totalBreakingChanges,
      autoMigratableChanges: analysis.autoMigratableCount,
      manualRequiredChanges: analysis.manualRequiredCount,
      estimatedEffort,
      steps
    };
  }

  /**
   * Check if a specific version exists for a node
   */
  versionExists(nodeType: string, version: string): boolean {
    const versions = this.getAvailableVersions(nodeType);
    return versions.some(v => v.version === version);
  }

  /**
   * Get version metadata (breaking changes, added/deprecated properties)
   */
  getVersionMetadata(nodeType: string, version: string): NodeVersion | null {
    return this.nodeRepository.getNodeVersion(nodeType, version);
  }

  /**
   * Clear the version cache (for one node type, or entirely)
   */
  clearCache(nodeType?: string): void {
    if (nodeType) {
      this.versionCache.delete(nodeType);
      this.cacheTimestamps.delete(nodeType);
    } else {
      this.versionCache.clear();
      this.cacheTimestamps.clear();
    }
  }

  /**
   * Get cached versions if still within the TTL
   */
  private getCachedVersions(nodeType: string): NodeVersion[] | null {
    const cached = this.versionCache.get(nodeType);
    const timestamp = this.cacheTimestamps.get(nodeType);

    if (cached && timestamp) {
      const age = Date.now() - timestamp;
      if (age < this.cacheTTL) {
        return cached;
      }
    }

    return null;
  }

  /**
   * Cache versions with a timestamp
   */
  private cacheVersions(nodeType: string, versions: NodeVersion[]): void {
    this.versionCache.set(nodeType, versions);
    this.cacheTimestamps.set(nodeType, Date.now());
  }
}
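A minimal wiring sketch, assuming a NodeRepository and BreakingChangeDetector are already constructed elsewhere; the instance names are placeholders.

// Hypothetical wiring (repository and detector construction elided; run inside an async function):
const service = new NodeVersionService(nodeRepository, breakingChangeDetector);

const status = service.analyzeVersion('n8n-nodes-base.webhook', '1.0');
if (status.recommendUpgrade) {
  const path = await service.suggestUpgradePath('n8n-nodes-base.webhook', '1.0');
  console.log(path?.steps.map(s => `${s.fromVersion} -> ${s.toVersion}`));
}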
423  src/services/post-update-validator.ts  Normal file
@@ -0,0 +1,423 @@
/**
 * Post-Update Validator
 *
 * Generates comprehensive, AI-friendly migration reports after node version upgrades.
 * Provides actionable guidance for AI agents on what manual steps are needed.
 *
 * Validation includes:
 * - New required properties
 * - Deprecated/removed properties
 * - Behavior changes
 * - Step-by-step migration instructions
 */

import { BreakingChangeDetector, DetectedChange } from './breaking-change-detector';
import { MigrationResult } from './node-migration-service';
import { NodeVersionService } from './node-version-service';

export interface PostUpdateGuidance {
  nodeId: string;
  nodeName: string;
  nodeType: string;
  oldVersion: string;
  newVersion: string;
  migrationStatus: 'complete' | 'partial' | 'manual_required';
  requiredActions: RequiredAction[];
  deprecatedProperties: DeprecatedProperty[];
  behaviorChanges: BehaviorChange[];
  migrationSteps: string[];
  confidence: 'HIGH' | 'MEDIUM' | 'LOW';
  estimatedTime: string; // e.g., "5 minutes", "15 minutes"
}

export interface RequiredAction {
  type: 'ADD_PROPERTY' | 'UPDATE_PROPERTY' | 'CONFIGURE_OPTION' | 'REVIEW_CONFIGURATION';
  property: string;
  reason: string;
  suggestedValue?: any;
  currentValue?: any;
  documentation?: string;
  priority: 'CRITICAL' | 'HIGH' | 'MEDIUM' | 'LOW';
}

export interface DeprecatedProperty {
  property: string;
  status: 'removed' | 'deprecated';
  replacement?: string;
  action: 'remove' | 'replace' | 'ignore';
  impact: 'breaking' | 'warning';
}

export interface BehaviorChange {
  aspect: string; // e.g., "data passing", "webhook handling"
  oldBehavior: string;
  newBehavior: string;
  impact: 'HIGH' | 'MEDIUM' | 'LOW';
  actionRequired: boolean;
  recommendation: string;
}

export class PostUpdateValidator {
  constructor(
    private versionService: NodeVersionService,
    private breakingChangeDetector: BreakingChangeDetector
  ) {}

  /**
   * Generate comprehensive post-update guidance for a migrated node
   */
  async generateGuidance(
    nodeId: string,
    nodeName: string,
    nodeType: string,
    oldVersion: string,
    newVersion: string,
    migrationResult: MigrationResult
  ): Promise<PostUpdateGuidance> {
    // Analyze the version upgrade
    const analysis = await this.breakingChangeDetector.analyzeVersionUpgrade(
      nodeType,
      oldVersion,
      newVersion
    );

    // Determine migration status
    const migrationStatus = this.determineMigrationStatus(migrationResult, analysis.changes);

    // Generate required actions
    const requiredActions = this.generateRequiredActions(
      migrationResult,
      analysis.changes,
      nodeType
    );

    // Identify deprecated properties
    const deprecatedProperties = this.identifyDeprecatedProperties(analysis.changes);

    // Document behavior changes
    const behaviorChanges = this.documentBehaviorChanges(nodeType, oldVersion, newVersion);

    // Generate step-by-step migration instructions
    const migrationSteps = this.generateMigrationSteps(
      requiredActions,
      deprecatedProperties,
      behaviorChanges
    );

    // Calculate confidence and estimated time
    const confidence = this.calculateConfidence(requiredActions, migrationStatus);
    const estimatedTime = this.estimateTime(requiredActions, behaviorChanges);

    return {
      nodeId,
      nodeName,
      nodeType,
      oldVersion,
      newVersion,
      migrationStatus,
      requiredActions,
      deprecatedProperties,
      behaviorChanges,
      migrationSteps,
      confidence,
      estimatedTime
    };
  }
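A sketch of how the guidance call might be wired after a migration; validator is a hypothetical PostUpdateValidator instance, and migrationResult is assumed to come from the node migration service named in the imports.

// Hypothetical call after migrating a node (inside an async context):
const guidance = await validator.generateGuidance(
  'node-123',                 // nodeId (placeholder)
  'My Webhook',               // nodeName (placeholder)
  'n8n-nodes-base.webhook',
  '1.0',                      // oldVersion
  '2.1',                      // newVersion
  migrationResult             // from node-migration-service
);
console.log(guidance.migrationStatus, guidance.estimatedTime);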
  /**
   * Determine the migration status based on results and changes
   */
  private determineMigrationStatus(
    migrationResult: MigrationResult,
    changes: DetectedChange[]
  ): 'complete' | 'partial' | 'manual_required' {
    if (migrationResult.remainingIssues.length === 0) {
      return 'complete';
    }

    const criticalIssues = changes.filter(c => c.isBreaking && !c.autoMigratable);

    if (criticalIssues.length > 0) {
      return 'manual_required';
    }

    return 'partial';
  }

  /**
   * Generate actionable required actions for the AI agent
   */
  private generateRequiredActions(
    migrationResult: MigrationResult,
    changes: DetectedChange[],
    nodeType: string
  ): RequiredAction[] {
    const actions: RequiredAction[] = [];

    // Actions from remaining issues (not auto-migrated)
    const manualChanges = changes.filter(c => !c.autoMigratable);

    for (const change of manualChanges) {
      actions.push({
        type: this.mapChangeTypeToActionType(change.changeType),
        property: change.propertyName,
        reason: change.migrationHint,
        suggestedValue: change.newValue,
        currentValue: change.oldValue,
        documentation: this.getPropertyDocumentation(nodeType, change.propertyName),
        priority: this.mapSeverityToPriority(change.severity)
      });
    }

    return actions;
  }

  /**
   * Identify deprecated or removed properties
   */
  private identifyDeprecatedProperties(changes: DetectedChange[]): DeprecatedProperty[] {
    const deprecated: DeprecatedProperty[] = [];

    for (const change of changes) {
      if (change.changeType === 'removed') {
        deprecated.push({
          property: change.propertyName,
          status: 'removed',
          replacement: change.migrationStrategy?.targetProperty,
          action: change.autoMigratable ? 'remove' : 'replace',
          impact: change.isBreaking ? 'breaking' : 'warning'
        });
      }
    }

    return deprecated;
  }

  /**
   * Document known behavior changes for specific nodes
   */
  private documentBehaviorChanges(
    nodeType: string,
    oldVersion: string,
    newVersion: string
  ): BehaviorChange[] {
    const changes: BehaviorChange[] = [];

    // Execute Workflow node behavior changes
    if (nodeType === 'n8n-nodes-base.executeWorkflow') {
      if (this.versionService.compareVersions(oldVersion, '1.1') < 0 &&
          this.versionService.compareVersions(newVersion, '1.1') >= 0) {
        changes.push({
          aspect: 'Data passing to sub-workflows',
          oldBehavior: 'Automatic data passing - all data from the parent workflow is automatically available',
          newBehavior: 'Explicit field mapping required - must define inputFieldMapping to pass specific fields',
          impact: 'HIGH',
          actionRequired: true,
          recommendation: 'Define inputFieldMapping with specific field mappings between parent and child workflows. Review data dependencies.'
        });
      }
    }

    // Webhook node behavior changes
    if (nodeType === 'n8n-nodes-base.webhook') {
      if (this.versionService.compareVersions(oldVersion, '2.1') < 0 &&
          this.versionService.compareVersions(newVersion, '2.1') >= 0) {
        changes.push({
          aspect: 'Webhook persistence',
          oldBehavior: 'Webhook URL changes on workflow updates',
          newBehavior: 'Stable webhook URL via the webhookId field',
          impact: 'MEDIUM',
          actionRequired: false,
          recommendation: 'Webhook URLs now remain stable across workflow updates. Update external systems if needed.'
        });
      }

      if (this.versionService.compareVersions(oldVersion, '2.0') < 0 &&
          this.versionService.compareVersions(newVersion, '2.0') >= 0) {
        changes.push({
          aspect: 'Response handling',
          oldBehavior: 'Automatic response after webhook trigger',
          newBehavior: 'Configurable response mode (onReceived vs lastNode)',
          impact: 'MEDIUM',
          actionRequired: true,
          recommendation: 'Review the responseMode setting. Use "onReceived" for immediate responses or "lastNode" to wait for workflow completion.'
        });
      }
    }

    return changes;
  }

  /**
   * Generate step-by-step migration instructions for AI agents
   */
  private generateMigrationSteps(
    requiredActions: RequiredAction[],
    deprecatedProperties: DeprecatedProperty[],
    behaviorChanges: BehaviorChange[]
  ): string[] {
    const steps: string[] = [];
    let stepNumber = 1;

    // Start with deprecations
    if (deprecatedProperties.length > 0) {
      steps.push(`Step ${stepNumber++}: Remove deprecated properties`);
      for (const dep of deprecatedProperties) {
        steps.push(`  - Remove "${dep.property}" ${dep.replacement ? `(use "${dep.replacement}" instead)` : ''}`);
      }
    }

    // Then critical actions
    const criticalActions = requiredActions.filter(a => a.priority === 'CRITICAL');
    if (criticalActions.length > 0) {
      steps.push(`Step ${stepNumber++}: Address critical configuration requirements`);
      for (const action of criticalActions) {
        steps.push(`  - ${action.property}: ${action.reason}`);
        if (action.suggestedValue !== undefined) {
          steps.push(`    Suggested value: ${JSON.stringify(action.suggestedValue)}`);
        }
      }
    }

    // High-priority actions
    const highActions = requiredActions.filter(a => a.priority === 'HIGH');
    if (highActions.length > 0) {
      steps.push(`Step ${stepNumber++}: Configure required properties`);
      for (const action of highActions) {
        steps.push(`  - ${action.property}: ${action.reason}`);
      }
    }

    // Behavior change adaptations
    const actionRequiredChanges = behaviorChanges.filter(c => c.actionRequired);
    if (actionRequiredChanges.length > 0) {
      steps.push(`Step ${stepNumber++}: Adapt to behavior changes`);
      for (const change of actionRequiredChanges) {
        steps.push(`  - ${change.aspect}: ${change.recommendation}`);
      }
    }

    // Medium/low-priority actions
    const otherActions = requiredActions.filter(a => a.priority === 'MEDIUM' || a.priority === 'LOW');
    if (otherActions.length > 0) {
      steps.push(`Step ${stepNumber++}: Review optional configurations`);
      for (const action of otherActions) {
        steps.push(`  - ${action.property}: ${action.reason}`);
      }
    }

    // Final validation step
    steps.push(`Step ${stepNumber}: Test workflow execution`);
    steps.push('  - Validate all node configurations');
    steps.push('  - Run a test execution');
    steps.push('  - Verify expected behavior');

    return steps;
  }
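For a sense of the output shape, a run with one removed property and one critical action might produce a steps array roughly like the following; the property names here are purely illustrative.

// Illustrative generateMigrationSteps output:
// [
//   'Step 1: Remove deprecated properties',
//   '  - Remove "oldProp" (use "newProp" instead)',
//   'Step 2: Address critical configuration requirements',
//   '  - inputFieldMapping: Explicit field mapping is now required',
//   'Step 3: Test workflow execution',
//   '  - Validate all node configurations',
//   ...
// ]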
  /**
   * Map a detected change type to an action type
   */
  private mapChangeTypeToActionType(
    changeType: string
  ): 'ADD_PROPERTY' | 'UPDATE_PROPERTY' | 'CONFIGURE_OPTION' | 'REVIEW_CONFIGURATION' {
    switch (changeType) {
      case 'added':
        return 'ADD_PROPERTY';
      case 'requirement_changed':
      case 'type_changed':
        return 'UPDATE_PROPERTY';
      case 'default_changed':
        return 'CONFIGURE_OPTION';
      default:
        return 'REVIEW_CONFIGURATION';
    }
  }

  /**
   * Map change severity to action priority (HIGH severity escalates to CRITICAL)
   */
  private mapSeverityToPriority(
    severity: 'LOW' | 'MEDIUM' | 'HIGH'
  ): 'CRITICAL' | 'HIGH' | 'MEDIUM' | 'LOW' {
    if (severity === 'HIGH') return 'CRITICAL';
    return severity;
  }

  /**
   * Get documentation for a property (placeholder - would integrate with node docs)
   */
  private getPropertyDocumentation(nodeType: string, propertyName: string): string {
    // In the future, this would fetch from node documentation
    return `See n8n documentation for ${nodeType} - ${propertyName}`;
  }

  /**
   * Calculate overall confidence in the migration
   */
  private calculateConfidence(
    requiredActions: RequiredAction[],
    migrationStatus: 'complete' | 'partial' | 'manual_required'
  ): 'HIGH' | 'MEDIUM' | 'LOW' {
    if (migrationStatus === 'complete') return 'HIGH';

    const criticalActions = requiredActions.filter(a => a.priority === 'CRITICAL');

    if (migrationStatus === 'manual_required' || criticalActions.length > 3) {
      return 'LOW';
    }

    return 'MEDIUM';
  }

  /**
   * Estimate the time required for manual migration steps
   */
  private estimateTime(
    requiredActions: RequiredAction[],
    behaviorChanges: BehaviorChange[]
  ): string {
    const criticalCount = requiredActions.filter(a => a.priority === 'CRITICAL').length;
    const highCount = requiredActions.filter(a => a.priority === 'HIGH').length;
    const behaviorCount = behaviorChanges.filter(c => c.actionRequired).length;

    // Weighted complexity: critical actions count most, behavior changes least
    const totalComplexity = criticalCount * 5 + highCount * 3 + behaviorCount * 2;

    if (totalComplexity === 0) return '< 1 minute';
    if (totalComplexity <= 5) return '2-5 minutes';
    if (totalComplexity <= 10) return '5-10 minutes';
    if (totalComplexity <= 20) return '10-20 minutes';
    return '20+ minutes';
  }
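Working the weighting through once: two critical actions, one high-priority action, and one actionable behavior change score 2*5 + 1*3 + 1*2 = 15, which lands in the '10-20 minutes' bucket.

// Worked examples of the complexity score above:
// 2 CRITICAL*5 + 1 HIGH*3 + 1 behavior*2 = 15 -> '10-20 minutes'
// 1 HIGH*3                               =  3 -> '2-5 minutes'
// no manual work at all                  =  0 -> '< 1 minute'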
  /**
   * Generate a human-readable summary for logging/display
   */
  generateSummary(guidance: PostUpdateGuidance): string {
    const lines: string[] = [];

    lines.push(`Node "${guidance.nodeName}" upgraded from v${guidance.oldVersion} to v${guidance.newVersion}`);
    lines.push(`Status: ${guidance.migrationStatus.toUpperCase()}`);
    lines.push(`Confidence: ${guidance.confidence}`);
    lines.push(`Estimated time: ${guidance.estimatedTime}`);

    if (guidance.requiredActions.length > 0) {
      lines.push(`\nRequired actions: ${guidance.requiredActions.length}`);
      for (const action of guidance.requiredActions.slice(0, 3)) {
        lines.push(`  - [${action.priority}] ${action.property}: ${action.reason}`);
      }
      if (guidance.requiredActions.length > 3) {
        lines.push(`  ... and ${guidance.requiredActions.length - 3} more`);
      }
    }

    if (guidance.behaviorChanges.length > 0) {
      lines.push(`\nBehavior changes: ${guidance.behaviorChanges.length}`);
      for (const change of guidance.behaviorChanges) {
        lines.push(`  - ${change.aspect}: ${change.newBehavior}`);
      }
    }

    return lines.join('\n');
  }
}
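The summary is plain text; for a partially migrated webhook node it might render along these lines (values illustrative, drawn from the behavior changes documented above):

// Illustrative generateSummary output:
// Node "My Webhook" upgraded from v1.0 to v2.1
// Status: PARTIAL
// Confidence: MEDIUM
// Estimated time: 5-10 minutes
//
// Required actions: 1
//   - [HIGH] responseMode: Review response handling configuration
//
// Behavior changes: 2
//   - Webhook persistence: Stable webhook URL via the webhookId field
//   - Response handling: Configurable response mode (onReceived vs lastNode)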
@@ -21,8 +21,18 @@ export class UniversalExpressionValidator {
   private static readonly EXPRESSION_PREFIX = '=';
 
   /**
-   * Universal Rule 1: Any field containing {{ }} MUST have = prefix
-   * This is an absolute rule in n8n - no exceptions
+   * Universal Rule 1: Any field containing {{ }} MUST have = prefix to be evaluated
+   * This applies to BOTH pure expressions and mixed content
+   *
+   * Examples:
+   * - "{{ $json.value }}" -> literal text (NOT evaluated)
+   * - "={{ $json.value }}" -> evaluated expression
+   * - "Hello {{ $json.name }}!" -> literal text (NOT evaluated)
+   * - "=Hello {{ $json.name }}!" -> evaluated (expression in mixed content)
+   * - "=https://api.com/{{ $json.id }}/data" -> evaluated (real example from n8n)
+   *
+   * EXCEPTION: Some langchain node fields auto-evaluate without = prefix
+   * (validated separately by AI-specific validators)
    */
   static validateExpressionPrefix(value: any): UniversalValidationResult {
     // Only validate strings
@@ -53,6 +63,10 @@ export class UniversalExpressionValidator {
     const hasPrefix = value.startsWith(this.EXPRESSION_PREFIX);
     const isMixedContent = this.hasMixedContent(value);
 
+    // For langchain nodes, we don't validate expression prefixes
+    // They have AI-specific validators that handle their expression rules
+    // This is checked at the node level, not here
+
     if (!hasPrefix) {
       return {
         isValid: false,
Some files were not shown because too many files have changed in this diff.