mirror of
https://github.com/czlonkowski/n8n-mcp.git
synced 2026-02-06 05:23:08 +00:00
- Implement OpenAI batch API integration for metadata generation
- Add metadata columns to database schema (metadata_json, metadata_generated_at)
- Create MetadataGenerator service with structured output schemas
- Create BatchProcessor for handling OpenAI batch jobs
- Add --generate-metadata flag to fetch-templates script
- Update template repository with metadata management methods
- Add OpenAI configuration to environment variables
- Include comprehensive tests for metadata generation
- Use gpt-4o-mini model with 50% cost savings via batch API

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
109 lines
3.3 KiB
Plaintext
# n8n Documentation MCP Server Configuration

# ====================
# COMMON CONFIGURATION
# ====================

# Database Configuration
# For local development: ./data/nodes.db
# For Docker: /app/data/nodes.db
# Custom paths supported in v2.7.16+ (must end with .db)
NODE_DB_PATH=./data/nodes.db

# Logging Level (debug, info, warn, error)
MCP_LOG_LEVEL=info

# Node Environment (development, production)
NODE_ENV=development

# Rebuild database on startup (true/false)
REBUILD_ON_START=false

# =========================
# LOCAL MODE CONFIGURATION
# =========================
# Used when running: npm run start:v2 or npm run dev:v2

# Local MCP Server Configuration
MCP_SERVER_PORT=3000
MCP_SERVER_HOST=localhost
# MCP_AUTH_TOKEN=optional-for-local-development

# =========================
# SIMPLE HTTP MODE
# =========================
# Used for private single-user deployments

# Server mode: stdio (local) or http (remote)
MCP_MODE=stdio

# Use fixed HTTP implementation (recommended for stability)
# Set to true to bypass StreamableHTTPServerTransport issues
USE_FIXED_HTTP=true

# HTTP Server Configuration (only used when MCP_MODE=http)
PORT=3000
HOST=0.0.0.0

# Base URL Configuration (optional)
# Set this when running behind a proxy or when the server is accessed via a different URL
# than what it binds to. If not set, URLs will be auto-detected from proxy headers (if TRUST_PROXY is set)
# or constructed from HOST and PORT.
# Examples:
# BASE_URL=https://n8n-mcp.example.com
# BASE_URL=https://your-domain.com:8443
# PUBLIC_URL=https://n8n-mcp.mydomain.com (alternative to BASE_URL)

# Authentication token for HTTP mode (REQUIRED)
# Generate with: openssl rand -base64 32
AUTH_TOKEN=your-secure-token-here

# CORS origin for HTTP mode (optional)
# Default: * (allow all origins)
# For production, set to your specific domain
# CORS_ORIGIN=https://your-client-domain.com

# Trust proxy configuration for correct IP logging (0=disabled, 1=trust first proxy)
# Set to 1 when running behind a reverse proxy (Nginx, Traefik, etc.)
# Set to the number of proxy hops if behind multiple proxies
# Default: 0 (disabled)
# TRUST_PROXY=0

# =========================
# N8N API CONFIGURATION
# =========================
# Optional: Enable n8n management tools by providing API credentials
# These tools allow creating, updating, and executing workflows

# n8n instance API URL (without /api/v1 suffix)
# Example: https://your-n8n-instance.com
# N8N_API_URL=

# n8n API Key (get from Settings > API in your n8n instance)
# N8N_API_KEY=

# n8n API request timeout in milliseconds (default: 30000)
# N8N_API_TIMEOUT=30000

# Maximum number of API request retries (default: 3)
# N8N_API_MAX_RETRIES=3

# =========================
# OPENAI API CONFIGURATION
# =========================
# Optional: Enable AI-powered template metadata generation
# Provides structured metadata for improved template discovery

# OpenAI API Key (get from https://platform.openai.com/api-keys)
# OPENAI_API_KEY=

# OpenAI Model for metadata generation (default: gpt-4o-mini)
# OPENAI_MODEL=gpt-4o-mini

# Batch size for metadata generation (default: 100)
# Templates are processed in batches using OpenAI's Batch API for 50% cost savings
# OPENAI_BATCH_SIZE=100

# Enable metadata generation during template fetch (default: false)
# Set to true to automatically generate metadata when running fetch:templates
# METADATA_GENERATION_ENABLED=false