mirror of
https://github.com/czlonkowski/n8n-mcp.git
synced 2026-03-21 09:53:08 +00:00
fix: resolve multiple n8n_update_partial_workflow bugs (#635)
* fix: use correct MCP SDK API for server capabilities in test getServerVersion() returns Implementation (name/version only), not the full init result. Use client.getServerCapabilities() instead to access server capabilities, fixing the CI typecheck failure. Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> * fix: resolve multiple n8n_update_partial_workflow bugs (#592, #599, #610, #623, #624, #625, #629, #630, #633) Phase 1 - Data loss prevention: - Add missing unary operators (empty, notEmpty, exists, notExists) to sanitizer (#592) - Preserve positional empty arrays in connections during removeNode/cleanStale (#610) - Scope sanitization to modified nodes only, preventing unrelated node corruption - Add empty body {} to activate/deactivate POST calls to fix 415 errors (#633) Phase 2 - Error handling & response clarity: - Serialize Zod errors to readable "path: message" strings (#630) - Add saved:true/false field to all response paths (#625) - Improve updateNode error hint with correct structure example (#623) - Track removed node names for better removeConnection errors (#624) Phase 3 - Connection & type fixes: - Coerce sourceOutput/targetInput to String() consistently (#629) - Accept numeric sourceOutput/targetInput at Zod schema level via transform Phase 4 - Tag operations via dedicated API (#599): - Track tags as tagsToAdd/tagsToRemove instead of mutating workflow.tags - Orchestrate tag creation and association via listTags/createTag/updateWorkflowTags - Reconcile conflicting add/remove for same tag (last operation wins) - Tag failures produce warnings, not hard errors Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> * docs: add v2.37.0 changelog entry Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> * fix: resolve pre-existing 
integration test failures in CI - Create new MCP Server instance per connection in test helpers (SDK 1.27+ requires separate Protocol instance per connection) - Normalize database paths with path.resolve() in shared-database singleton to prevent path mismatch errors across test files - Add no-op catch handler to deferred initialization promise in server.ts to prevent unhandled rejection warnings - Properly call mcpServer.shutdown() in test helper close() to release shared database references Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
committed by
GitHub
parent
248f859c49
commit
9590f751d2
@@ -105,21 +105,14 @@ describe('MCP Protocol Compliance', () => {
|
||||
|
||||
describe('Message Format Validation', () => {
|
||||
it('should reject messages without method', async () => {
|
||||
// Test by sending raw message through transport
|
||||
const [serverTransport, clientTransport] = InMemoryTransport.createLinkedPair();
|
||||
const testClient = new Client({ name: 'test', version: '1.0.0' }, {});
|
||||
|
||||
await mcpServer.connectToTransport(serverTransport);
|
||||
await testClient.connect(clientTransport);
|
||||
|
||||
// MCP SDK 1.27+ enforces single-connection per Server instance,
|
||||
// so use the existing client from beforeEach instead of a new one.
|
||||
try {
|
||||
// This should fail as MCP SDK validates method
|
||||
await (testClient as any).request({ method: '', params: {} });
|
||||
await (client as any).request({ method: '', params: {} });
|
||||
expect.fail('Should have thrown an error');
|
||||
} catch (error) {
|
||||
expect(error).toBeDefined();
|
||||
} finally {
|
||||
await testClient.close();
|
||||
}
|
||||
});
|
||||
|
||||
@@ -250,10 +243,15 @@ describe('MCP Protocol Compliance', () => {
|
||||
|
||||
describe('Transport Layer', () => {
|
||||
it('should handle transport disconnection gracefully', async () => {
|
||||
const [serverTransport, clientTransport] = InMemoryTransport.createLinkedPair();
|
||||
const testClient = new Client({ name: 'test', version: '1.0.0' }, {});
|
||||
// Use a dedicated server instance so we don't conflict with the
|
||||
// shared mcpServer that beforeEach already connected a transport to.
|
||||
const dedicatedServer = new TestableN8NMCPServer();
|
||||
await dedicatedServer.initialize();
|
||||
|
||||
await mcpServer.connectToTransport(serverTransport);
|
||||
const [serverTransport, clientTransport] = InMemoryTransport.createLinkedPair();
|
||||
await dedicatedServer.connectToTransport(serverTransport);
|
||||
|
||||
const testClient = new Client({ name: 'test', version: '1.0.0' }, {});
|
||||
await testClient.connect(clientTransport);
|
||||
|
||||
// Make a request
|
||||
@@ -270,6 +268,8 @@ describe('MCP Protocol Compliance', () => {
|
||||
} catch (error) {
|
||||
expect(error).toBeDefined();
|
||||
}
|
||||
|
||||
await dedicatedServer.close();
|
||||
});
|
||||
|
||||
it('should handle multiple sequential connections', async () => {
|
||||
|
||||
@@ -73,10 +73,11 @@ describe('MCP Session Management', { timeout: 15000 }, () => {
|
||||
const serverInfo = await client.getServerVersion();
|
||||
expect(serverInfo).toBeDefined();
|
||||
expect(serverInfo?.name).toBe('n8n-documentation-mcp');
|
||||
|
||||
// Check capabilities if they exist
|
||||
if (serverInfo?.capabilities) {
|
||||
expect(serverInfo.capabilities).toHaveProperty('tools');
|
||||
|
||||
// Check capabilities via the dedicated method
|
||||
const capabilities = client.getServerCapabilities();
|
||||
if (capabilities) {
|
||||
expect(capabilities).toHaveProperty('tools');
|
||||
}
|
||||
|
||||
// Clean up - ensure proper order
|
||||
@@ -340,9 +341,9 @@ describe('MCP Session Management', { timeout: 15000 }, () => {
|
||||
it('should handle different client versions', async () => {
|
||||
const mcpServer = new TestableN8NMCPServer();
|
||||
await mcpServer.initialize();
|
||||
|
||||
const clients = [];
|
||||
|
||||
// MCP SDK 1.27+ enforces single-connection per Server instance,
|
||||
// so we test each version sequentially rather than concurrently.
|
||||
for (const version of ['1.0.0', '1.1.0', '2.0.0']) {
|
||||
const [serverTransport, clientTransport] = InMemoryTransport.createLinkedPair();
|
||||
await mcpServer.connectToTransport(serverTransport);
|
||||
@@ -353,21 +354,14 @@ describe('MCP Session Management', { timeout: 15000 }, () => {
|
||||
}, {});
|
||||
|
||||
await client.connect(clientTransport);
|
||||
clients.push(client);
|
||||
|
||||
const info = await client.getServerVersion();
|
||||
expect(info!.name).toBe('n8n-documentation-mcp');
|
||||
|
||||
await client.close();
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
}
|
||||
|
||||
// All versions should work
|
||||
const responses = await Promise.all(
|
||||
clients.map(client => client.getServerVersion())
|
||||
);
|
||||
|
||||
responses.forEach(info => {
|
||||
expect(info!.name).toBe('n8n-documentation-mcp');
|
||||
});
|
||||
|
||||
// Clean up
|
||||
await Promise.all(clients.map(client => client.close()));
|
||||
await new Promise(resolve => setTimeout(resolve, 100)); // Give time for all clients to fully close
|
||||
await mcpServer.close();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
|
||||
import { Transport } from '@modelcontextprotocol/sdk/shared/transport.js';
|
||||
import {
|
||||
CallToolRequestSchema,
|
||||
import {
|
||||
CallToolRequestSchema,
|
||||
ListToolsRequestSchema,
|
||||
InitializeRequestSchema,
|
||||
} from '@modelcontextprotocol/sdk/types.js';
|
||||
@@ -14,18 +14,30 @@ export class TestableN8NMCPServer {
|
||||
private mcpServer: N8NDocumentationMCPServer;
|
||||
private server: Server;
|
||||
private transports = new Set<Transport>();
|
||||
private connections = new Set<any>();
|
||||
private static instanceCount = 0;
|
||||
private testDbPath: string;
|
||||
|
||||
constructor() {
|
||||
// Use a unique test database for each instance to avoid conflicts
|
||||
// This prevents concurrent test issues with database locking
|
||||
const instanceId = TestableN8NMCPServer.instanceCount++;
|
||||
this.testDbPath = `/tmp/n8n-mcp-test-${process.pid}-${instanceId}.db`;
|
||||
// Use path.resolve to produce a canonical absolute path so the shared
|
||||
// database singleton always sees the exact same string, preventing
|
||||
// "Shared database already initialized with different path" errors.
|
||||
const path = require('path');
|
||||
this.testDbPath = path.resolve(process.cwd(), 'data', 'nodes.db');
|
||||
process.env.NODE_DB_PATH = this.testDbPath;
|
||||
|
||||
this.server = new Server({
|
||||
|
||||
this.server = this.createServer();
|
||||
|
||||
this.mcpServer = new N8NDocumentationMCPServer();
|
||||
this.setupHandlers(this.server);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a fresh MCP SDK Server instance.
|
||||
* MCP SDK 1.27+ enforces single-connection per Protocol instance,
|
||||
* so we create a new one each time we need to connect to a transport.
|
||||
*/
|
||||
private createServer(): Server {
|
||||
return new Server({
|
||||
name: 'n8n-documentation-mcp',
|
||||
version: '1.0.0'
|
||||
}, {
|
||||
@@ -33,14 +45,11 @@ export class TestableN8NMCPServer {
|
||||
tools: {}
|
||||
}
|
||||
});
|
||||
|
||||
this.mcpServer = new N8NDocumentationMCPServer();
|
||||
this.setupHandlers();
|
||||
}
|
||||
|
||||
private setupHandlers() {
|
||||
private setupHandlers(server: Server) {
|
||||
// Initialize handler
|
||||
this.server.setRequestHandler(InitializeRequestSchema, async () => {
|
||||
server.setRequestHandler(InitializeRequestSchema, async () => {
|
||||
return {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: {
|
||||
@@ -54,27 +63,27 @@ export class TestableN8NMCPServer {
|
||||
});
|
||||
|
||||
// List tools handler
|
||||
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
// Import the tools directly from the tools module
|
||||
const { n8nDocumentationToolsFinal } = await import('../../../src/mcp/tools');
|
||||
const { n8nManagementTools } = await import('../../../src/mcp/tools-n8n-manager');
|
||||
const { isN8nApiConfigured } = await import('../../../src/config/n8n-api');
|
||||
|
||||
|
||||
// Combine documentation tools with management tools if API is configured
|
||||
const tools = [...n8nDocumentationToolsFinal];
|
||||
if (isN8nApiConfigured()) {
|
||||
tools.push(...n8nManagementTools);
|
||||
}
|
||||
|
||||
|
||||
return { tools };
|
||||
});
|
||||
|
||||
// Call tool handler
|
||||
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
try {
|
||||
// The mcpServer.executeTool returns raw data, we need to wrap it in the MCP response format
|
||||
const result = await this.mcpServer.executeTool(request.params.name, request.params.arguments || {});
|
||||
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
@@ -98,21 +107,8 @@ export class TestableN8NMCPServer {
|
||||
}
|
||||
|
||||
async initialize(): Promise<void> {
|
||||
// Copy production database to test location for realistic testing
|
||||
try {
|
||||
const fs = await import('fs');
|
||||
const path = await import('path');
|
||||
const prodDbPath = path.join(process.cwd(), 'data', 'nodes.db');
|
||||
|
||||
if (await fs.promises.access(prodDbPath).then(() => true).catch(() => false)) {
|
||||
await fs.promises.copyFile(prodDbPath, this.testDbPath);
|
||||
}
|
||||
} catch (error) {
|
||||
// Ignore copy errors, database will be created fresh
|
||||
}
|
||||
|
||||
// The MCP server initializes its database lazily
|
||||
// We can trigger initialization by calling executeTool
|
||||
// The MCP server initializes its database lazily via the shared
|
||||
// database singleton. Trigger initialization by calling executeTool.
|
||||
try {
|
||||
await this.mcpServer.executeTool('tools_documentation', {});
|
||||
} catch (error) {
|
||||
@@ -125,33 +121,26 @@ export class TestableN8NMCPServer {
|
||||
if (!transport || typeof transport !== 'object') {
|
||||
throw new Error('Invalid transport provided');
|
||||
}
|
||||
|
||||
// Set up any missing transport handlers to prevent "Cannot set properties of undefined" errors
|
||||
if (transport && typeof transport === 'object') {
|
||||
const transportAny = transport as any;
|
||||
if (transportAny.serverTransport && !transportAny.serverTransport.onclose) {
|
||||
transportAny.serverTransport.onclose = () => {};
|
||||
}
|
||||
|
||||
// MCP SDK 1.27+ enforces single-connection per Protocol instance.
|
||||
// Close the current server and create a fresh one so that _transport
|
||||
// is guaranteed to be undefined. Reusing the same Server after close()
|
||||
// is unreliable because _transport is cleared asynchronously via the
|
||||
// transport onclose callback chain, which can fail in CI.
|
||||
try {
|
||||
await this.server.close();
|
||||
} catch {
|
||||
// Ignore errors during cleanup of previous transport
|
||||
}
|
||||
|
||||
// MCP SDK 1.27+ enforces single-connection per Server instance.
|
||||
// Close existing connections before connecting a new transport.
|
||||
for (const conn of this.connections) {
|
||||
try {
|
||||
if (conn && typeof conn.close === 'function') {
|
||||
await conn.close();
|
||||
}
|
||||
} catch {
|
||||
// Ignore errors during cleanup
|
||||
}
|
||||
}
|
||||
this.connections.clear();
|
||||
|
||||
// Create a brand-new Server instance for this connection
|
||||
this.server = this.createServer();
|
||||
this.setupHandlers(this.server);
|
||||
|
||||
// Track this transport for cleanup
|
||||
this.transports.add(transport);
|
||||
|
||||
const connection = await this.server.connect(transport);
|
||||
this.connections.add(connection);
|
||||
await this.server.connect(transport);
|
||||
}
|
||||
|
||||
async close(): Promise<void> {
|
||||
@@ -164,78 +153,47 @@ export class TestableN8NMCPServer {
|
||||
});
|
||||
|
||||
const performClose = async () => {
|
||||
// Close all connections first with timeout protection
|
||||
const connectionPromises = Array.from(this.connections).map(async (connection) => {
|
||||
const connTimeout = new Promise<void>((resolve) => setTimeout(resolve, 500));
|
||||
|
||||
try {
|
||||
if (connection && typeof connection.close === 'function') {
|
||||
await Promise.race([connection.close(), connTimeout]);
|
||||
}
|
||||
} catch (error) {
|
||||
// Ignore errors during connection cleanup
|
||||
}
|
||||
});
|
||||
|
||||
await Promise.allSettled(connectionPromises);
|
||||
this.connections.clear();
|
||||
|
||||
// Close the MCP SDK Server (resets _transport via _onclose)
|
||||
try {
|
||||
await this.server.close();
|
||||
} catch {
|
||||
// Ignore errors during server close
|
||||
}
|
||||
|
||||
// Shut down the inner N8NDocumentationMCPServer to release the
|
||||
// shared database reference and prevent resource leaks.
|
||||
try {
|
||||
await this.mcpServer.shutdown();
|
||||
} catch {
|
||||
// Ignore errors during inner server shutdown
|
||||
}
|
||||
|
||||
// Close all tracked transports with timeout protection
|
||||
const transportPromises: Promise<void>[] = [];
|
||||
|
||||
|
||||
for (const transport of this.transports) {
|
||||
const transportTimeout = new Promise<void>((resolve) => setTimeout(resolve, 500));
|
||||
|
||||
|
||||
try {
|
||||
// Force close all transports
|
||||
const transportAny = transport as any;
|
||||
|
||||
// Try different close methods
|
||||
if (transportAny.close && typeof transportAny.close === 'function') {
|
||||
transportPromises.push(
|
||||
Promise.race([transportAny.close(), transportTimeout])
|
||||
);
|
||||
}
|
||||
if (transportAny.serverTransport?.close) {
|
||||
transportPromises.push(
|
||||
Promise.race([transportAny.serverTransport.close(), transportTimeout])
|
||||
);
|
||||
}
|
||||
if (transportAny.clientTransport?.close) {
|
||||
transportPromises.push(
|
||||
Promise.race([transportAny.clientTransport.close(), transportTimeout])
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
} catch {
|
||||
// Ignore errors during transport cleanup
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for all transports to close with timeout
|
||||
|
||||
await Promise.allSettled(transportPromises);
|
||||
|
||||
// Clear the transports set
|
||||
this.transports.clear();
|
||||
|
||||
// Don't shut down the shared MCP server instance
|
||||
};
|
||||
|
||||
// Race between actual close and timeout
|
||||
await Promise.race([performClose(), closeTimeout]);
|
||||
|
||||
// Clean up test database
|
||||
if (this.testDbPath) {
|
||||
try {
|
||||
const fs = await import('fs');
|
||||
await fs.promises.unlink(this.testDbPath).catch(() => {});
|
||||
await fs.promises.unlink(`${this.testDbPath}-shm`).catch(() => {});
|
||||
await fs.promises.unlink(`${this.testDbPath}-wal`).catch(() => {});
|
||||
} catch (error) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static async shutdownShared(): Promise<void> {
|
||||
if (sharedMcpServer) {
|
||||
await sharedMcpServer.shutdown();
|
||||
|
||||
Reference in New Issue
Block a user