mirror of
https://github.com/czlonkowski/n8n-mcp.git
synced 2026-02-06 13:33:11 +00:00
Compare commits
3 Commits
fix/sse-se
...
update/n8n
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
140903a8ab | ||
|
|
c8c76e435d | ||
|
|
fad3437977 |
47
CHANGELOG.md
47
CHANGELOG.md
@@ -7,6 +7,53 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [2.33.6] - 2026-02-06
|
||||
|
||||
### Changed
|
||||
|
||||
- Updated n8n from 2.4.4 to 2.6.3
|
||||
- Updated n8n-core from 2.4.2 to 2.6.1
|
||||
- Updated n8n-workflow from 2.4.2 to 2.6.0
|
||||
- Updated @n8n/n8n-nodes-langchain from 2.4.3 to 2.6.2
|
||||
- Rebuilt node database with 806 nodes (544 from n8n-nodes-base, 262 from @n8n/n8n-nodes-langchain)
|
||||
- Updated README badge with new n8n version
|
||||
|
||||
## [2.33.5] - 2026-01-23
|
||||
|
||||
### Fixed
|
||||
|
||||
- **Critical memory leak: per-session database connections** (Issue #542): Fixed severe memory leak where each MCP session created its own database connection (~900MB per session)
|
||||
- Root cause: `N8NDocumentationMCPServer` called `createDatabaseAdapter()` for every new session, duplicating the entire 68MB database in memory
|
||||
- With 3-4 sessions, memory would exceed 4GB causing OOM kills every ~20 minutes
|
||||
- Fix: Implemented singleton `SharedDatabase` pattern - all sessions now share ONE database connection
|
||||
- Memory impact: Reduced from ~900MB per session to ~68MB total (shared) + ~5MB per session overhead
|
||||
- Added `getSharedDatabase()` and `releaseSharedDatabase()` for thread-safe connection management
|
||||
- Added reference counting to track active sessions using the shared connection
|
||||
|
||||
- **Session timeout optimization**: Reduced default session timeout from 30 minutes to 5 minutes
|
||||
- Faster cleanup of stale sessions reduces memory buildup
|
||||
- Configurable via `SESSION_TIMEOUT_MINUTES` environment variable
|
||||
|
||||
- **Eager instance cleanup**: When a client reconnects, previous sessions for the same instanceId are now immediately cleaned up
|
||||
- Prevents memory accumulation from reconnecting clients in multi-tenant deployments
|
||||
|
||||
- **Telemetry event listener leak**: Fixed event listeners in `TelemetryBatchProcessor` that were never removed
|
||||
- Added proper cleanup in `stop()` method
|
||||
- Added guard against multiple `start()` calls
|
||||
|
||||
### Added
|
||||
|
||||
- **New module: `src/database/shared-database.ts`** - Singleton database manager
|
||||
- `getSharedDatabase(dbPath)`: Thread-safe initialization with promise lock pattern
|
||||
- `releaseSharedDatabase(state)`: Reference counting for cleanup
|
||||
- `closeSharedDatabase()`: Graceful shutdown for process termination
|
||||
- `isSharedDatabaseInitialized()` and `getSharedDatabaseRefCount()`: Monitoring helpers
|
||||
|
||||
### Changed
|
||||
|
||||
- **`N8NDocumentationMCPServer.close()`**: Now releases shared database reference instead of closing the connection
|
||||
- **`SingleSessionHTTPServer.shutdown()`**: Calls `closeSharedDatabase()` during graceful shutdown
|
||||
|
||||
## [2.33.4] - 2026-01-21
|
||||
|
||||
### Fixed
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
[](https://www.npmjs.com/package/n8n-mcp)
|
||||
[](https://codecov.io/gh/czlonkowski/n8n-mcp)
|
||||
[](https://github.com/czlonkowski/n8n-mcp/actions)
|
||||
[](https://github.com/n8n-io/n8n)
|
||||
[](https://github.com/n8n-io/n8n)
|
||||
[](https://github.com/czlonkowski/n8n-mcp/pkgs/container/n8n-mcp)
|
||||
[](https://railway.com/deploy/n8n-mcp?referralCode=n8n-mcp)
|
||||
|
||||
|
||||
BIN
data/nodes.db
BIN
data/nodes.db
Binary file not shown.
20
dist/database/database-adapter.js
vendored
20
dist/database/database-adapter.js
vendored
@@ -311,6 +311,17 @@ class SQLJSStatement {
|
||||
this.stmt = stmt;
|
||||
this.onModify = onModify;
|
||||
this.boundParams = null;
|
||||
this.freed = false;
|
||||
}
|
||||
freeStatement() {
|
||||
if (!this.freed && this.stmt) {
|
||||
try {
|
||||
this.stmt.free();
|
||||
this.freed = true;
|
||||
}
|
||||
catch (e) {
|
||||
}
|
||||
}
|
||||
}
|
||||
run(...params) {
|
||||
try {
|
||||
@@ -331,6 +342,9 @@ class SQLJSStatement {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
finally {
|
||||
this.freeStatement();
|
||||
}
|
||||
}
|
||||
get(...params) {
|
||||
try {
|
||||
@@ -352,6 +366,9 @@ class SQLJSStatement {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
finally {
|
||||
this.freeStatement();
|
||||
}
|
||||
}
|
||||
all(...params) {
|
||||
try {
|
||||
@@ -372,6 +389,9 @@ class SQLJSStatement {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
}
|
||||
finally {
|
||||
this.freeStatement();
|
||||
}
|
||||
}
|
||||
iterate(...params) {
|
||||
return this.all(...params)[Symbol.iterator]();
|
||||
|
||||
2
dist/database/database-adapter.js.map
vendored
2
dist/database/database-adapter.js.map
vendored
File diff suppressed because one or more lines are too long
2
dist/http-server-single-session.d.ts.map
vendored
2
dist/http-server-single-session.d.ts.map
vendored
@@ -1 +1 @@
|
||||
{"version":3,"file":"http-server-single-session.d.ts","sourceRoot":"","sources":["../src/http-server-single-session.ts"],"names":[],"mappings":";AAMA,OAAO,OAAO,MAAM,SAAS,CAAC;AAoB9B,OAAO,EAAE,eAAe,EAA2B,MAAM,0BAA0B,CAAC;AACpF,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAuErD,qBAAa,uBAAuB;IAElC,OAAO,CAAC,UAAU,CAA8D;IAChF,OAAO,CAAC,OAAO,CAA0D;IACzE,OAAO,CAAC,eAAe,CAAsE;IAC7F,OAAO,CAAC,eAAe,CAA4D;IACnF,OAAO,CAAC,kBAAkB,CAAyC;IACnE,OAAO,CAAC,OAAO,CAAwB;IACvC,OAAO,CAAC,cAAc,CAAwB;IAC9C,OAAO,CAAC,aAAa,CAAM;IAC3B,OAAO,CAAC,cAAc,CAAkB;IACxC,OAAO,CAAC,SAAS,CAAuB;IACxC,OAAO,CAAC,YAAY,CAA+B;;IAcnD,OAAO,CAAC,mBAAmB;IAmB3B,OAAO,CAAC,sBAAsB;YAqChB,aAAa;IAuC3B,OAAO,CAAC,qBAAqB;IAO7B,OAAO,CAAC,gBAAgB;IAkBxB,OAAO,CAAC,gBAAgB;IASxB,OAAO,CAAC,sBAAsB;IAkC9B,OAAO,CAAC,mBAAmB;YASb,oBAAoB;YAwBpB,oBAAoB;IAwBlC,OAAO,CAAC,iBAAiB;IAsBzB,OAAO,CAAC,aAAa;IA2BrB,OAAO,CAAC,mBAAmB;IAoDrB,aAAa,CACjB,GAAG,EAAE,OAAO,CAAC,OAAO,EACpB,GAAG,EAAE,OAAO,CAAC,QAAQ,EACrB,eAAe,CAAC,EAAE,eAAe,GAChC,OAAO,CAAC,IAAI,CAAC;YAmOF,eAAe;IA8C7B,OAAO,CAAC,SAAS;IAYjB,OAAO,CAAC,gBAAgB;IASlB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAgnBtB,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;IAkD/B,cAAc,IAAI;QAChB,MAAM,EAAE,OAAO,CAAC;QAChB,SAAS,CAAC,EAAE,MAAM,CAAC;QACnB,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,QAAQ,CAAC,EAAE;YACT,KAAK,EAAE,MAAM,CAAC;YACd,MAAM,EAAE,MAAM,CAAC;YACf,OAAO,EAAE,MAAM,CAAC;YAChB,GAAG,EAAE,MAAM,CAAC;YACZ,UAAU,EAAE,MAAM,EAAE,CAAC;SACtB,CAAC;KACH;IAmDM,kBAAkB,IAAI,YAAY,EAAE;IAoEpC,mBAAmB,CAAC,QAAQ,EAAE,YAAY,EAAE,GAAG,MAAM;CAsG7D"}
|
||||
{"version":3,"file":"http-server-single-session.d.ts","sourceRoot":"","sources":["../src/http-server-single-session.ts"],"names":[],"mappings":";AAMA,OAAO,OAAO,MAAM,SAAS,CAAC;AAoB9B,OAAO,EAAE,eAAe,EAA2B,MAAM,0BAA0B,CAAC;AACpF,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAwErD,qBAAa,uBAAuB;IAElC,OAAO,CAAC,UAAU,CAA8D;IAChF,OAAO,CAAC,OAAO,CAA0D;IACzE,OAAO,CAAC,eAAe,CAAsE;IAC7F,OAAO,CAAC,eAAe,CAA4D;IACnF,OAAO,CAAC,kBAAkB,CAAyC;IACnE,OAAO,CAAC,OAAO,CAAwB;IACvC,OAAO,CAAC,cAAc,CAAwB;IAC9C,OAAO,CAAC,aAAa,CAAM;IAI3B,OAAO,CAAC,cAAc,CAER;IACd,OAAO,CAAC,SAAS,CAAuB;IACxC,OAAO,CAAC,YAAY,CAA+B;;IAcnD,OAAO,CAAC,mBAAmB;IAmB3B,OAAO,CAAC,sBAAsB;YAqChB,aAAa;IAuC3B,OAAO,CAAC,qBAAqB;IAO7B,OAAO,CAAC,gBAAgB;IAkBxB,OAAO,CAAC,gBAAgB;IASxB,OAAO,CAAC,sBAAsB;IAkC9B,OAAO,CAAC,mBAAmB;YASb,oBAAoB;YAwBpB,oBAAoB;IAwBlC,OAAO,CAAC,iBAAiB;IAsBzB,OAAO,CAAC,aAAa;IA2BrB,OAAO,CAAC,mBAAmB;IAoDrB,aAAa,CACjB,GAAG,EAAE,OAAO,CAAC,OAAO,EACpB,GAAG,EAAE,OAAO,CAAC,QAAQ,EACrB,eAAe,CAAC,EAAE,eAAe,GAChC,OAAO,CAAC,IAAI,CAAC;YA0PF,eAAe;IA4D7B,OAAO,CAAC,SAAS;IAYjB,OAAO,CAAC,gBAAgB;IASlB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAgnBtB,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;IA2D/B,cAAc,IAAI;QAChB,MAAM,EAAE,OAAO,CAAC;QAChB,SAAS,CAAC,EAAE,MAAM,CAAC;QACnB,GAAG,CAAC,EAAE,MAAM,CAAC;QACb,QAAQ,CAAC,EAAE;YACT,KAAK,EAAE,MAAM,CAAC;YACd,MAAM,EAAE,MAAM,CAAC;YACf,OAAO,EAAE,MAAM,CAAC;YAChB,GAAG,EAAE,MAAM,CAAC;YACZ,UAAU,EAAE,MAAM,EAAE,CAAC;SACtB,CAAC;KACH;IAmDM,kBAAkB,IAAI,YAAY,EAAE;IAoEpC,mBAAmB,CAAC,QAAQ,EAAE,YAAY,EAAE,GAAG,MAAM;CAsG7D"}
|
||||
44
dist/http-server-single-session.js
vendored
44
dist/http-server-single-session.js
vendored
@@ -22,6 +22,7 @@ const crypto_1 = require("crypto");
|
||||
const types_js_1 = require("@modelcontextprotocol/sdk/types.js");
|
||||
const protocol_version_1 = require("./utils/protocol-version");
|
||||
const instance_context_1 = require("./types/instance-context");
|
||||
const shared_database_1 = require("./database/shared-database");
|
||||
dotenv_1.default.config();
|
||||
const DEFAULT_PROTOCOL_VERSION = protocol_version_1.STANDARD_PROTOCOL_VERSION;
|
||||
const MAX_SESSIONS = Math.max(1, parseInt(process.env.N8N_MCP_MAX_SESSIONS || '100', 10));
|
||||
@@ -52,7 +53,7 @@ class SingleSessionHTTPServer {
|
||||
this.contextSwitchLocks = new Map();
|
||||
this.session = null;
|
||||
this.consoleManager = new console_manager_1.ConsoleManager();
|
||||
this.sessionTimeout = 30 * 60 * 1000;
|
||||
this.sessionTimeout = parseInt(process.env.SESSION_TIMEOUT_MINUTES || '5', 10) * 60 * 1000;
|
||||
this.authToken = null;
|
||||
this.cleanupTimer = null;
|
||||
this.validateEnvironment();
|
||||
@@ -290,6 +291,25 @@ class SingleSessionHTTPServer {
|
||||
return;
|
||||
}
|
||||
logger_1.logger.info('handleRequest: Creating new transport for initialize request');
|
||||
if (instanceContext?.instanceId) {
|
||||
const sessionsToRemove = [];
|
||||
for (const [existingSessionId, context] of Object.entries(this.sessionContexts)) {
|
||||
if (context?.instanceId === instanceContext.instanceId) {
|
||||
sessionsToRemove.push(existingSessionId);
|
||||
}
|
||||
}
|
||||
for (const oldSessionId of sessionsToRemove) {
|
||||
if (!this.transports[oldSessionId]) {
|
||||
continue;
|
||||
}
|
||||
logger_1.logger.info('Cleaning up previous session for instance', {
|
||||
instanceId: instanceContext.instanceId,
|
||||
oldSession: oldSessionId,
|
||||
reason: 'instance_reconnect'
|
||||
});
|
||||
await this.removeSession(oldSessionId, 'instance_reconnect');
|
||||
}
|
||||
}
|
||||
let sessionIdToUse;
|
||||
const isMultiTenantEnabled = process.env.ENABLE_MULTI_TENANT === 'true';
|
||||
const sessionStrategy = process.env.MULTI_TENANT_SESSION_STRATEGY || 'instance';
|
||||
@@ -434,12 +454,21 @@ class SingleSessionHTTPServer {
|
||||
}
|
||||
async resetSessionSSE(res) {
|
||||
if (this.session) {
|
||||
const sessionId = this.session.sessionId;
|
||||
logger_1.logger.info('Closing previous session for SSE', { sessionId });
|
||||
if (this.session.server && typeof this.session.server.close === 'function') {
|
||||
try {
|
||||
await this.session.server.close();
|
||||
}
|
||||
catch (serverError) {
|
||||
logger_1.logger.warn('Error closing server for SSE session', { sessionId, error: serverError });
|
||||
}
|
||||
}
|
||||
try {
|
||||
logger_1.logger.info('Closing previous session for SSE', { sessionId: this.session.sessionId });
|
||||
await this.session.transport.close();
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.warn('Error closing previous session:', error);
|
||||
catch (transportError) {
|
||||
logger_1.logger.warn('Error closing transport for SSE session', { sessionId, error: transportError });
|
||||
}
|
||||
}
|
||||
try {
|
||||
@@ -1014,6 +1043,13 @@ class SingleSessionHTTPServer {
|
||||
});
|
||||
});
|
||||
}
|
||||
try {
|
||||
await (0, shared_database_1.closeSharedDatabase)();
|
||||
logger_1.logger.info('Shared database closed');
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.warn('Error closing shared database:', error);
|
||||
}
|
||||
logger_1.logger.info('Single-Session HTTP server shutdown completed');
|
||||
}
|
||||
getSessionInfo() {
|
||||
|
||||
2
dist/http-server-single-session.js.map
vendored
2
dist/http-server-single-session.js.map
vendored
File diff suppressed because one or more lines are too long
3
dist/mcp/server.d.ts
vendored
3
dist/mcp/server.d.ts
vendored
@@ -13,6 +13,9 @@ export declare class N8NDocumentationMCPServer {
|
||||
private previousToolTimestamp;
|
||||
private earlyLogger;
|
||||
private disabledToolsCache;
|
||||
private useSharedDatabase;
|
||||
private sharedDbState;
|
||||
private isShutdown;
|
||||
constructor(instanceContext?: InstanceContext, earlyLogger?: EarlyErrorLogger);
|
||||
close(): Promise<void>;
|
||||
private initializeDatabase;
|
||||
|
||||
2
dist/mcp/server.d.ts.map
vendored
2
dist/mcp/server.d.ts.map
vendored
@@ -1 +1 @@
|
||||
{"version":3,"file":"server.d.ts","sourceRoot":"","sources":["../../src/mcp/server.ts"],"names":[],"mappings":"AAsCA,OAAO,EAAE,eAAe,EAAE,MAAM,2BAA2B,CAAC;AAE5D,OAAO,EAAE,gBAAgB,EAAE,MAAM,iCAAiC,CAAC;AAmGnE,qBAAa,yBAAyB;IACpC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,EAAE,CAAgC;IAC1C,OAAO,CAAC,UAAU,CAA+B;IACjD,OAAO,CAAC,eAAe,CAAgC;IACvD,OAAO,CAAC,WAAW,CAAgB;IACnC,OAAO,CAAC,KAAK,CAAqB;IAClC,OAAO,CAAC,UAAU,CAAa;IAC/B,OAAO,CAAC,eAAe,CAAC,CAAkB;IAC1C,OAAO,CAAC,YAAY,CAAuB;IAC3C,OAAO,CAAC,qBAAqB,CAAsB;IACnD,OAAO,CAAC,WAAW,CAAiC;IACpD,OAAO,CAAC,kBAAkB,CAA4B;gBAE1C,eAAe,CAAC,EAAE,eAAe,EAAE,WAAW,CAAC,EAAE,gBAAgB;IAiGvE,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;YA6Bd,kBAAkB;YAwClB,wBAAwB;IA0BtC,OAAO,CAAC,kBAAkB;YA6CZ,iBAAiB;IAa/B,OAAO,CAAC,eAAe,CAAkB;YAE3B,sBAAsB;IAgDpC,OAAO,CAAC,gBAAgB;IAqCxB,OAAO,CAAC,aAAa;IAoTrB,OAAO,CAAC,wBAAwB;IAoFhC,OAAO,CAAC,kBAAkB;IAqE1B,OAAO,CAAC,uBAAuB;IAwB/B,OAAO,CAAC,qBAAqB;YAoTf,SAAS;YA2DT,WAAW;YAkFX,WAAW;YA0CX,cAAc;YA8Md,gBAAgB;IAqD9B,OAAO,CAAC,mBAAmB;IAwE3B,OAAO,CAAC,eAAe;YAsBT,eAAe;IA2L7B,OAAO,CAAC,kBAAkB;IAQ1B,OAAO,CAAC,uBAAuB;IA0D/B,OAAO,CAAC,iBAAiB;YAqFX,WAAW;YAgCX,oBAAoB;IAuFlC,OAAO,CAAC,aAAa;YAQP,qBAAqB;YAwDrB,iBAAiB;YAiKjB,OAAO;YAgDP,cAAc;YAwFd,iBAAiB;IAqC/B,OAAO,CAAC,iBAAiB;IA0BzB,OAAO,CAAC,iBAAiB;IA0BzB,OAAO,CAAC,eAAe;IAwCvB,OAAO,CAAC,kBAAkB;IAiC1B,OAAO,CAAC,aAAa;IAoCrB,OAAO,CAAC,0BAA0B;IAgClC,OAAO,CAAC,4BAA4B;YAKtB,oBAAoB;IAsDlC,OAAO,CAAC,gBAAgB;YAiBV,SAAS;YA6CT,kBAAkB;YAqElB,uBAAuB;YAsDvB,iBAAiB;IAqE/B,OAAO,CAAC,qBAAqB;IA8C7B,OAAO,CAAC,uBAAuB;IA4D/B,OAAO,CAAC,wBAAwB;IAkChC,OAAO,CAAC,iBAAiB;YAoDX,mBAAmB;YAoEnB,qBAAqB;IAS7B,OAAO,CAAC,SAAS,EAAE,GAAG,GAAG,OAAO,CAAC,IAAI,CAAC;YAS9B,aAAa;YAcb,iBAAiB;YAoBjB,WAAW;YAwBX,eAAe;YAqBf,mBAAmB;YAwBnB,yBAAyB;IA4CvC,OAAO,CAAC,kBAAkB;YAiBZ,gBAAgB;YA6HhB,2BAA2B;YAiE3B,2BAA2B;IAyEnC,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IA0BpB,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;CAuBhC"}
|
||||
{"version":3,"file":"server.d.ts","sourceRoot":"","sources":["../../src/mcp/server.ts"],"names":[],"mappings":"AAuCA,OAAO,EAAE,eAAe,EAAE,MAAM,2BAA2B,CAAC;AAE5D,OAAO,EAAE,gBAAgB,EAAE,MAAM,iCAAiC,CAAC;AAmGnE,qBAAa,yBAAyB;IACpC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,EAAE,CAAgC;IAC1C,OAAO,CAAC,UAAU,CAA+B;IACjD,OAAO,CAAC,eAAe,CAAgC;IACvD,OAAO,CAAC,WAAW,CAAgB;IACnC,OAAO,CAAC,KAAK,CAAqB;IAClC,OAAO,CAAC,UAAU,CAAa;IAC/B,OAAO,CAAC,eAAe,CAAC,CAAkB;IAC1C,OAAO,CAAC,YAAY,CAAuB;IAC3C,OAAO,CAAC,qBAAqB,CAAsB;IACnD,OAAO,CAAC,WAAW,CAAiC;IACpD,OAAO,CAAC,kBAAkB,CAA4B;IACtD,OAAO,CAAC,iBAAiB,CAAkB;IAC3C,OAAO,CAAC,aAAa,CAAoC;IACzD,OAAO,CAAC,UAAU,CAAkB;gBAExB,eAAe,CAAC,EAAE,eAAe,EAAE,WAAW,CAAC,EAAE,gBAAgB;IAqGvE,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;YA+Cd,kBAAkB;YAiDlB,wBAAwB;IA0BtC,OAAO,CAAC,kBAAkB;YA6CZ,iBAAiB;IAa/B,OAAO,CAAC,eAAe,CAAkB;YAE3B,sBAAsB;IAgDpC,OAAO,CAAC,gBAAgB;IAqCxB,OAAO,CAAC,aAAa;IAoTrB,OAAO,CAAC,wBAAwB;IAoFhC,OAAO,CAAC,kBAAkB;IAqE1B,OAAO,CAAC,uBAAuB;IAwB/B,OAAO,CAAC,qBAAqB;YAoTf,SAAS;YA2DT,WAAW;YAkFX,WAAW;YA0CX,cAAc;YA8Md,gBAAgB;IAqD9B,OAAO,CAAC,mBAAmB;IAwE3B,OAAO,CAAC,eAAe;YAsBT,eAAe;IA2L7B,OAAO,CAAC,kBAAkB;IAQ1B,OAAO,CAAC,uBAAuB;IA0D/B,OAAO,CAAC,iBAAiB;YAqFX,WAAW;YAgCX,oBAAoB;IAuFlC,OAAO,CAAC,aAAa;YAQP,qBAAqB;YAwDrB,iBAAiB;YAiKjB,OAAO;YAgDP,cAAc;YAwFd,iBAAiB;IAqC/B,OAAO,CAAC,iBAAiB;IA0BzB,OAAO,CAAC,iBAAiB;IA0BzB,OAAO,CAAC,eAAe;IAwCvB,OAAO,CAAC,kBAAkB;IAiC1B,OAAO,CAAC,aAAa;IAoCrB,OAAO,CAAC,0BAA0B;IAgClC,OAAO,CAAC,4BAA4B;YAKtB,oBAAoB;IAsDlC,OAAO,CAAC,gBAAgB;YAiBV,SAAS;YA6CT,kBAAkB;YAqElB,uBAAuB;YAsDvB,iBAAiB;IAqE/B,OAAO,CAAC,qBAAqB;IA8C7B,OAAO,CAAC,uBAAuB;IA4D/B,OAAO,CAAC,wBAAwB;IAkChC,OAAO,CAAC,iBAAiB;YAoDX,mBAAmB;YAoEnB,qBAAqB;IAS7B,OAAO,CAAC,SAAS,EAAE,GAAG,GAAG,OAAO,CAAC,IAAI,CAAC;YAS9B,aAAa;YAcb,iBAAiB;YAoBjB,WAAW;YAwBX,eAAe;YAqBf,mBAAmB;YAwBnB,yBAAyB;IA4CvC,OAAO,CAAC,kBAAkB;YAiBZ,gBAAgB;YA6HhB,2BAA2B;YAiE3B,2BAA2B;IAyEnC,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IA0BpB,QAAQ,IAAI,OAAO,CAAC,IAAI,CAAC;CAgEhC"}
|
||||
76
dist/mcp/server.js
vendored
76
dist/mcp/server.js
vendored
@@ -49,6 +49,7 @@ const workflow_examples_1 = require("./workflow-examples");
|
||||
const logger_1 = require("../utils/logger");
|
||||
const node_repository_1 = require("../database/node-repository");
|
||||
const database_adapter_1 = require("../database/database-adapter");
|
||||
const shared_database_1 = require("../database/shared-database");
|
||||
const property_filter_1 = require("../services/property-filter");
|
||||
const task_templates_1 = require("../services/task-templates");
|
||||
const config_validator_1 = require("../services/config-validator");
|
||||
@@ -80,6 +81,9 @@ class N8NDocumentationMCPServer {
|
||||
this.previousToolTimestamp = Date.now();
|
||||
this.earlyLogger = null;
|
||||
this.disabledToolsCache = null;
|
||||
this.useSharedDatabase = false;
|
||||
this.sharedDbState = null;
|
||||
this.isShutdown = false;
|
||||
this.dbHealthChecked = false;
|
||||
this.instanceContext = instanceContext;
|
||||
this.earlyLogger = earlyLogger || null;
|
||||
@@ -149,10 +153,22 @@ class N8NDocumentationMCPServer {
|
||||
this.setupHandlers();
|
||||
}
|
||||
async close() {
|
||||
try {
|
||||
await this.initialized;
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.debug('Initialization had failed, proceeding with cleanup', {
|
||||
error: error instanceof Error ? error.message : String(error)
|
||||
});
|
||||
}
|
||||
try {
|
||||
await this.server.close();
|
||||
this.cache.destroy();
|
||||
if (this.db) {
|
||||
if (this.useSharedDatabase && this.sharedDbState) {
|
||||
(0, shared_database_1.releaseSharedDatabase)(this.sharedDbState);
|
||||
logger_1.logger.debug('Released shared database reference');
|
||||
}
|
||||
else if (this.db) {
|
||||
try {
|
||||
this.db.close();
|
||||
}
|
||||
@@ -166,6 +182,7 @@ class N8NDocumentationMCPServer {
|
||||
this.repository = null;
|
||||
this.templateService = null;
|
||||
this.earlyLogger = null;
|
||||
this.sharedDbState = null;
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.warn('Error closing MCP server', { error: error instanceof Error ? error.message : String(error) });
|
||||
@@ -177,17 +194,27 @@ class N8NDocumentationMCPServer {
|
||||
this.earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.DATABASE_CONNECTING);
|
||||
}
|
||||
logger_1.logger.debug('Database initialization starting...', { dbPath });
|
||||
this.db = await (0, database_adapter_1.createDatabaseAdapter)(dbPath);
|
||||
logger_1.logger.debug('Database adapter created');
|
||||
if (dbPath === ':memory:') {
|
||||
this.db = await (0, database_adapter_1.createDatabaseAdapter)(dbPath);
|
||||
logger_1.logger.debug('Database adapter created (in-memory mode)');
|
||||
await this.initializeInMemorySchema();
|
||||
logger_1.logger.debug('In-memory schema initialized');
|
||||
this.repository = new node_repository_1.NodeRepository(this.db);
|
||||
this.templateService = new template_service_1.TemplateService(this.db);
|
||||
enhanced_config_validator_1.EnhancedConfigValidator.initializeSimilarityServices(this.repository);
|
||||
this.useSharedDatabase = false;
|
||||
}
|
||||
else {
|
||||
const sharedState = await (0, shared_database_1.getSharedDatabase)(dbPath);
|
||||
this.db = sharedState.db;
|
||||
this.repository = sharedState.repository;
|
||||
this.templateService = sharedState.templateService;
|
||||
this.sharedDbState = sharedState;
|
||||
this.useSharedDatabase = true;
|
||||
logger_1.logger.debug('Using shared database connection');
|
||||
}
|
||||
this.repository = new node_repository_1.NodeRepository(this.db);
|
||||
logger_1.logger.debug('Node repository initialized');
|
||||
this.templateService = new template_service_1.TemplateService(this.db);
|
||||
logger_1.logger.debug('Template service initialized');
|
||||
enhanced_config_validator_1.EnhancedConfigValidator.initializeSimilarityServices(this.repository);
|
||||
logger_1.logger.debug('Similarity services initialized');
|
||||
if (this.earlyLogger) {
|
||||
this.earlyLogger.logCheckpoint(startup_checkpoints_1.STARTUP_CHECKPOINTS.DATABASE_CONNECTED);
|
||||
@@ -2889,7 +2916,26 @@ Full documentation is being prepared. For now, use get_node_essentials for confi
|
||||
process.stdin.resume();
|
||||
}
|
||||
async shutdown() {
|
||||
if (this.isShutdown) {
|
||||
logger_1.logger.debug('Shutdown already called, skipping');
|
||||
return;
|
||||
}
|
||||
this.isShutdown = true;
|
||||
logger_1.logger.info('Shutting down MCP server...');
|
||||
try {
|
||||
await this.initialized;
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.debug('Initialization had failed, proceeding with cleanup', {
|
||||
error: error instanceof Error ? error.message : String(error)
|
||||
});
|
||||
}
|
||||
try {
|
||||
await this.server.close();
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error('Error closing MCP server:', error);
|
||||
}
|
||||
if (this.cache) {
|
||||
try {
|
||||
this.cache.destroy();
|
||||
@@ -2899,15 +2945,29 @@ Full documentation is being prepared. For now, use get_node_essentials for confi
|
||||
logger_1.logger.error('Error cleaning up cache:', error);
|
||||
}
|
||||
}
|
||||
if (this.db) {
|
||||
if (this.useSharedDatabase && this.sharedDbState) {
|
||||
try {
|
||||
await this.db.close();
|
||||
(0, shared_database_1.releaseSharedDatabase)(this.sharedDbState);
|
||||
logger_1.logger.info('Released shared database reference');
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error('Error releasing shared database:', error);
|
||||
}
|
||||
}
|
||||
else if (this.db) {
|
||||
try {
|
||||
this.db.close();
|
||||
logger_1.logger.info('Database connection closed');
|
||||
}
|
||||
catch (error) {
|
||||
logger_1.logger.error('Error closing database:', error);
|
||||
}
|
||||
}
|
||||
this.db = null;
|
||||
this.repository = null;
|
||||
this.templateService = null;
|
||||
this.earlyLogger = null;
|
||||
this.sharedDbState = null;
|
||||
}
|
||||
}
|
||||
exports.N8NDocumentationMCPServer = N8NDocumentationMCPServer;
|
||||
|
||||
2
dist/mcp/server.js.map
vendored
2
dist/mcp/server.js.map
vendored
File diff suppressed because one or more lines are too long
4
dist/services/n8n-validation.d.ts
vendored
4
dist/services/n8n-validation.d.ts
vendored
@@ -26,10 +26,10 @@ export declare const workflowNodeSchema: z.ZodObject<{
|
||||
parameters: Record<string, unknown>;
|
||||
credentials?: Record<string, unknown> | undefined;
|
||||
retryOnFail?: boolean | undefined;
|
||||
continueOnFail?: boolean | undefined;
|
||||
maxTries?: number | undefined;
|
||||
waitBetweenTries?: number | undefined;
|
||||
alwaysOutputData?: boolean | undefined;
|
||||
continueOnFail?: boolean | undefined;
|
||||
executeOnce?: boolean | undefined;
|
||||
disabled?: boolean | undefined;
|
||||
notes?: string | undefined;
|
||||
@@ -43,10 +43,10 @@ export declare const workflowNodeSchema: z.ZodObject<{
|
||||
parameters: Record<string, unknown>;
|
||||
credentials?: Record<string, unknown> | undefined;
|
||||
retryOnFail?: boolean | undefined;
|
||||
continueOnFail?: boolean | undefined;
|
||||
maxTries?: number | undefined;
|
||||
waitBetweenTries?: number | undefined;
|
||||
alwaysOutputData?: boolean | undefined;
|
||||
continueOnFail?: boolean | undefined;
|
||||
executeOnce?: boolean | undefined;
|
||||
disabled?: boolean | undefined;
|
||||
notes?: string | undefined;
|
||||
|
||||
2
dist/telemetry/batch-processor.d.ts
vendored
2
dist/telemetry/batch-processor.d.ts
vendored
@@ -12,6 +12,8 @@ export declare class TelemetryBatchProcessor {
|
||||
private flushTimes;
|
||||
private deadLetterQueue;
|
||||
private readonly maxDeadLetterSize;
|
||||
private eventListeners;
|
||||
private started;
|
||||
constructor(supabase: SupabaseClient | null, isEnabled: () => boolean);
|
||||
start(): void;
|
||||
stop(): void;
|
||||
|
||||
2
dist/telemetry/batch-processor.d.ts.map
vendored
2
dist/telemetry/batch-processor.d.ts.map
vendored
@@ -1 +1 @@
|
||||
{"version":3,"file":"batch-processor.d.ts","sourceRoot":"","sources":["../../src/telemetry/batch-processor.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,cAAc,EAAE,MAAM,uBAAuB,CAAC;AACvD,OAAO,EAAE,cAAc,EAAE,iBAAiB,EAAE,sBAAsB,EAAoB,gBAAgB,EAAE,MAAM,mBAAmB,CAAC;AAoClI,qBAAa,uBAAuB;IAoBhC,OAAO,CAAC,QAAQ;IAChB,OAAO,CAAC,SAAS;IApBnB,OAAO,CAAC,UAAU,CAAC,CAAiB;IACpC,OAAO,CAAC,gBAAgB,CAAkB;IAC1C,OAAO,CAAC,mBAAmB,CAAkB;IAC7C,OAAO,CAAC,mBAAmB,CAAkB;IAC7C,OAAO,CAAC,cAAc,CAA0B;IAChD,OAAO,CAAC,OAAO,CAQb;IACF,OAAO,CAAC,UAAU,CAAgB;IAClC,OAAO,CAAC,eAAe,CAAuE;IAC9F,OAAO,CAAC,QAAQ,CAAC,iBAAiB,CAAO;gBAG/B,QAAQ,EAAE,cAAc,GAAG,IAAI,EAC/B,SAAS,EAAE,MAAM,OAAO;IAQlC,KAAK,IAAI,IAAI;IA+Bb,IAAI,IAAI,IAAI;IAWN,KAAK,CAAC,MAAM,CAAC,EAAE,cAAc,EAAE,EAAE,SAAS,CAAC,EAAE,iBAAiB,EAAE,EAAE,SAAS,CAAC,EAAE,sBAAsB,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC;YAgD9G,WAAW;YAmDX,cAAc;YAuDd,cAAc;YAiEd,gBAAgB;IAgD9B,OAAO,CAAC,aAAa;IAarB,OAAO,CAAC,oBAAoB;IAiB5B,OAAO,CAAC,oBAAoB;YAmBd,sBAAsB;IAgCpC,OAAO,CAAC,eAAe;IAiBvB,UAAU,IAAI,gBAAgB,GAAG;QAAE,mBAAmB,EAAE,GAAG,CAAC;QAAC,mBAAmB,EAAE,MAAM,CAAA;KAAE;IAW1F,YAAY,IAAI,IAAI;CAarB"}
|
||||
{"version":3,"file":"batch-processor.d.ts","sourceRoot":"","sources":["../../src/telemetry/batch-processor.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,cAAc,EAAE,MAAM,uBAAuB,CAAC;AACvD,OAAO,EAAE,cAAc,EAAE,iBAAiB,EAAE,sBAAsB,EAAoB,gBAAgB,EAAE,MAAM,mBAAmB,CAAC;AAoClI,qBAAa,uBAAuB;IA2BhC,OAAO,CAAC,QAAQ;IAChB,OAAO,CAAC,SAAS;IA3BnB,OAAO,CAAC,UAAU,CAAC,CAAiB;IACpC,OAAO,CAAC,gBAAgB,CAAkB;IAC1C,OAAO,CAAC,mBAAmB,CAAkB;IAC7C,OAAO,CAAC,mBAAmB,CAAkB;IAC7C,OAAO,CAAC,cAAc,CAA0B;IAChD,OAAO,CAAC,OAAO,CAQb;IACF,OAAO,CAAC,UAAU,CAAgB;IAClC,OAAO,CAAC,eAAe,CAAuE;IAC9F,OAAO,CAAC,QAAQ,CAAC,iBAAiB,CAAO;IAEzC,OAAO,CAAC,cAAc,CAIf;IACP,OAAO,CAAC,OAAO,CAAkB;gBAGvB,QAAQ,EAAE,cAAc,GAAG,IAAI,EAC/B,SAAS,EAAE,MAAM,OAAO;IAQlC,KAAK,IAAI,IAAI;IA0Cb,IAAI,IAAI,IAAI;IAyBN,KAAK,CAAC,MAAM,CAAC,EAAE,cAAc,EAAE,EAAE,SAAS,CAAC,EAAE,iBAAiB,EAAE,EAAE,SAAS,CAAC,EAAE,sBAAsB,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC;YAgD9G,WAAW;YAmDX,cAAc;YAuDd,cAAc;YAiEd,gBAAgB;IAgD9B,OAAO,CAAC,aAAa;IAarB,OAAO,CAAC,oBAAoB;IAiB5B,OAAO,CAAC,oBAAoB;YAmBd,sBAAsB;IAgCpC,OAAO,CAAC,eAAe;IAiBvB,UAAU,IAAI,gBAAgB,GAAG;QAAE,mBAAmB,EAAE,GAAG,CAAC;QAAC,mBAAmB,EAAE,MAAM,CAAA;KAAE;IAW1F,YAAY,IAAI,IAAI;CAarB"}
|
||||
31
dist/telemetry/batch-processor.js
vendored
31
dist/telemetry/batch-processor.js
vendored
@@ -33,26 +33,36 @@ class TelemetryBatchProcessor {
|
||||
this.flushTimes = [];
|
||||
this.deadLetterQueue = [];
|
||||
this.maxDeadLetterSize = 100;
|
||||
this.eventListeners = {};
|
||||
this.started = false;
|
||||
this.circuitBreaker = new telemetry_error_1.TelemetryCircuitBreaker();
|
||||
}
|
||||
start() {
|
||||
if (!this.isEnabled() || !this.supabase)
|
||||
return;
|
||||
if (this.started) {
|
||||
logger_1.logger.debug('Telemetry batch processor already started, skipping');
|
||||
return;
|
||||
}
|
||||
this.flushTimer = setInterval(() => {
|
||||
this.flush();
|
||||
}, telemetry_types_1.TELEMETRY_CONFIG.BATCH_FLUSH_INTERVAL);
|
||||
if (typeof this.flushTimer === 'object' && 'unref' in this.flushTimer) {
|
||||
this.flushTimer.unref();
|
||||
}
|
||||
process.on('beforeExit', () => this.flush());
|
||||
process.on('SIGINT', () => {
|
||||
this.eventListeners.beforeExit = () => this.flush();
|
||||
this.eventListeners.sigint = () => {
|
||||
this.flush();
|
||||
process.exit(0);
|
||||
});
|
||||
process.on('SIGTERM', () => {
|
||||
};
|
||||
this.eventListeners.sigterm = () => {
|
||||
this.flush();
|
||||
process.exit(0);
|
||||
});
|
||||
};
|
||||
process.on('beforeExit', this.eventListeners.beforeExit);
|
||||
process.on('SIGINT', this.eventListeners.sigint);
|
||||
process.on('SIGTERM', this.eventListeners.sigterm);
|
||||
this.started = true;
|
||||
logger_1.logger.debug('Telemetry batch processor started');
|
||||
}
|
||||
stop() {
|
||||
@@ -60,6 +70,17 @@ class TelemetryBatchProcessor {
|
||||
clearInterval(this.flushTimer);
|
||||
this.flushTimer = undefined;
|
||||
}
|
||||
if (this.eventListeners.beforeExit) {
|
||||
process.removeListener('beforeExit', this.eventListeners.beforeExit);
|
||||
}
|
||||
if (this.eventListeners.sigint) {
|
||||
process.removeListener('SIGINT', this.eventListeners.sigint);
|
||||
}
|
||||
if (this.eventListeners.sigterm) {
|
||||
process.removeListener('SIGTERM', this.eventListeners.sigterm);
|
||||
}
|
||||
this.eventListeners = {};
|
||||
this.started = false;
|
||||
logger_1.logger.debug('Telemetry batch processor stopped');
|
||||
}
|
||||
async flush(events, workflows, mutations) {
|
||||
|
||||
2
dist/telemetry/batch-processor.js.map
vendored
2
dist/telemetry/batch-processor.js.map
vendored
File diff suppressed because one or more lines are too long
7384
package-lock.json
generated
7384
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
10
package.json
10
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "n8n-mcp",
|
||||
"version": "2.33.4",
|
||||
"version": "2.33.6",
|
||||
"description": "Integration between n8n workflow automation and Model Context Protocol (MCP)",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
@@ -150,16 +150,16 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@modelcontextprotocol/sdk": "1.20.1",
|
||||
"@n8n/n8n-nodes-langchain": "^2.4.3",
|
||||
"@n8n/n8n-nodes-langchain": "^2.6.2",
|
||||
"@supabase/supabase-js": "^2.57.4",
|
||||
"dotenv": "^16.5.0",
|
||||
"express": "^5.1.0",
|
||||
"express-rate-limit": "^7.1.5",
|
||||
"form-data": "^4.0.5",
|
||||
"lru-cache": "^11.2.1",
|
||||
"n8n": "^2.4.4",
|
||||
"n8n-core": "^2.4.2",
|
||||
"n8n-workflow": "^2.4.2",
|
||||
"n8n": "^2.6.3",
|
||||
"n8n-core": "^2.6.1",
|
||||
"n8n-workflow": "^2.6.0",
|
||||
"openai": "^4.77.0",
|
||||
"sql.js": "^1.13.0",
|
||||
"tslib": "^2.6.2",
|
||||
|
||||
@@ -419,12 +419,36 @@ class BetterSQLiteStatement implements PreparedStatement {
|
||||
|
||||
/**
|
||||
* Statement wrapper for sql.js
|
||||
*
|
||||
* IMPORTANT: sql.js requires explicit memory management via Statement.free().
|
||||
* This wrapper automatically frees statement memory after each operation
|
||||
* to prevent memory leaks during sustained traffic.
|
||||
*
|
||||
* See: https://sql.js.org/documentation/Statement.html
|
||||
* "After calling db.prepare() you must manually free the assigned memory
|
||||
* by calling Statement.free()."
|
||||
*/
|
||||
class SQLJSStatement implements PreparedStatement {
|
||||
private boundParams: any = null;
|
||||
|
||||
private freed: boolean = false;
|
||||
|
||||
constructor(private stmt: any, private onModify: () => void) {}
|
||||
|
||||
|
||||
/**
|
||||
* Free the underlying sql.js statement memory.
|
||||
* Safe to call multiple times - subsequent calls are no-ops.
|
||||
*/
|
||||
private freeStatement(): void {
|
||||
if (!this.freed && this.stmt) {
|
||||
try {
|
||||
this.stmt.free();
|
||||
this.freed = true;
|
||||
} catch (e) {
|
||||
// Statement may already be freed or invalid - ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
run(...params: any[]): RunResult {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
@@ -433,10 +457,10 @@ class SQLJSStatement implements PreparedStatement {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
this.stmt.run();
|
||||
this.onModify();
|
||||
|
||||
|
||||
// sql.js doesn't provide changes/lastInsertRowid easily
|
||||
return {
|
||||
changes: 1, // Assume success means 1 change
|
||||
@@ -445,9 +469,12 @@ class SQLJSStatement implements PreparedStatement {
|
||||
} catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
} finally {
|
||||
// Free statement memory after write operation completes
|
||||
this.freeStatement();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
get(...params: any[]): any {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
@@ -456,21 +483,24 @@ class SQLJSStatement implements PreparedStatement {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (this.stmt.step()) {
|
||||
const result = this.stmt.getAsObject();
|
||||
this.stmt.reset();
|
||||
return this.convertIntegerColumns(result);
|
||||
}
|
||||
|
||||
|
||||
this.stmt.reset();
|
||||
return undefined;
|
||||
} catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
} finally {
|
||||
// Free statement memory after read operation completes
|
||||
this.freeStatement();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
all(...params: any[]): any[] {
|
||||
try {
|
||||
if (params.length > 0) {
|
||||
@@ -479,17 +509,20 @@ class SQLJSStatement implements PreparedStatement {
|
||||
this.stmt.bind(this.boundParams);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const results: any[] = [];
|
||||
while (this.stmt.step()) {
|
||||
results.push(this.convertIntegerColumns(this.stmt.getAsObject()));
|
||||
}
|
||||
|
||||
|
||||
this.stmt.reset();
|
||||
return results;
|
||||
} catch (error) {
|
||||
this.stmt.reset();
|
||||
throw error;
|
||||
} finally {
|
||||
// Free statement memory after read operation completes
|
||||
this.freeStatement();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
197
src/database/shared-database.ts
Normal file
197
src/database/shared-database.ts
Normal file
@@ -0,0 +1,197 @@
|
||||
/**
|
||||
* Shared Database Manager - Singleton for cross-session database connection
|
||||
*
|
||||
* This module implements a singleton pattern to share a single database connection
|
||||
* across all MCP server sessions. This prevents memory leaks caused by each session
|
||||
* creating its own database connection (~900MB per session).
|
||||
*
|
||||
* Memory impact: Reduces per-session memory from ~900MB to near-zero by sharing
|
||||
* a single ~68MB database connection across all sessions.
|
||||
*
|
||||
* Issue: https://github.com/czlonkowski/n8n-mcp/issues/XXX
|
||||
*/
|
||||
|
||||
import { DatabaseAdapter, createDatabaseAdapter } from './database-adapter';
|
||||
import { NodeRepository } from './node-repository';
|
||||
import { TemplateService } from '../templates/template-service';
|
||||
import { EnhancedConfigValidator } from '../services/enhanced-config-validator';
|
||||
import { logger } from '../utils/logger';
|
||||
|
||||
/**
|
||||
* Shared database state - holds the singleton connection and services
|
||||
*/
|
||||
export interface SharedDatabaseState {
|
||||
db: DatabaseAdapter;
|
||||
repository: NodeRepository;
|
||||
templateService: TemplateService;
|
||||
dbPath: string;
|
||||
refCount: number;
|
||||
initialized: boolean;
|
||||
}
|
||||
|
||||
// Module-level singleton state
|
||||
let sharedState: SharedDatabaseState | null = null;
|
||||
let initializationPromise: Promise<SharedDatabaseState> | null = null;
|
||||
|
||||
/**
|
||||
* Get or create the shared database connection
|
||||
*
|
||||
* Thread-safe initialization using a promise lock pattern.
|
||||
* Multiple concurrent calls will wait for the same initialization.
|
||||
*
|
||||
* @param dbPath - Path to the SQLite database file
|
||||
* @returns Shared database state with connection and services
|
||||
*/
|
||||
export async function getSharedDatabase(dbPath: string): Promise<SharedDatabaseState> {
|
||||
// If already initialized with the same path, increment ref count and return
|
||||
if (sharedState && sharedState.initialized && sharedState.dbPath === dbPath) {
|
||||
sharedState.refCount++;
|
||||
logger.debug('Reusing shared database connection', {
|
||||
refCount: sharedState.refCount,
|
||||
dbPath
|
||||
});
|
||||
return sharedState;
|
||||
}
|
||||
|
||||
// If already initialized with a DIFFERENT path, this is a configuration error
|
||||
if (sharedState && sharedState.initialized && sharedState.dbPath !== dbPath) {
|
||||
logger.error('Attempted to initialize shared database with different path', {
|
||||
existingPath: sharedState.dbPath,
|
||||
requestedPath: dbPath
|
||||
});
|
||||
throw new Error(`Shared database already initialized with different path: ${sharedState.dbPath}`);
|
||||
}
|
||||
|
||||
// If initialization is in progress, wait for it
|
||||
if (initializationPromise) {
|
||||
try {
|
||||
const state = await initializationPromise;
|
||||
state.refCount++;
|
||||
logger.debug('Reusing shared database (waited for init)', {
|
||||
refCount: state.refCount,
|
||||
dbPath
|
||||
});
|
||||
return state;
|
||||
} catch (error) {
|
||||
// Initialization failed while we were waiting, clear promise and rethrow
|
||||
initializationPromise = null;
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Start new initialization
|
||||
initializationPromise = initializeSharedDatabase(dbPath);
|
||||
|
||||
try {
|
||||
const state = await initializationPromise;
|
||||
// Clear the promise on success to allow future re-initialization after close
|
||||
initializationPromise = null;
|
||||
return state;
|
||||
} catch (error) {
|
||||
// Clear promise on failure to allow retry
|
||||
initializationPromise = null;
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the shared database connection and services
|
||||
*/
|
||||
async function initializeSharedDatabase(dbPath: string): Promise<SharedDatabaseState> {
|
||||
logger.info('Initializing shared database connection', { dbPath });
|
||||
|
||||
const db = await createDatabaseAdapter(dbPath);
|
||||
const repository = new NodeRepository(db);
|
||||
const templateService = new TemplateService(db);
|
||||
|
||||
// Initialize similarity services for enhanced validation
|
||||
EnhancedConfigValidator.initializeSimilarityServices(repository);
|
||||
|
||||
sharedState = {
|
||||
db,
|
||||
repository,
|
||||
templateService,
|
||||
dbPath,
|
||||
refCount: 1,
|
||||
initialized: true
|
||||
};
|
||||
|
||||
logger.info('Shared database initialized successfully', {
|
||||
dbPath,
|
||||
refCount: sharedState.refCount
|
||||
});
|
||||
|
||||
return sharedState;
|
||||
}
|
||||
|
||||
/**
|
||||
* Release a reference to the shared database
|
||||
*
|
||||
* Decrements the reference count. Does NOT close the database
|
||||
* as it's shared across all sessions for the lifetime of the process.
|
||||
*
|
||||
* @param state - The shared database state to release
|
||||
*/
|
||||
export function releaseSharedDatabase(state: SharedDatabaseState): void {
|
||||
if (!state || !sharedState) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Guard against double-release (refCount going negative)
|
||||
if (sharedState.refCount <= 0) {
|
||||
logger.warn('Attempted to release shared database with refCount already at or below 0', {
|
||||
refCount: sharedState.refCount
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
sharedState.refCount--;
|
||||
logger.debug('Released shared database reference', {
|
||||
refCount: sharedState.refCount
|
||||
});
|
||||
|
||||
// Note: We intentionally do NOT close the database even when refCount hits 0
|
||||
// The database should remain open for the lifetime of the process to handle
|
||||
// new sessions. Only process shutdown should close it.
|
||||
}
|
||||
|
||||
/**
|
||||
* Force close the shared database (for graceful shutdown only)
|
||||
*
|
||||
* This should only be called during process shutdown, not during normal
|
||||
* session cleanup. Closing the database would break other active sessions.
|
||||
*/
|
||||
export async function closeSharedDatabase(): Promise<void> {
|
||||
if (!sharedState) {
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info('Closing shared database connection', {
|
||||
refCount: sharedState.refCount
|
||||
});
|
||||
|
||||
try {
|
||||
sharedState.db.close();
|
||||
} catch (error) {
|
||||
logger.warn('Error closing shared database', {
|
||||
error: error instanceof Error ? error.message : String(error)
|
||||
});
|
||||
}
|
||||
|
||||
sharedState = null;
|
||||
initializationPromise = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if shared database is initialized
|
||||
*/
|
||||
export function isSharedDatabaseInitialized(): boolean {
|
||||
return sharedState !== null && sharedState.initialized;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current reference count (for debugging/monitoring)
|
||||
*/
|
||||
export function getSharedDatabaseRefCount(): number {
|
||||
return sharedState?.refCount ?? 0;
|
||||
}
|
||||
@@ -26,6 +26,7 @@ import {
|
||||
} from './utils/protocol-version';
|
||||
import { InstanceContext, validateInstanceContext } from './types/instance-context';
|
||||
import { SessionState } from './types/session-state';
|
||||
import { closeSharedDatabase } from './database/shared-database';
|
||||
|
||||
dotenv.config();
|
||||
|
||||
@@ -106,7 +107,12 @@ export class SingleSessionHTTPServer {
|
||||
private session: Session | null = null; // Keep for SSE compatibility
|
||||
private consoleManager = new ConsoleManager();
|
||||
private expressServer: any;
|
||||
private sessionTimeout = 30 * 60 * 1000; // 30 minutes
|
||||
// Session timeout reduced from 30 minutes to 5 minutes for faster cleanup
|
||||
// Configurable via SESSION_TIMEOUT_MINUTES environment variable
|
||||
// This prevents memory buildup from stale sessions
|
||||
private sessionTimeout = parseInt(
|
||||
process.env.SESSION_TIMEOUT_MINUTES || '5', 10
|
||||
) * 60 * 1000;
|
||||
private authToken: string | null = null;
|
||||
private cleanupTimer: NodeJS.Timeout | null = null;
|
||||
|
||||
@@ -492,6 +498,29 @@ export class SingleSessionHTTPServer {
|
||||
// For initialize requests: always create new transport and server
|
||||
logger.info('handleRequest: Creating new transport for initialize request');
|
||||
|
||||
// EAGER CLEANUP: Remove existing sessions for the same instance
|
||||
// This prevents memory buildup when clients reconnect without proper cleanup
|
||||
if (instanceContext?.instanceId) {
|
||||
const sessionsToRemove: string[] = [];
|
||||
for (const [existingSessionId, context] of Object.entries(this.sessionContexts)) {
|
||||
if (context?.instanceId === instanceContext.instanceId) {
|
||||
sessionsToRemove.push(existingSessionId);
|
||||
}
|
||||
}
|
||||
for (const oldSessionId of sessionsToRemove) {
|
||||
// Double-check session still exists (may have been cleaned by concurrent request)
|
||||
if (!this.transports[oldSessionId]) {
|
||||
continue;
|
||||
}
|
||||
logger.info('Cleaning up previous session for instance', {
|
||||
instanceId: instanceContext.instanceId,
|
||||
oldSession: oldSessionId,
|
||||
reason: 'instance_reconnect'
|
||||
});
|
||||
await this.removeSession(oldSessionId, 'instance_reconnect');
|
||||
}
|
||||
}
|
||||
|
||||
// Generate session ID based on multi-tenant configuration
|
||||
let sessionIdToUse: string;
|
||||
|
||||
@@ -1422,7 +1451,16 @@ export class SingleSessionHTTPServer {
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
// Close the shared database connection (only during process shutdown)
|
||||
// This must happen after all sessions are closed
|
||||
try {
|
||||
await closeSharedDatabase();
|
||||
logger.info('Shared database closed');
|
||||
} catch (error) {
|
||||
logger.warn('Error closing shared database:', error);
|
||||
}
|
||||
|
||||
logger.info('Single-Session HTTP server shutdown completed');
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ import { getWorkflowExampleString } from './workflow-examples';
|
||||
import { logger } from '../utils/logger';
|
||||
import { NodeRepository } from '../database/node-repository';
|
||||
import { DatabaseAdapter, createDatabaseAdapter } from '../database/database-adapter';
|
||||
import { getSharedDatabase, releaseSharedDatabase, SharedDatabaseState } from '../database/shared-database';
|
||||
import { PropertyFilter } from '../services/property-filter';
|
||||
import { TaskTemplates } from '../services/task-templates';
|
||||
import { ConfigValidator } from '../services/config-validator';
|
||||
@@ -150,6 +151,9 @@ export class N8NDocumentationMCPServer {
|
||||
private previousToolTimestamp: number = Date.now();
|
||||
private earlyLogger: EarlyErrorLogger | null = null;
|
||||
private disabledToolsCache: Set<string> | null = null;
|
||||
private useSharedDatabase: boolean = false; // Track if using shared DB for cleanup
|
||||
private sharedDbState: SharedDatabaseState | null = null; // Reference to shared DB state for release
|
||||
private isShutdown: boolean = false; // Prevent double-shutdown
|
||||
|
||||
constructor(instanceContext?: InstanceContext, earlyLogger?: EarlyErrorLogger) {
|
||||
this.instanceContext = instanceContext;
|
||||
@@ -245,18 +249,39 @@ export class N8NDocumentationMCPServer {
|
||||
* Order of cleanup:
|
||||
* 1. Close MCP server connection
|
||||
* 2. Destroy cache (clears entries AND stops cleanup timer)
|
||||
* 3. Close database connection
|
||||
* 3. Release shared database OR close dedicated connection
|
||||
* 4. Null out references to help GC
|
||||
*
|
||||
* IMPORTANT: For shared databases, we only release the reference (decrement refCount),
|
||||
* NOT close the database. The database stays open for other sessions.
|
||||
* For in-memory databases (tests), we close the dedicated connection.
|
||||
*/
|
||||
async close(): Promise<void> {
|
||||
// Wait for initialization to complete (or fail) before cleanup
|
||||
// This prevents race conditions where close runs while init is in progress
|
||||
try {
|
||||
await this.initialized;
|
||||
} catch (error) {
|
||||
// Initialization failed - that's OK, we still need to clean up
|
||||
logger.debug('Initialization had failed, proceeding with cleanup', {
|
||||
error: error instanceof Error ? error.message : String(error)
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await this.server.close();
|
||||
|
||||
// Use destroy() not clear() - also stops the cleanup timer
|
||||
this.cache.destroy();
|
||||
|
||||
// Close database connection before nullifying reference
|
||||
if (this.db) {
|
||||
// Handle database cleanup based on whether it's shared or dedicated
|
||||
if (this.useSharedDatabase && this.sharedDbState) {
|
||||
// Shared database: release reference, don't close
|
||||
// The database stays open for other sessions
|
||||
releaseSharedDatabase(this.sharedDbState);
|
||||
logger.debug('Released shared database reference');
|
||||
} else if (this.db) {
|
||||
// Dedicated database (in-memory for tests): close it
|
||||
try {
|
||||
this.db.close();
|
||||
} catch (dbError) {
|
||||
@@ -271,6 +296,7 @@ export class N8NDocumentationMCPServer {
|
||||
this.repository = null;
|
||||
this.templateService = null;
|
||||
this.earlyLogger = null;
|
||||
this.sharedDbState = null;
|
||||
} catch (error) {
|
||||
// Log but don't throw - cleanup should be best-effort
|
||||
logger.warn('Error closing MCP server', { error: error instanceof Error ? error.message : String(error) });
|
||||
@@ -286,23 +312,32 @@ export class N8NDocumentationMCPServer {
|
||||
|
||||
logger.debug('Database initialization starting...', { dbPath });
|
||||
|
||||
this.db = await createDatabaseAdapter(dbPath);
|
||||
logger.debug('Database adapter created');
|
||||
|
||||
// If using in-memory database for tests, initialize schema
|
||||
// For in-memory databases (tests), create a dedicated connection
|
||||
// For regular databases, use the shared connection to prevent memory leaks
|
||||
if (dbPath === ':memory:') {
|
||||
this.db = await createDatabaseAdapter(dbPath);
|
||||
logger.debug('Database adapter created (in-memory mode)');
|
||||
await this.initializeInMemorySchema();
|
||||
logger.debug('In-memory schema initialized');
|
||||
this.repository = new NodeRepository(this.db);
|
||||
this.templateService = new TemplateService(this.db);
|
||||
// Initialize similarity services for enhanced validation
|
||||
EnhancedConfigValidator.initializeSimilarityServices(this.repository);
|
||||
this.useSharedDatabase = false;
|
||||
} else {
|
||||
// Use shared database connection to prevent ~900MB memory leak per session
|
||||
// See: Memory leak fix - database was being duplicated per session
|
||||
const sharedState = await getSharedDatabase(dbPath);
|
||||
this.db = sharedState.db;
|
||||
this.repository = sharedState.repository;
|
||||
this.templateService = sharedState.templateService;
|
||||
this.sharedDbState = sharedState;
|
||||
this.useSharedDatabase = true;
|
||||
logger.debug('Using shared database connection');
|
||||
}
|
||||
|
||||
this.repository = new NodeRepository(this.db);
|
||||
logger.debug('Node repository initialized');
|
||||
|
||||
this.templateService = new TemplateService(this.db);
|
||||
logger.debug('Template service initialized');
|
||||
|
||||
// Initialize similarity services for enhanced validation
|
||||
EnhancedConfigValidator.initializeSimilarityServices(this.repository);
|
||||
logger.debug('Similarity services initialized');
|
||||
|
||||
// Checkpoint: Database connected (v2.18.3)
|
||||
@@ -3910,8 +3945,33 @@ Full documentation is being prepared. For now, use get_node_essentials for confi
|
||||
}
|
||||
|
||||
async shutdown(): Promise<void> {
|
||||
// Prevent double-shutdown
|
||||
if (this.isShutdown) {
|
||||
logger.debug('Shutdown already called, skipping');
|
||||
return;
|
||||
}
|
||||
this.isShutdown = true;
|
||||
|
||||
logger.info('Shutting down MCP server...');
|
||||
|
||||
|
||||
// Wait for initialization to complete (or fail) before cleanup
|
||||
// This prevents race conditions where shutdown runs while init is in progress
|
||||
try {
|
||||
await this.initialized;
|
||||
} catch (error) {
|
||||
// Initialization failed - that's OK, we still need to clean up
|
||||
logger.debug('Initialization had failed, proceeding with cleanup', {
|
||||
error: error instanceof Error ? error.message : String(error)
|
||||
});
|
||||
}
|
||||
|
||||
// Close MCP server connection (for consistency with close() method)
|
||||
try {
|
||||
await this.server.close();
|
||||
} catch (error) {
|
||||
logger.error('Error closing MCP server:', error);
|
||||
}
|
||||
|
||||
// Clean up cache timers to prevent memory leaks
|
||||
if (this.cache) {
|
||||
try {
|
||||
@@ -3921,15 +3981,31 @@ Full documentation is being prepared. For now, use get_node_essentials for confi
|
||||
logger.error('Error cleaning up cache:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// Close database connection if it exists
|
||||
if (this.db) {
|
||||
|
||||
// Handle database cleanup based on whether it's shared or dedicated
|
||||
// For shared databases, we only release the reference (decrement refCount)
|
||||
// For dedicated databases (in-memory for tests), we close the connection
|
||||
if (this.useSharedDatabase && this.sharedDbState) {
|
||||
try {
|
||||
await this.db.close();
|
||||
releaseSharedDatabase(this.sharedDbState);
|
||||
logger.info('Released shared database reference');
|
||||
} catch (error) {
|
||||
logger.error('Error releasing shared database:', error);
|
||||
}
|
||||
} else if (this.db) {
|
||||
try {
|
||||
this.db.close();
|
||||
logger.info('Database connection closed');
|
||||
} catch (error) {
|
||||
logger.error('Error closing database:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// Null out references to help garbage collection
|
||||
this.db = null;
|
||||
this.repository = null;
|
||||
this.templateService = null;
|
||||
this.earlyLogger = null;
|
||||
this.sharedDbState = null;
|
||||
}
|
||||
}
|
||||
@@ -58,6 +58,13 @@ export class TelemetryBatchProcessor {
|
||||
private flushTimes: number[] = [];
|
||||
private deadLetterQueue: (TelemetryEvent | WorkflowTelemetry | WorkflowMutationRecord)[] = [];
|
||||
private readonly maxDeadLetterSize = 100;
|
||||
// Track event listeners for proper cleanup to prevent memory leaks
|
||||
private eventListeners: {
|
||||
beforeExit?: () => void;
|
||||
sigint?: () => void;
|
||||
sigterm?: () => void;
|
||||
} = {};
|
||||
private started: boolean = false;
|
||||
|
||||
constructor(
|
||||
private supabase: SupabaseClient | null,
|
||||
@@ -72,6 +79,12 @@ export class TelemetryBatchProcessor {
|
||||
start(): void {
|
||||
if (!this.isEnabled() || !this.supabase) return;
|
||||
|
||||
// Guard against multiple starts (prevents event listener accumulation)
|
||||
if (this.started) {
|
||||
logger.debug('Telemetry batch processor already started, skipping');
|
||||
return;
|
||||
}
|
||||
|
||||
// Set up periodic flushing
|
||||
this.flushTimer = setInterval(() => {
|
||||
this.flush();
|
||||
@@ -83,17 +96,22 @@ export class TelemetryBatchProcessor {
|
||||
this.flushTimer.unref();
|
||||
}
|
||||
|
||||
// Set up process exit handlers
|
||||
process.on('beforeExit', () => this.flush());
|
||||
process.on('SIGINT', () => {
|
||||
// Set up process exit handlers with stored references for cleanup
|
||||
this.eventListeners.beforeExit = () => this.flush();
|
||||
this.eventListeners.sigint = () => {
|
||||
this.flush();
|
||||
process.exit(0);
|
||||
});
|
||||
process.on('SIGTERM', () => {
|
||||
};
|
||||
this.eventListeners.sigterm = () => {
|
||||
this.flush();
|
||||
process.exit(0);
|
||||
});
|
||||
};
|
||||
|
||||
process.on('beforeExit', this.eventListeners.beforeExit);
|
||||
process.on('SIGINT', this.eventListeners.sigint);
|
||||
process.on('SIGTERM', this.eventListeners.sigterm);
|
||||
|
||||
this.started = true;
|
||||
logger.debug('Telemetry batch processor started');
|
||||
}
|
||||
|
||||
@@ -105,6 +123,20 @@ export class TelemetryBatchProcessor {
|
||||
clearInterval(this.flushTimer);
|
||||
this.flushTimer = undefined;
|
||||
}
|
||||
|
||||
// Remove event listeners to prevent memory leaks
|
||||
if (this.eventListeners.beforeExit) {
|
||||
process.removeListener('beforeExit', this.eventListeners.beforeExit);
|
||||
}
|
||||
if (this.eventListeners.sigint) {
|
||||
process.removeListener('SIGINT', this.eventListeners.sigint);
|
||||
}
|
||||
if (this.eventListeners.sigterm) {
|
||||
process.removeListener('SIGTERM', this.eventListeners.sigterm);
|
||||
}
|
||||
this.eventListeners = {};
|
||||
this.started = false;
|
||||
|
||||
logger.debug('Telemetry batch processor stopped');
|
||||
}
|
||||
|
||||
|
||||
302
tests/unit/database/shared-database.test.ts
Normal file
302
tests/unit/database/shared-database.test.ts
Normal file
@@ -0,0 +1,302 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
|
||||
// Mock dependencies at module level
|
||||
const mockDb = {
|
||||
prepare: vi.fn().mockReturnValue({
|
||||
get: vi.fn(),
|
||||
all: vi.fn(),
|
||||
run: vi.fn()
|
||||
}),
|
||||
exec: vi.fn(),
|
||||
close: vi.fn(),
|
||||
pragma: vi.fn(),
|
||||
inTransaction: false,
|
||||
transaction: vi.fn(),
|
||||
checkFTS5Support: vi.fn()
|
||||
};
|
||||
|
||||
vi.mock('../../../src/database/database-adapter', () => ({
|
||||
createDatabaseAdapter: vi.fn().mockResolvedValue(mockDb)
|
||||
}));
|
||||
|
||||
vi.mock('../../../src/database/node-repository', () => ({
|
||||
NodeRepository: vi.fn().mockImplementation(() => ({
|
||||
getNodeTypes: vi.fn().mockReturnValue([])
|
||||
}))
|
||||
}));
|
||||
|
||||
vi.mock('../../../src/templates/template-service', () => ({
|
||||
TemplateService: vi.fn().mockImplementation(() => ({}))
|
||||
}));
|
||||
|
||||
vi.mock('../../../src/services/enhanced-config-validator', () => ({
|
||||
EnhancedConfigValidator: {
|
||||
initializeSimilarityServices: vi.fn()
|
||||
}
|
||||
}));
|
||||
|
||||
vi.mock('../../../src/utils/logger', () => ({
|
||||
logger: {
|
||||
debug: vi.fn(),
|
||||
info: vi.fn(),
|
||||
warn: vi.fn(),
|
||||
error: vi.fn()
|
||||
}
|
||||
}));
|
||||
|
||||
describe('Shared Database Module', () => {
  let sharedDbModule: typeof import('../../../src/database/shared-database');
  let createDatabaseAdapter: ReturnType<typeof vi.fn>;

  beforeEach(async () => {
    // Reset all mocks
    vi.clearAllMocks();
    mockDb.close.mockReset();

    // Reset modules to get fresh state
    vi.resetModules();

    // Import fresh module
    sharedDbModule = await import('../../../src/database/shared-database');

    // Get the mocked function
    const adapterModule = await import('../../../src/database/database-adapter');
    createDatabaseAdapter = adapterModule.createDatabaseAdapter as ReturnType<typeof vi.fn>;
  });

  afterEach(async () => {
    // Clean up any shared state by closing
    try {
      await sharedDbModule.closeSharedDatabase();
    } catch {
      // Ignore errors during cleanup
    }
  });

  describe('getSharedDatabase', () => {
    it('should initialize database on first call', async () => {
      const state = await sharedDbModule.getSharedDatabase('/path/to/db');

      expect(state).toBeDefined();
      expect(state.db).toBe(mockDb);
      expect(state.dbPath).toBe('/path/to/db');
      expect(state.refCount).toBe(1);
      expect(state.initialized).toBe(true);
      expect(createDatabaseAdapter).toHaveBeenCalledWith('/path/to/db');
    });

    it('should reuse existing connection and increment refCount', async () => {
      // First call initializes
      const state1 = await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(state1.refCount).toBe(1);

      // Second call reuses
      const state2 = await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(state2.refCount).toBe(2);

      // Same object
      expect(state1).toBe(state2);

      // Only initialized once
      expect(createDatabaseAdapter).toHaveBeenCalledTimes(1);
    });

    it('should throw error when called with different path', async () => {
      await sharedDbModule.getSharedDatabase('/path/to/db1');

      await expect(sharedDbModule.getSharedDatabase('/path/to/db2'))
        .rejects.toThrow('Shared database already initialized with different path');
    });

    it('should handle concurrent initialization requests', async () => {
      // Start two requests concurrently
      const [state1, state2] = await Promise.all([
        sharedDbModule.getSharedDatabase('/path/to/db'),
        sharedDbModule.getSharedDatabase('/path/to/db')
      ]);

      // Both should get the same state
      expect(state1).toBe(state2);

      // RefCount should be 2 (one for each call)
      expect(state1.refCount).toBe(2);

      // Only one actual initialization
      expect(createDatabaseAdapter).toHaveBeenCalledTimes(1);
    });

    it('should handle initialization failure', async () => {
      createDatabaseAdapter.mockRejectedValueOnce(new Error('DB error'));

      await expect(sharedDbModule.getSharedDatabase('/path/to/db'))
        .rejects.toThrow('DB error');

      // After failure, should not be initialized
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(false);
    });

    it('should allow retry after initialization failure', async () => {
      // First call fails
      createDatabaseAdapter.mockRejectedValueOnce(new Error('DB error'));
      await expect(sharedDbModule.getSharedDatabase('/path/to/db'))
        .rejects.toThrow('DB error');

      // Reset mock for successful call
      createDatabaseAdapter.mockResolvedValueOnce(mockDb);

      // Second call succeeds
      const state = await sharedDbModule.getSharedDatabase('/path/to/db');

      expect(state).toBeDefined();
      expect(state.initialized).toBe(true);
    });
  });
  describe('releaseSharedDatabase', () => {
    it('should decrement refCount', async () => {
      const state = await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(state.refCount).toBe(1);

      sharedDbModule.releaseSharedDatabase(state);
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(0);
    });

    it('should not decrement below 0', async () => {
      const state = await sharedDbModule.getSharedDatabase('/path/to/db');

      // Release once (refCount: 1 -> 0)
      sharedDbModule.releaseSharedDatabase(state);
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(0);

      // Release again (should stay at 0, not go negative)
      sharedDbModule.releaseSharedDatabase(state);
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(0);
    });

    it('should handle null state gracefully', () => {
      // Should not throw
      sharedDbModule.releaseSharedDatabase(null as any);
    });

    it('should not close database when refCount hits 0', async () => {
      const state = await sharedDbModule.getSharedDatabase('/path/to/db');
      sharedDbModule.releaseSharedDatabase(state);

      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(0);
      expect(mockDb.close).not.toHaveBeenCalled();

      // Database should still be accessible
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(true);
    });
  });

  describe('closeSharedDatabase', () => {
    it('should close database and clear state', async () => {
      // Get state
      await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(true);
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(1);

      await sharedDbModule.closeSharedDatabase();

      // State should be cleared
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(false);
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(0);
    });

    it('should handle close error gracefully', async () => {
      await sharedDbModule.getSharedDatabase('/path/to/db');
      mockDb.close.mockImplementationOnce(() => {
        throw new Error('Close error');
      });

      // Should not throw
      await sharedDbModule.closeSharedDatabase();

      // State should still be cleared
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(false);
    });

    it('should be idempotent when already closed', async () => {
      // Close without ever initializing
      await sharedDbModule.closeSharedDatabase();

      // Should not throw
      await sharedDbModule.closeSharedDatabase();
    });

    it('should allow re-initialization after close', async () => {
      // Initialize
      const state1 = await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(state1.refCount).toBe(1);

      // Close
      await sharedDbModule.closeSharedDatabase();
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(false);

      // Re-initialize
      const state2 = await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(state2.refCount).toBe(1);
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(true);

      // Should be a new state object
      expect(state1).not.toBe(state2);
    });
  });
  describe('isSharedDatabaseInitialized', () => {
    it('should return false before initialization', () => {
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(false);
    });

    it('should return true after initialization', async () => {
      await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(true);
    });

    it('should return false after close', async () => {
      await sharedDbModule.getSharedDatabase('/path/to/db');
      await sharedDbModule.closeSharedDatabase();
      expect(sharedDbModule.isSharedDatabaseInitialized()).toBe(false);
    });
  });

  describe('getSharedDatabaseRefCount', () => {
    it('should return 0 before initialization', () => {
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(0);
    });

    it('should return correct refCount after multiple operations', async () => {
      const state = await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(1);

      await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(2);

      await sharedDbModule.getSharedDatabase('/path/to/db');
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(3);

      sharedDbModule.releaseSharedDatabase(state);
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(2);
    });

    it('should return 0 after close', async () => {
      await sharedDbModule.getSharedDatabase('/path/to/db');
      await sharedDbModule.closeSharedDatabase();
      expect(sharedDbModule.getSharedDatabaseRefCount()).toBe(0);
    });
  });

  describe('SharedDatabaseState interface', () => {
    it('should expose correct properties', async () => {
      const state = await sharedDbModule.getSharedDatabase('/path/to/db');

      expect(state).toHaveProperty('db');
      expect(state).toHaveProperty('repository');
      expect(state).toHaveProperty('templateService');
      expect(state).toHaveProperty('dbPath');
      expect(state).toHaveProperty('refCount');
      expect(state).toHaveProperty('initialized');
    });
  });
});
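The module under test, `src/database/shared-database.ts`, is not included in this excerpt. As a rough sketch of the contract the suite above exercises (promise-lock initialization, path check, reference counting, idempotent close), assuming hypothetical internals and omitting the repository/templateService wiring the last test checks for:

```typescript
// Sketch only: approximates the behaviour asserted by the tests above;
// the actual implementation in the repository may differ.
import { createDatabaseAdapter } from './database-adapter';

type Db = Awaited<ReturnType<typeof createDatabaseAdapter>>;

export interface SharedDatabaseState {
  db: Db;
  dbPath: string;
  refCount: number;
  initialized: boolean;
}

let state: SharedDatabaseState | null = null;
let initPromise: Promise<void> | null = null; // promise lock for concurrent callers

export async function getSharedDatabase(dbPath: string): Promise<SharedDatabaseState> {
  if (state) {
    if (state.dbPath !== dbPath) {
      throw new Error('Shared database already initialized with different path');
    }
    state.refCount++;
    return state;
  }
  if (!initPromise) {
    initPromise = (async () => {
      try {
        const db = await createDatabaseAdapter(dbPath);
        state = { db, dbPath, refCount: 0, initialized: true };
      } finally {
        initPromise = null; // clear the lock so a failed init can be retried
      }
    })();
  }
  await initPromise;
  if (!state) throw new Error('Shared database initialization failed');
  state.refCount++;
  return state;
}

export function releaseSharedDatabase(shared: SharedDatabaseState | null): void {
  if (!shared || !state) return;
  // Never closes the connection and never drops below zero.
  state.refCount = Math.max(0, state.refCount - 1);
}

export async function closeSharedDatabase(): Promise<void> {
  if (!state) return; // idempotent
  try {
    state.db.close();
  } catch {
    // Ignore close errors during shutdown.
  }
  state = null;
}

export function isSharedDatabaseInitialized(): boolean {
  return state !== null;
}

export function getSharedDatabaseRefCount(): number {
  return state?.refCount ?? 0;
}
```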
@@ -333,13 +333,14 @@ describe('HTTP Server Session Management', () => {
       server = new SingleSessionHTTPServer();

       // Mock expired sessions
+      // Note: Default session timeout is 5 minutes (configurable via SESSION_TIMEOUT_MINUTES)
       const mockSessionMetadata = {
         'session-1': {
-          lastAccess: new Date(Date.now() - 40 * 60 * 1000), // 40 minutes ago (expired)
+          lastAccess: new Date(Date.now() - 10 * 60 * 1000), // 10 minutes ago (expired with 5 min timeout)
           createdAt: new Date(Date.now() - 60 * 60 * 1000)
         },
         'session-2': {
-          lastAccess: new Date(Date.now() - 10 * 60 * 1000), // 10 minutes ago (not expired)
+          lastAccess: new Date(Date.now() - 2 * 60 * 1000), // 2 minutes ago (not expired with 5 min timeout)
           createdAt: new Date(Date.now() - 20 * 60 * 1000)
         }
       };
@@ -514,15 +515,16 @@ describe('HTTP Server Session Management', () => {

     it('should get session metrics correctly', async () => {
       server = new SingleSessionHTTPServer();

+      // Note: Default session timeout is 5 minutes (configurable via SESSION_TIMEOUT_MINUTES)
       const now = Date.now();
       (server as any).sessionMetadata = {
         'active-session': {
-          lastAccess: new Date(now - 10 * 60 * 1000), // 10 minutes ago
+          lastAccess: new Date(now - 2 * 60 * 1000), // 2 minutes ago (not expired with 5 min timeout)
           createdAt: new Date(now - 20 * 60 * 1000)
         },
         'expired-session': {
-          lastAccess: new Date(now - 40 * 60 * 1000), // 40 minutes ago (expired)
+          lastAccess: new Date(now - 10 * 60 * 1000), // 10 minutes ago (expired with 5 min timeout)
           createdAt: new Date(now - 60 * 60 * 1000)
         }
       };
@@ -532,7 +534,7 @@ describe('HTTP Server Session Management', () => {
       };

       const metrics = (server as any).getSessionMetrics();

       expect(metrics.totalSessions).toBe(2);
       expect(metrics.activeSessions).toBe(2);
       expect(metrics.expiredSessions).toBe(1);
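Both fixtures above assume the new 5-minute default. A plausible way such a default plus the `SESSION_TIMEOUT_MINUTES` override could be resolved is sketched below; the helper name and validation details are illustrative, not the repository's actual code.

```typescript
// Hypothetical helper: resolves the session timeout the updated tests assume.
const DEFAULT_SESSION_TIMEOUT_MINUTES = 5;

function resolveSessionTimeoutMs(env: NodeJS.ProcessEnv = process.env): number {
  const raw = env.SESSION_TIMEOUT_MINUTES;
  const parsed = raw !== undefined ? Number.parseInt(raw, 10) : DEFAULT_SESSION_TIMEOUT_MINUTES;
  const minutes = Number.isFinite(parsed) && parsed > 0 ? parsed : DEFAULT_SESSION_TIMEOUT_MINUTES;
  return minutes * 60 * 1000;
}

// With the default: a session idle for 10 minutes is expired,
// while one idle for 2 minutes is still active.
```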