diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a0ea5d..bb3cc10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,77 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [2.22.17] - 2025-11-13 + +### 🐛 Bug Fixes + +**Critical Telemetry Improvements** + +Fixed three critical issues in workflow mutation telemetry to improve data quality and security: + +#### 1. Fixed Inconsistent Sanitization (Security Critical) +- **Problem**: 30% of workflows (178-188 records) were unsanitized, exposing potential credentials/tokens +- **Solution**: Replaced weak inline sanitization with robust `WorkflowSanitizer.sanitizeWorkflowRaw()` +- **Impact**: Now 100% sanitization coverage with 17 sensitive patterns detected and redacted +- **Files Modified**: + - `src/telemetry/workflow-sanitizer.ts`: Added `sanitizeWorkflowRaw()` method + - `src/telemetry/mutation-tracker.ts`: Removed redundant sanitization code, use centralized sanitizer + +#### 2. Enabled Validation Data Capture (Data Quality Blocker) +- **Problem**: Zero validation metrics captured (validation_before/after all NULL) +- **Solution**: Added workflow validation before and after mutations using `WorkflowValidator` +- **Impact**: Can now measure mutation quality, track error resolution patterns +- **Implementation**: + - Validates workflows before mutation (captures baseline errors) + - Validates workflows after mutation (measures improvement) + - Non-blocking: validation errors don't prevent mutations + - Captures: errors, warnings, validation status +- **Files Modified**: + - `src/mcp/handlers-workflow-diff.ts`: Added pre/post mutation validation + +#### 3. 
Improved Intent Capture (Data Quality) +- **Problem**: 92.62% of intents were generic "Partial workflow update" +- **Solution**: Enhanced tool documentation + automatic intent inference from operations +- **Impact**: Meaningful intents automatically generated when not explicitly provided +- **Implementation**: + - Enhanced documentation with specific intent examples and anti-patterns + - Added `inferIntentFromOperations()` function that generates meaningful intents: + - Single operations: "Add n8n-nodes-base.slack", "Connect webhook to HTTP Request" + - Multiple operations: "Workflow update: add 2 nodes, modify connections" + - Fallback inference when intent is missing, generic, or too short +- **Files Modified**: + - `src/mcp/tool-docs/workflow_management/n8n-update-partial-workflow.ts`: Enhanced guidance + - `src/mcp/handlers-workflow-diff.ts`: Added intent inference logic + +### 📊 Expected Results + +After deployment, telemetry data should show: +- **100% sanitization coverage** (up from 70%) +- **100% validation capture** (up from 0%) +- **50%+ meaningful intents** (up from 7.38%) +- **Complete telemetry dataset** for analysis + +### 🎯 Technical Details + +**Sanitization Coverage**: Now detects and redacts: +- Webhook URLs, API keys (OpenAI sk-*, GitHub ghp_*, etc.) +- Bearer tokens, OAuth credentials, passwords +- URLs with authentication, long tokens (20+ chars) +- Sensitive field names (apiKey, token, secret, password, etc.) 
+ +**Validation Metrics Captured**: +- Workflow validity status (true/false) +- Error/warning counts and details +- Node configuration errors +- Connection errors +- Expression syntax errors +- Validation improvement tracking (errors resolved/introduced) + +**Intent Inference Examples**: +- `addNode` → "Add n8n-nodes-base.webhook" +- `rewireConnection` → "Rewire IF from ErrorHandler to SuccessHandler" +- Multiple operations → "Workflow update: add 2 nodes, modify connections, update metadata" + ## [2.22.16] - 2025-11-13 ### ✨ Enhanced Features diff --git a/data/nodes.db b/data/nodes.db index 868216f..03877fe 100644 Binary files a/data/nodes.db and b/data/nodes.db differ diff --git a/package.json b/package.json index c47bdfe..ae47ff5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "n8n-mcp", - "version": "2.22.16", + "version": "2.22.17", "description": "Integration between n8n workflow automation and Model Context Protocol (MCP)", "main": "dist/index.js", "types": "dist/index.d.ts", diff --git a/package.runtime.json b/package.runtime.json index 62c7e11..0df4567 100644 --- a/package.runtime.json +++ b/package.runtime.json @@ -1,6 +1,6 @@ { "name": "n8n-mcp-runtime", - "version": "2.22.16", + "version": "2.22.17", "description": "n8n MCP Server Runtime Dependencies Only", "private": true, "dependencies": { diff --git a/src/mcp/handlers-workflow-diff.ts b/src/mcp/handlers-workflow-diff.ts index 75ac9fd..13fff29 100644 --- a/src/mcp/handlers-workflow-diff.ts +++ b/src/mcp/handlers-workflow-diff.ts @@ -14,6 +14,8 @@ import { InstanceContext } from '../types/instance-context'; import { validateWorkflowStructure } from '../services/n8n-validation'; import { NodeRepository } from '../database/node-repository'; import { WorkflowVersioningService } from '../services/workflow-versioning-service'; +import { WorkflowValidator } from '../services/workflow-validator'; +import { EnhancedConfigValidator } from '../services/enhanced-config-validator'; // Zod 
schema for the diff request const workflowDiffSchema = z.object({ @@ -62,6 +64,8 @@ export async function handleUpdatePartialWorkflow( const startTime = Date.now(); const sessionId = `mutation_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`; let workflowBefore: any = null; + let validationBefore: any = null; + let validationAfter: any = null; try { // Debug logging (only in debug mode) @@ -92,6 +96,24 @@ export async function handleUpdatePartialWorkflow( workflow = await client.getWorkflow(input.id); // Store original workflow for telemetry workflowBefore = JSON.parse(JSON.stringify(workflow)); + + // Validate workflow BEFORE mutation (for telemetry) + try { + const validator = new WorkflowValidator(repository, EnhancedConfigValidator); + validationBefore = await validator.validateWorkflow(workflowBefore, { + validateNodes: true, + validateConnections: true, + validateExpressions: true, + profile: 'runtime' + }); + } catch (validationError) { + logger.debug('Pre-mutation validation failed (non-blocking):', validationError); + // Don't block mutation on validation errors + validationBefore = { + valid: false, + errors: [{ type: 'validation_error', message: 'Validation failed' }] + }; + } } catch (error) { if (error instanceof N8nApiError) { return { @@ -257,6 +279,24 @@ export async function handleUpdatePartialWorkflow( let finalWorkflow = updatedWorkflow; let activationMessage = ''; + // Validate workflow AFTER mutation (for telemetry) + try { + const validator = new WorkflowValidator(repository, EnhancedConfigValidator); + validationAfter = await validator.validateWorkflow(finalWorkflow, { + validateNodes: true, + validateConnections: true, + validateExpressions: true, + profile: 'runtime' + }); + } catch (validationError) { + logger.debug('Post-mutation validation failed (non-blocking):', validationError); + // Don't block on validation errors + validationAfter = { + valid: false, + errors: [{ type: 'validation_error', message: 'Validation failed' }] + 
}; + } + if (diffResult.shouldActivate) { try { finalWorkflow = await client.activateWorkflow(input.id); @@ -298,6 +338,8 @@ export async function handleUpdatePartialWorkflow( operations: input.operations, workflowBefore, workflowAfter: finalWorkflow, + validationBefore, + validationAfter, mutationSuccess: true, durationMs: Date.now() - startTime, }).catch(err => { @@ -330,6 +372,8 @@ export async function handleUpdatePartialWorkflow( operations: input.operations, workflowBefore, workflowAfter: workflowBefore, // No change since it failed + validationBefore, + validationAfter: validationBefore, // Same as before since mutation failed mutationSuccess: false, mutationError: error instanceof Error ? error.message : 'Unknown error', durationMs: Date.now() - startTime, @@ -365,11 +409,86 @@ export async function handleUpdatePartialWorkflow( } } +/** + * Infer intent from operations when not explicitly provided + */ +function inferIntentFromOperations(operations: any[]): string { + if (!operations || operations.length === 0) { + return 'Partial workflow update'; + } + + const opTypes = operations.map((op) => op.type); + const opCount = operations.length; + + // Single operation - be specific + if (opCount === 1) { + const op = operations[0]; + switch (op.type) { + case 'addNode': + return `Add ${op.node?.type || 'node'}`; + case 'removeNode': + return `Remove node ${op.nodeName || op.nodeId || ''}`.trim(); + case 'updateNode': + return `Update node ${op.nodeName || op.nodeId || ''}`.trim(); + case 'addConnection': + return `Connect ${op.source || 'node'} to ${op.target || 'node'}`; + case 'removeConnection': + return `Disconnect ${op.source || 'node'} from ${op.target || 'node'}`; + case 'rewireConnection': + return `Rewire ${op.source || 'node'} from ${op.from || ''} to ${op.to || ''}`.trim(); + case 'updateName': + return `Rename workflow to "${op.name || ''}"`; + case 'activateWorkflow': + return 'Activate workflow'; + case 'deactivateWorkflow': + return 'Deactivate 
workflow'; + default: + return `Workflow ${op.type}`; + } + } + + // Multiple operations - summarize pattern + const typeSet = new Set(opTypes); + const summary: string[] = []; + + if (typeSet.has('addNode')) { + const count = opTypes.filter((t) => t === 'addNode').length; + summary.push(`add ${count} node${count > 1 ? 's' : ''}`); + } + if (typeSet.has('removeNode')) { + const count = opTypes.filter((t) => t === 'removeNode').length; + summary.push(`remove ${count} node${count > 1 ? 's' : ''}`); + } + if (typeSet.has('updateNode')) { + const count = opTypes.filter((t) => t === 'updateNode').length; + summary.push(`update ${count} node${count > 1 ? 's' : ''}`); + } + if (typeSet.has('addConnection') || typeSet.has('rewireConnection')) { + summary.push('modify connections'); + } + if (typeSet.has('updateName') || typeSet.has('updateSettings')) { + summary.push('update metadata'); + } + + return summary.length > 0 + ? `Workflow update: ${summary.join(', ')}` + : `Workflow update: ${opCount} operations`; +} + /** * Track workflow mutation for telemetry */ async function trackWorkflowMutation(data: any): Promise { try { + // Enhance intent if it's missing or generic + if ( + !data.userIntent || + data.userIntent === 'Partial workflow update' || + data.userIntent.length < 10 + ) { + data.userIntent = inferIntentFromOperations(data.operations); + } + const { telemetry } = await import('../telemetry/telemetry-manager.js'); await telemetry.trackWorkflowMutation(data); } catch (error) { diff --git a/src/mcp/tool-docs/workflow_management/n8n-update-partial-workflow.ts b/src/mcp/tool-docs/workflow_management/n8n-update-partial-workflow.ts index f61156b..780e2e7 100644 --- a/src/mcp/tool-docs/workflow_management/n8n-update-partial-workflow.ts +++ b/src/mcp/tool-docs/workflow_management/n8n-update-partial-workflow.ts @@ -9,7 +9,8 @@ export const n8nUpdatePartialWorkflowDoc: ToolDocumentation = { example: 'n8n_update_partial_workflow({id: "wf_123", operations: [{type: 
"rewireConnection", source: "IF", from: "Old", to: "New", branch: "true"}]})', performance: 'Fast (50-200ms)', tips: [ - 'Include intent parameter in every call - helps to return better responses', + 'ALWAYS provide intent parameter describing what you\'re doing (e.g., "Add error handling", "Fix webhook URL", "Connect Slack to error output")', + 'DON\'T use generic intent like "update workflow" or "partial update" - be specific about your goal', 'Use rewireConnection to change connection targets', 'Use branch="true"/"false" for IF nodes', 'Use case=N for Switch nodes', @@ -367,7 +368,7 @@ n8n_update_partial_workflow({ ], performance: 'Very fast - typically 50-200ms. Much faster than full updates as only changes are processed.', bestPractices: [ - 'Always include intent parameter - it helps provide better responses', + 'Always include intent parameter with specific description (e.g., "Add error handling to HTTP Request node", "Fix authentication flow", "Connect Slack notification to errors"). 
Avoid generic phrases like "update workflow" or "partial update"', 'Use rewireConnection instead of remove+add for changing targets', 'Use branch="true"/"false" for IF nodes instead of sourceIndex', 'Use case=N for Switch nodes instead of sourceIndex', diff --git a/src/scripts/test-telemetry-mutations-verbose.ts b/src/scripts/test-telemetry-mutations-verbose.ts new file mode 100644 index 0000000..adadcf3 --- /dev/null +++ b/src/scripts/test-telemetry-mutations-verbose.ts @@ -0,0 +1,151 @@ +/** + * Test telemetry mutations with enhanced logging + * Verifies that mutations are properly tracked and persisted + */ + +import { telemetry } from '../telemetry/telemetry-manager.js'; +import { TelemetryConfigManager } from '../telemetry/config-manager.js'; +import { logger } from '../utils/logger.js'; + +async function testMutations() { + console.log('Starting verbose telemetry mutation test...\n'); + + const configManager = TelemetryConfigManager.getInstance(); + console.log('Telemetry config is enabled:', configManager.isEnabled()); + console.log('Telemetry config file:', configManager['configPath']); + + // Test data with valid workflow structure + const testMutation = { + sessionId: 'test_session_' + Date.now(), + toolName: 'n8n_update_partial_workflow', + userIntent: 'Add a Merge node for data consolidation', + operations: [ + { + type: 'addNode', + nodeId: 'Merge1', + node: { + id: 'Merge1', + type: 'n8n-nodes-base.merge', + name: 'Merge', + position: [600, 200], + parameters: {} + } + }, + { + type: 'addConnection', + source: 'previous_node', + target: 'Merge1' + } + ], + workflowBefore: { + id: 'test-workflow', + name: 'Test Workflow', + active: true, + nodes: [ + { + id: 'previous_node', + type: 'n8n-nodes-base.manualTrigger', + name: 'When called', + position: [300, 200], + parameters: {} + } + ], + connections: {}, + nodeIds: [] + }, + workflowAfter: { + id: 'test-workflow', + name: 'Test Workflow', + active: true, + nodes: [ + { + id: 'previous_node', + type: 
'n8n-nodes-base.manualTrigger', + name: 'When called', + position: [300, 200], + parameters: {} + }, + { + id: 'Merge1', + type: 'n8n-nodes-base.merge', + name: 'Merge', + position: [600, 200], + parameters: {} + } + ], + connections: { + 'previous_node': [ + { + node: 'Merge1', + type: 'main', + index: 0, + source: 0, + destination: 0 + } + ] + }, + nodeIds: [] + }, + mutationSuccess: true, + durationMs: 125 + }; + + console.log('\nTest Mutation Data:'); + console.log('=================='); + console.log(JSON.stringify({ + intent: testMutation.userIntent, + tool: testMutation.toolName, + operationCount: testMutation.operations.length, + sessionId: testMutation.sessionId + }, null, 2)); + console.log('\n'); + + // Call trackWorkflowMutation + console.log('Calling telemetry.trackWorkflowMutation...'); + try { + await telemetry.trackWorkflowMutation(testMutation); + console.log('✓ trackWorkflowMutation completed successfully\n'); + } catch (error) { + console.error('✗ trackWorkflowMutation failed:', error); + console.error('\n'); + } + + // Check queue size before flush + const metricsBeforeFlush = telemetry.getMetrics(); + console.log('Metrics before flush:'); + console.log('- mutationQueueSize:', metricsBeforeFlush.tracking.mutationQueueSize); + console.log('- eventsTracked:', metricsBeforeFlush.processing.eventsTracked); + console.log('- eventsFailed:', metricsBeforeFlush.processing.eventsFailed); + console.log('\n'); + + // Flush telemetry with 10-second wait for Supabase + console.log('Flushing telemetry (waiting 10 seconds for Supabase)...'); + try { + await telemetry.flush(); + console.log('✓ Telemetry flush completed\n'); + } catch (error) { + console.error('✗ Flush failed:', error); + console.error('\n'); + } + + // Wait a bit for async operations + await new Promise(resolve => setTimeout(resolve, 2000)); + + // Get final metrics + const metricsAfterFlush = telemetry.getMetrics(); + console.log('Metrics after flush:'); + console.log('- mutationQueueSize:', 
metricsAfterFlush.tracking.mutationQueueSize); + console.log('- eventsTracked:', metricsAfterFlush.processing.eventsTracked); + console.log('- eventsFailed:', metricsAfterFlush.processing.eventsFailed); + console.log('- batchesSent:', metricsAfterFlush.processing.batchesSent); + console.log('- batchesFailed:', metricsAfterFlush.processing.batchesFailed); + console.log('- circuitBreakerState:', metricsAfterFlush.processing.circuitBreakerState); + console.log('\n'); + + console.log('Test completed. Check workflow_mutations table in Supabase.'); +} + +testMutations().catch(error => { + console.error('Test failed:', error); + process.exit(1); +}); diff --git a/src/scripts/test-telemetry-mutations.ts b/src/scripts/test-telemetry-mutations.ts new file mode 100644 index 0000000..b67fad6 --- /dev/null +++ b/src/scripts/test-telemetry-mutations.ts @@ -0,0 +1,145 @@ +/** + * Test telemetry mutations + * Verifies that mutations are properly tracked and persisted + */ + +import { telemetry } from '../telemetry/telemetry-manager.js'; +import { TelemetryConfigManager } from '../telemetry/config-manager.js'; + +async function testMutations() { + console.log('Starting telemetry mutation test...\n'); + + const configManager = TelemetryConfigManager.getInstance(); + + console.log('Telemetry Status:'); + console.log('================'); + console.log(configManager.getStatus()); + console.log('\n'); + + // Get initial metrics + const metricsAfterInit = telemetry.getMetrics(); + console.log('Telemetry Metrics (After Init):'); + console.log('================================'); + console.log(JSON.stringify(metricsAfterInit, null, 2)); + console.log('\n'); + + // Test data mimicking actual mutation with valid workflow structure + const testMutation = { + sessionId: 'test_session_' + Date.now(), + toolName: 'n8n_update_partial_workflow', + userIntent: 'Add a Merge node for data consolidation', + operations: [ + { + type: 'addNode', + nodeId: 'Merge1', + node: { + id: 'Merge1', + type: 
'n8n-nodes-base.merge', + name: 'Merge', + position: [600, 200], + parameters: {} + } + }, + { + type: 'addConnection', + source: 'previous_node', + target: 'Merge1' + } + ], + workflowBefore: { + id: 'test-workflow', + name: 'Test Workflow', + active: true, + nodes: [ + { + id: 'previous_node', + type: 'n8n-nodes-base.manualTrigger', + name: 'When called', + position: [300, 200], + parameters: {} + } + ], + connections: {}, + nodeIds: [] + }, + workflowAfter: { + id: 'test-workflow', + name: 'Test Workflow', + active: true, + nodes: [ + { + id: 'previous_node', + type: 'n8n-nodes-base.manualTrigger', + name: 'When called', + position: [300, 200], + parameters: {} + }, + { + id: 'Merge1', + type: 'n8n-nodes-base.merge', + name: 'Merge', + position: [600, 200], + parameters: {} + } + ], + connections: { + 'previous_node': [ + { + node: 'Merge1', + type: 'main', + index: 0, + source: 0, + destination: 0 + } + ] + }, + nodeIds: [] + }, + mutationSuccess: true, + durationMs: 125 + }; + + console.log('Test Mutation Data:'); + console.log('=================='); + console.log(JSON.stringify({ + intent: testMutation.userIntent, + tool: testMutation.toolName, + operationCount: testMutation.operations.length, + sessionId: testMutation.sessionId + }, null, 2)); + console.log('\n'); + + // Call trackWorkflowMutation + console.log('Calling telemetry.trackWorkflowMutation...'); + try { + await telemetry.trackWorkflowMutation(testMutation); + console.log('✓ trackWorkflowMutation completed successfully\n'); + } catch (error) { + console.error('✗ trackWorkflowMutation failed:', error); + console.error('\n'); + } + + // Flush telemetry + console.log('Flushing telemetry...'); + try { + await telemetry.flush(); + console.log('✓ Telemetry flushed successfully\n'); + } catch (error) { + console.error('✗ Flush failed:', error); + console.error('\n'); + } + + // Get final metrics + const metricsAfterFlush = telemetry.getMetrics(); + console.log('Telemetry Metrics (After Flush):'); + 
console.log('=================================='); + console.log(JSON.stringify(metricsAfterFlush, null, 2)); + console.log('\n'); + + console.log('Test completed. Check workflow_mutations table in Supabase.'); +} + +testMutations().catch(error => { + console.error('Test failed:', error); + process.exit(1); +}); diff --git a/src/telemetry/mutation-tracker.ts b/src/telemetry/mutation-tracker.ts index f2e71ab..dd8b9ad 100644 --- a/src/telemetry/mutation-tracker.ts +++ b/src/telemetry/mutation-tracker.ts @@ -41,8 +41,8 @@ export class MutationTracker { } // Sanitize workflows to remove credentials and sensitive data - const workflowBefore = this.sanitizeFullWorkflow(data.workflowBefore); - const workflowAfter = this.sanitizeFullWorkflow(data.workflowAfter); + const workflowBefore = WorkflowSanitizer.sanitizeWorkflowRaw(data.workflowBefore); + const workflowAfter = WorkflowSanitizer.sanitizeWorkflowRaw(data.workflowAfter); // Sanitize user intent const sanitizedIntent = intentSanitizer.sanitize(data.userIntent); @@ -200,98 +200,6 @@ export class MutationTracker { return metrics; } - /** - * Sanitize a full workflow while preserving structure - * Removes credentials and sensitive data but keeps all nodes, connections, parameters - */ - private sanitizeFullWorkflow(workflow: any): any { - if (!workflow) return workflow; - - // Deep clone to avoid modifying original - const sanitized = JSON.parse(JSON.stringify(workflow)); - - // Remove sensitive workflow-level fields - delete sanitized.credentials; - delete sanitized.sharedWorkflows; - delete sanitized.ownedBy; - delete sanitized.createdBy; - delete sanitized.updatedBy; - - // Sanitize each node - if (sanitized.nodes && Array.isArray(sanitized.nodes)) { - sanitized.nodes = sanitized.nodes.map((node: any) => { - const sanitizedNode = { ...node }; - - // Remove credentials field - delete sanitizedNode.credentials; - - // Sanitize parameters if present - if (sanitizedNode.parameters && typeof sanitizedNode.parameters === 
'object') { - sanitizedNode.parameters = this.sanitizeParameters(sanitizedNode.parameters); - } - - return sanitizedNode; - }); - } - - return sanitized; - } - - /** - * Recursively sanitize parameters object - */ - private sanitizeParameters(params: any): any { - if (!params || typeof params !== 'object') return params; - - const sensitiveKeys = [ - 'apiKey', 'api_key', 'token', 'secret', 'password', 'credential', - 'auth', 'authorization', 'privateKey', 'accessToken', 'refreshToken' - ]; - - const sanitized: any = Array.isArray(params) ? [] : {}; - - for (const [key, value] of Object.entries(params)) { - const lowerKey = key.toLowerCase(); - - // Check if key is sensitive - if (sensitiveKeys.some(sk => lowerKey.includes(sk.toLowerCase()))) { - sanitized[key] = '[REDACTED]'; - } else if (typeof value === 'object' && value !== null) { - // Recursively sanitize nested objects - sanitized[key] = this.sanitizeParameters(value); - } else if (typeof value === 'string') { - // Sanitize string values that might contain sensitive data - sanitized[key] = this.sanitizeStringValue(value); - } else { - sanitized[key] = value; - } - } - - return sanitized; - } - - /** - * Sanitize string values that might contain sensitive data - */ - private sanitizeStringValue(value: string): string { - if (!value || typeof value !== 'string') return value; - - let sanitized = value; - - // Redact URLs with authentication - sanitized = sanitized.replace(/https?:\/\/[^:]+:[^@]+@[^\s/]+/g, '[REDACTED_URL_WITH_AUTH]'); - - // Redact long API keys/tokens (20+ alphanumeric chars) - sanitized = sanitized.replace(/\b[A-Za-z0-9_-]{32,}\b/g, '[REDACTED_TOKEN]'); - - // Redact OpenAI-style keys - sanitized = sanitized.replace(/\bsk-[A-Za-z0-9]{32,}\b/g, '[REDACTED_APIKEY]'); - - // Redact Bearer tokens - sanitized = sanitized.replace(/Bearer\s+[^\s]+/gi, 'Bearer [REDACTED]'); - - return sanitized; - } /** * Calculate validation improvement metrics diff --git a/src/telemetry/workflow-sanitizer.ts 
b/src/telemetry/workflow-sanitizer.ts index 44b2e7d..432a1f9 100644 --- a/src/telemetry/workflow-sanitizer.ts +++ b/src/telemetry/workflow-sanitizer.ts @@ -296,4 +296,37 @@ export class WorkflowSanitizer { const sanitized = this.sanitizeWorkflow(workflow); return sanitized.workflowHash; } + + /** + * Sanitize workflow and return raw workflow object (without metrics) + * For use in telemetry where we need plain workflow structure + */ + static sanitizeWorkflowRaw(workflow: any): any { + // Create a deep copy to avoid modifying original + const sanitized = JSON.parse(JSON.stringify(workflow)); + + // Sanitize nodes + if (sanitized.nodes && Array.isArray(sanitized.nodes)) { + sanitized.nodes = sanitized.nodes.map((node: WorkflowNode) => + this.sanitizeNode(node) + ); + } + + // Sanitize connections (keep structure only) + if (sanitized.connections) { + sanitized.connections = this.sanitizeConnections(sanitized.connections); + } + + // Remove other potentially sensitive data + delete sanitized.settings?.errorWorkflow; + delete sanitized.staticData; + delete sanitized.pinData; + delete sanitized.credentials; + delete sanitized.sharedWorkflows; + delete sanitized.ownedBy; + delete sanitized.createdBy; + delete sanitized.updatedBy; + + return sanitized; + } } \ No newline at end of file