mirror of
https://github.com/czlonkowski/n8n-mcp.git
synced 2026-03-29 05:33:07 +00:00
refactor: streamline test suite — 33 fewer files, 11.9x faster (#670)
* refactor: streamline test suite - cut 33 files, enable parallel execution (11.9x speedup) Remove duplicate, low-value, and fragmented test files while preserving all meaningful coverage. Enable parallel test execution and remove the entire benchmark infrastructure. Key changes: - Consolidate workflow-validator tests (13 files -> 3) - Consolidate config-validator tests (9 files -> 3) - Consolidate telemetry tests (11 files -> 6) - Merge AI validator tests (2 files -> 1) - Remove example/demo test files, mock-testing files, and already-skipped tests - Remove benchmark infrastructure (10 files, CI workflow, 4 npm scripts) - Enable parallel test execution (remove singleThread: true) - Remove retry:2 that was masking flaky tests - Slim CI publish-results job Results: 224 -> 191 test files, 4690 -> 4303 tests, 121K -> 106K lines Local runtime: 319s -> 27s (11.9x speedup) Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> * refactor: absorb config-validator satellite tests into consolidated file The previous commit deleted 4 config-validator satellite files. This properly merges their unique tests into the consolidated config-validator.test.ts, recovering 89 tests that were dropped during the bulk deletion. Deduplicates 5 tests that existed in both the satellite files and the security test file. 
Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> * fix: delete missed benchmark-pr.yml workflow, fix flaky session test - Remove benchmark-pr.yml that referenced deleted benchmark:ci script - Fix session-persistence round-trip test using timestamps closer to now to avoid edge cases exposed by removing retry:2 Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> * fix: rebuild FTS5 index after database rebuild to prevent stale rowid refs The FTS5 content-synced index could retain phantom rowid references from previous rebuild cycles, causing 'missing row N from content table' errors on MATCH queries. - Add explicit FTS5 rebuild command in rebuild script after all nodes saved - Add FTS5 rebuild in test beforeAll as defense-in-depth - Rebuild nodes.db with consistent FTS5 index Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> * fix: use recent timestamps in all session persistence tests Session round-trip tests used timestamps 5-10 minutes in the past which could fail under CI load when combined with session timeout validation. Use timestamps 30 seconds in the past for all valid-session test data. Conceived by Romuald Członkowski - www.aiadvisors.pl/en Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
committed by
GitHub
parent
07bd1d4cc2
commit
de2abaf89d
@@ -1,752 +0,0 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import {
|
||||
validateAIAgent,
|
||||
validateChatTrigger,
|
||||
validateBasicLLMChain,
|
||||
buildReverseConnectionMap,
|
||||
getAIConnections,
|
||||
validateAISpecificNodes,
|
||||
type WorkflowNode,
|
||||
type WorkflowJson
|
||||
} from '@/services/ai-node-validator';
|
||||
|
||||
describe('AI Node Validator', () => {
|
||||
describe('buildReverseConnectionMap', () => {
|
||||
it('should build reverse connections for AI language model', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
|
||||
expect(reverseMap.get('AI Agent')).toEqual([
|
||||
{
|
||||
sourceName: 'OpenAI',
|
||||
sourceType: 'ai_languageModel',
|
||||
type: 'ai_languageModel',
|
||||
index: 0
|
||||
}
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle multiple AI connections to same node', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'HTTP Request Tool': {
|
||||
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
},
|
||||
'Window Buffer Memory': {
|
||||
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const agentConnections = reverseMap.get('AI Agent');
|
||||
|
||||
expect(agentConnections).toHaveLength(3);
|
||||
expect(agentConnections).toContainEqual(
|
||||
expect.objectContaining({ type: 'ai_languageModel' })
|
||||
);
|
||||
expect(agentConnections).toContainEqual(
|
||||
expect.objectContaining({ type: 'ai_tool' })
|
||||
);
|
||||
expect(agentConnections).toContainEqual(
|
||||
expect.objectContaining({ type: 'ai_memory' })
|
||||
);
|
||||
});
|
||||
|
||||
it('should skip empty source names', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'': {
|
||||
'main': [[{ node: 'Target', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
|
||||
expect(reverseMap.has('Target')).toBe(false);
|
||||
});
|
||||
|
||||
it('should skip empty target node names', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'Source': {
|
||||
'main': [[{ node: '', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
|
||||
expect(reverseMap.size).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getAIConnections', () => {
|
||||
it('should filter AI connections from all incoming connections', () => {
|
||||
const reverseMap = new Map();
|
||||
reverseMap.set('AI Agent', [
|
||||
{ sourceName: 'Chat Trigger', type: 'main', index: 0 },
|
||||
{ sourceName: 'OpenAI', type: 'ai_languageModel', index: 0 },
|
||||
{ sourceName: 'HTTP Tool', type: 'ai_tool', index: 0 }
|
||||
]);
|
||||
|
||||
const aiConnections = getAIConnections('AI Agent', reverseMap);
|
||||
|
||||
expect(aiConnections).toHaveLength(2);
|
||||
expect(aiConnections).not.toContainEqual(
|
||||
expect.objectContaining({ type: 'main' })
|
||||
);
|
||||
});
|
||||
|
||||
it('should filter by specific AI connection type', () => {
|
||||
const reverseMap = new Map();
|
||||
reverseMap.set('AI Agent', [
|
||||
{ sourceName: 'OpenAI', type: 'ai_languageModel', index: 0 },
|
||||
{ sourceName: 'Tool1', type: 'ai_tool', index: 0 },
|
||||
{ sourceName: 'Tool2', type: 'ai_tool', index: 1 }
|
||||
]);
|
||||
|
||||
const toolConnections = getAIConnections('AI Agent', reverseMap, 'ai_tool');
|
||||
|
||||
expect(toolConnections).toHaveLength(2);
|
||||
expect(toolConnections.every(c => c.type === 'ai_tool')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return empty array for node with no connections', () => {
|
||||
const reverseMap = new Map();
|
||||
|
||||
const connections = getAIConnections('Unknown Node', reverseMap);
|
||||
|
||||
expect(connections).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAIAgent', () => {
|
||||
it('should error on missing language model connection', () => {
|
||||
const node: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [node],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(node, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('language model')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should accept single language model connection', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' }
|
||||
};
|
||||
|
||||
const model: WorkflowNode = {
|
||||
id: 'llm1',
|
||||
name: 'OpenAI',
|
||||
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
|
||||
position: [0, -100],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent, model],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
const languageModelErrors = issues.filter(i =>
|
||||
i.severity === 'error' && i.message.includes('language model')
|
||||
);
|
||||
expect(languageModelErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should accept dual language model connection for fallback', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' },
|
||||
typeVersion: 1.7
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI GPT-4': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'OpenAI GPT-3.5': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 1 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
const excessModelErrors = issues.filter(i =>
|
||||
i.severity === 'error' && i.message.includes('more than 2')
|
||||
);
|
||||
expect(excessModelErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should error on more than 2 language model connections', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'Model1': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Model2': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 1 }]]
|
||||
},
|
||||
'Model3': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 2 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'TOO_MANY_LANGUAGE_MODELS'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should error on streaming mode with main output connections', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
options: { streamResponse: true }
|
||||
}
|
||||
};
|
||||
|
||||
const responseNode: WorkflowNode = {
|
||||
id: 'response1',
|
||||
name: 'Response Node',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent, responseNode],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'AI Agent': {
|
||||
'main': [[{ node: 'Response Node', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'STREAMING_WITH_MAIN_OUTPUT'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should error on missing prompt text for define promptType', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'define'
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'MISSING_PROMPT_TEXT'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should info on short systemMessage', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
systemMessage: 'Help user' // Too short (< 20 chars)
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'info',
|
||||
message: expect.stringContaining('systemMessage is very short')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should error on multiple memory connections', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' }
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Memory1': {
|
||||
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 0 }]]
|
||||
},
|
||||
'Memory2': {
|
||||
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 1 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'MULTIPLE_MEMORY_CONNECTIONS'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should warn on high maxIterations', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
maxIterations: 60 // Exceeds threshold of 50
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'warning',
|
||||
message: expect.stringContaining('maxIterations')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should validate output parser with hasOutputParser flag', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
hasOutputParser: true
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('output parser')
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateChatTrigger', () => {
|
||||
it('should error on streaming mode to non-AI-Agent target', () => {
|
||||
const trigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
options: { responseMode: 'streaming' }
|
||||
}
|
||||
};
|
||||
|
||||
const codeNode: WorkflowNode = {
|
||||
id: 'code1',
|
||||
name: 'Code',
|
||||
type: 'n8n-nodes-base.code',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [trigger, codeNode],
|
||||
connections: {
|
||||
'Chat Trigger': {
|
||||
'main': [[{ node: 'Code', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateChatTrigger(trigger, workflow, reverseMap);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'STREAMING_WRONG_TARGET'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should pass valid Chat Trigger with streaming to AI Agent', () => {
|
||||
const trigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
options: { responseMode: 'streaming' }
|
||||
}
|
||||
};
|
||||
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [trigger, agent],
|
||||
connections: {
|
||||
'Chat Trigger': {
|
||||
'main': [[{ node: 'AI Agent', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateChatTrigger(trigger, workflow, reverseMap);
|
||||
|
||||
const errors = issues.filter(i => i.severity === 'error');
|
||||
expect(errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should error on missing outgoing connections', () => {
|
||||
const trigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [trigger],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateChatTrigger(trigger, workflow, reverseMap);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'MISSING_CONNECTIONS'
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateBasicLLMChain', () => {
|
||||
it('should error on missing language model connection', () => {
|
||||
const chain: WorkflowNode = {
|
||||
id: 'chain1',
|
||||
name: 'LLM Chain',
|
||||
type: '@n8n/n8n-nodes-langchain.chainLlm',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [chain],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateBasicLLMChain(chain, reverseMap);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('language model')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should pass valid LLM Chain', () => {
|
||||
const chain: WorkflowNode = {
|
||||
id: 'chain1',
|
||||
name: 'LLM Chain',
|
||||
type: '@n8n/n8n-nodes-langchain.chainLlm',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
prompt: 'Summarize the following text: {{$json.text}}'
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [chain],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'LLM Chain', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateBasicLLMChain(chain, reverseMap);
|
||||
|
||||
const errors = issues.filter(i => i.severity === 'error');
|
||||
expect(errors).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAISpecificNodes', () => {
|
||||
it('should validate complete AI Agent workflow', () => {
|
||||
const chatTrigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [200, 0],
|
||||
parameters: {
|
||||
promptType: 'auto'
|
||||
}
|
||||
};
|
||||
|
||||
const model: WorkflowNode = {
|
||||
id: 'llm1',
|
||||
name: 'OpenAI',
|
||||
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
|
||||
position: [200, -100],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const httpTool: WorkflowNode = {
|
||||
id: 'tool1',
|
||||
name: 'Weather API',
|
||||
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
|
||||
position: [200, 100],
|
||||
parameters: {
|
||||
toolDescription: 'Get current weather for a city',
|
||||
method: 'GET',
|
||||
url: 'https://api.weather.com/v1/current?city={city}',
|
||||
placeholderDefinitions: {
|
||||
values: [
|
||||
{ name: 'city', description: 'City name' }
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [chatTrigger, agent, model, httpTool],
|
||||
connections: {
|
||||
'Chat Trigger': {
|
||||
'main': [[{ node: 'AI Agent', type: 'main', index: 0 }]]
|
||||
},
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Weather API': {
|
||||
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const issues = validateAISpecificNodes(workflow);
|
||||
|
||||
const errors = issues.filter(i => i.severity === 'error');
|
||||
expect(errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should detect missing language model in workflow', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const issues = validateAISpecificNodes(workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('language model')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should validate all AI tool sub-nodes in workflow', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' }
|
||||
};
|
||||
|
||||
const invalidTool: WorkflowNode = {
|
||||
id: 'tool1',
|
||||
name: 'Bad Tool',
|
||||
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
|
||||
position: [0, 100],
|
||||
parameters: {} // Missing toolDescription and url
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent, invalidTool],
|
||||
connections: {
|
||||
'Model': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Bad Tool': {
|
||||
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const issues = validateAISpecificNodes(workflow);
|
||||
|
||||
// Should have errors from missing toolDescription and url
|
||||
expect(issues.filter(i => i.severity === 'error').length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,4 +1,14 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import {
|
||||
validateAIAgent,
|
||||
validateChatTrigger,
|
||||
validateBasicLLMChain,
|
||||
buildReverseConnectionMap,
|
||||
getAIConnections,
|
||||
validateAISpecificNodes,
|
||||
type WorkflowNode,
|
||||
type WorkflowJson
|
||||
} from '@/services/ai-node-validator';
|
||||
import {
|
||||
validateHTTPRequestTool,
|
||||
validateCodeTool,
|
||||
@@ -12,9 +22,748 @@ import {
|
||||
validateWikipediaTool,
|
||||
validateSearXngTool,
|
||||
validateWolframAlphaTool,
|
||||
type WorkflowNode
|
||||
} from '@/services/ai-tool-validators';
|
||||
|
||||
describe('AI Node Validator', () => {
|
||||
describe('buildReverseConnectionMap', () => {
|
||||
it('should build reverse connections for AI language model', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
|
||||
expect(reverseMap.get('AI Agent')).toEqual([
|
||||
{
|
||||
sourceName: 'OpenAI',
|
||||
sourceType: 'ai_languageModel',
|
||||
type: 'ai_languageModel',
|
||||
index: 0
|
||||
}
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle multiple AI connections to same node', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'HTTP Request Tool': {
|
||||
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
},
|
||||
'Window Buffer Memory': {
|
||||
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const agentConnections = reverseMap.get('AI Agent');
|
||||
|
||||
expect(agentConnections).toHaveLength(3);
|
||||
expect(agentConnections).toContainEqual(
|
||||
expect.objectContaining({ type: 'ai_languageModel' })
|
||||
);
|
||||
expect(agentConnections).toContainEqual(
|
||||
expect.objectContaining({ type: 'ai_tool' })
|
||||
);
|
||||
expect(agentConnections).toContainEqual(
|
||||
expect.objectContaining({ type: 'ai_memory' })
|
||||
);
|
||||
});
|
||||
|
||||
it('should skip empty source names', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'': {
|
||||
'main': [[{ node: 'Target', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
|
||||
expect(reverseMap.has('Target')).toBe(false);
|
||||
});
|
||||
|
||||
it('should skip empty target node names', () => {
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [],
|
||||
connections: {
|
||||
'Source': {
|
||||
'main': [[{ node: '', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
|
||||
expect(reverseMap.size).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getAIConnections', () => {
|
||||
it('should filter AI connections from all incoming connections', () => {
|
||||
const reverseMap = new Map();
|
||||
reverseMap.set('AI Agent', [
|
||||
{ sourceName: 'Chat Trigger', type: 'main', index: 0 },
|
||||
{ sourceName: 'OpenAI', type: 'ai_languageModel', index: 0 },
|
||||
{ sourceName: 'HTTP Tool', type: 'ai_tool', index: 0 }
|
||||
]);
|
||||
|
||||
const aiConnections = getAIConnections('AI Agent', reverseMap);
|
||||
|
||||
expect(aiConnections).toHaveLength(2);
|
||||
expect(aiConnections).not.toContainEqual(
|
||||
expect.objectContaining({ type: 'main' })
|
||||
);
|
||||
});
|
||||
|
||||
it('should filter by specific AI connection type', () => {
|
||||
const reverseMap = new Map();
|
||||
reverseMap.set('AI Agent', [
|
||||
{ sourceName: 'OpenAI', type: 'ai_languageModel', index: 0 },
|
||||
{ sourceName: 'Tool1', type: 'ai_tool', index: 0 },
|
||||
{ sourceName: 'Tool2', type: 'ai_tool', index: 1 }
|
||||
]);
|
||||
|
||||
const toolConnections = getAIConnections('AI Agent', reverseMap, 'ai_tool');
|
||||
|
||||
expect(toolConnections).toHaveLength(2);
|
||||
expect(toolConnections.every(c => c.type === 'ai_tool')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return empty array for node with no connections', () => {
|
||||
const reverseMap = new Map();
|
||||
|
||||
const connections = getAIConnections('Unknown Node', reverseMap);
|
||||
|
||||
expect(connections).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAIAgent', () => {
|
||||
it('should error on missing language model connection', () => {
|
||||
const node: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [node],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(node, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('language model')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should accept single language model connection', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' }
|
||||
};
|
||||
|
||||
const model: WorkflowNode = {
|
||||
id: 'llm1',
|
||||
name: 'OpenAI',
|
||||
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
|
||||
position: [0, -100],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent, model],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
const languageModelErrors = issues.filter(i =>
|
||||
i.severity === 'error' && i.message.includes('language model')
|
||||
);
|
||||
expect(languageModelErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should accept dual language model connection for fallback', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' },
|
||||
typeVersion: 1.7
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI GPT-4': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'OpenAI GPT-3.5': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 1 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
const excessModelErrors = issues.filter(i =>
|
||||
i.severity === 'error' && i.message.includes('more than 2')
|
||||
);
|
||||
expect(excessModelErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should error on more than 2 language model connections', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'Model1': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Model2': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 1 }]]
|
||||
},
|
||||
'Model3': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 2 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'TOO_MANY_LANGUAGE_MODELS'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should error on streaming mode with main output connections', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
options: { streamResponse: true }
|
||||
}
|
||||
};
|
||||
|
||||
const responseNode: WorkflowNode = {
|
||||
id: 'response1',
|
||||
name: 'Response Node',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent, responseNode],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'AI Agent': {
|
||||
'main': [[{ node: 'Response Node', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'STREAMING_WITH_MAIN_OUTPUT'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should error on missing prompt text for define promptType', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'define'
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'MISSING_PROMPT_TEXT'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should info on short systemMessage', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
systemMessage: 'Help user'
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'info',
|
||||
message: expect.stringContaining('systemMessage is very short')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should error on multiple memory connections', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' }
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Memory1': {
|
||||
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 0 }]]
|
||||
},
|
||||
'Memory2': {
|
||||
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 1 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'MULTIPLE_MEMORY_CONNECTIONS'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should warn on high maxIterations', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
maxIterations: 60
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'warning',
|
||||
message: expect.stringContaining('maxIterations')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should validate output parser with hasOutputParser flag', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
promptType: 'auto',
|
||||
hasOutputParser: true
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateAIAgent(agent, reverseMap, workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('output parser')
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateChatTrigger', () => {
|
||||
it('should error on streaming mode to non-AI-Agent target', () => {
|
||||
const trigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
options: { responseMode: 'streaming' }
|
||||
}
|
||||
};
|
||||
|
||||
const codeNode: WorkflowNode = {
|
||||
id: 'code1',
|
||||
name: 'Code',
|
||||
type: 'n8n-nodes-base.code',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [trigger, codeNode],
|
||||
connections: {
|
||||
'Chat Trigger': {
|
||||
'main': [[{ node: 'Code', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateChatTrigger(trigger, workflow, reverseMap);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'STREAMING_WRONG_TARGET'
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should pass valid Chat Trigger with streaming to AI Agent', () => {
|
||||
const trigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
options: { responseMode: 'streaming' }
|
||||
}
|
||||
};
|
||||
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [trigger, agent],
|
||||
connections: {
|
||||
'Chat Trigger': {
|
||||
'main': [[{ node: 'AI Agent', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateChatTrigger(trigger, workflow, reverseMap);
|
||||
|
||||
const errors = issues.filter(i => i.severity === 'error');
|
||||
expect(errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should error on missing outgoing connections', () => {
|
||||
const trigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [trigger],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateChatTrigger(trigger, workflow, reverseMap);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
code: 'MISSING_CONNECTIONS'
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateBasicLLMChain', () => {
|
||||
it('should error on missing language model connection', () => {
|
||||
const chain: WorkflowNode = {
|
||||
id: 'chain1',
|
||||
name: 'LLM Chain',
|
||||
type: '@n8n/n8n-nodes-langchain.chainLlm',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [chain],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateBasicLLMChain(chain, reverseMap);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('language model')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should pass valid LLM Chain', () => {
|
||||
const chain: WorkflowNode = {
|
||||
id: 'chain1',
|
||||
name: 'LLM Chain',
|
||||
type: '@n8n/n8n-nodes-langchain.chainLlm',
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
prompt: 'Summarize the following text: {{$json.text}}'
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [chain],
|
||||
connections: {
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'LLM Chain', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const reverseMap = buildReverseConnectionMap(workflow);
|
||||
const issues = validateBasicLLMChain(chain, reverseMap);
|
||||
|
||||
const errors = issues.filter(i => i.severity === 'error');
|
||||
expect(errors).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAISpecificNodes', () => {
|
||||
it('should validate complete AI Agent workflow', () => {
|
||||
const chatTrigger: WorkflowNode = {
|
||||
id: 'chat1',
|
||||
name: 'Chat Trigger',
|
||||
type: '@n8n/n8n-nodes-langchain.chatTrigger',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [200, 0],
|
||||
parameters: {
|
||||
promptType: 'auto'
|
||||
}
|
||||
};
|
||||
|
||||
const model: WorkflowNode = {
|
||||
id: 'llm1',
|
||||
name: 'OpenAI',
|
||||
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
|
||||
position: [200, -100],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const httpTool: WorkflowNode = {
|
||||
id: 'tool1',
|
||||
name: 'Weather API',
|
||||
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
|
||||
position: [200, 100],
|
||||
parameters: {
|
||||
toolDescription: 'Get current weather for a city',
|
||||
method: 'GET',
|
||||
url: 'https://api.weather.com/v1/current?city={city}',
|
||||
placeholderDefinitions: {
|
||||
values: [
|
||||
{ name: 'city', description: 'City name' }
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [chatTrigger, agent, model, httpTool],
|
||||
connections: {
|
||||
'Chat Trigger': {
|
||||
'main': [[{ node: 'AI Agent', type: 'main', index: 0 }]]
|
||||
},
|
||||
'OpenAI': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Weather API': {
|
||||
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const issues = validateAISpecificNodes(workflow);
|
||||
|
||||
const errors = issues.filter(i => i.severity === 'error');
|
||||
expect(errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should detect missing language model in workflow', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const issues = validateAISpecificNodes(workflow);
|
||||
|
||||
expect(issues).toContainEqual(
|
||||
expect.objectContaining({
|
||||
severity: 'error',
|
||||
message: expect.stringContaining('language model')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should validate all AI tool sub-nodes in workflow', () => {
|
||||
const agent: WorkflowNode = {
|
||||
id: 'agent1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
position: [0, 0],
|
||||
parameters: { promptType: 'auto' }
|
||||
};
|
||||
|
||||
const invalidTool: WorkflowNode = {
|
||||
id: 'tool1',
|
||||
name: 'Bad Tool',
|
||||
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
|
||||
position: [0, 100],
|
||||
parameters: {}
|
||||
};
|
||||
|
||||
const workflow: WorkflowJson = {
|
||||
nodes: [agent, invalidTool],
|
||||
connections: {
|
||||
'Model': {
|
||||
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
},
|
||||
'Bad Tool': {
|
||||
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const issues = validateAISpecificNodes(workflow);
|
||||
|
||||
expect(issues.filter(i => i.severity === 'error').length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('AI Tool Validators', () => {
|
||||
describe('validateHTTPRequestTool', () => {
|
||||
it('should error on missing toolDescription', () => {
|
||||
@@ -48,7 +797,7 @@ describe('AI Tool Validators', () => {
|
||||
parameters: {
|
||||
method: 'GET',
|
||||
url: 'https://api.weather.com/data',
|
||||
toolDescription: 'Weather' // Too short (7 chars, need 15)
|
||||
toolDescription: 'Weather'
|
||||
}
|
||||
};
|
||||
|
||||
@@ -120,7 +869,6 @@ describe('AI Tool Validators', () => {
|
||||
|
||||
const issues = validateHTTPRequestTool(node);
|
||||
|
||||
// Should not error on URL format when it contains expressions
|
||||
const urlErrors = issues.filter(i => i.code === 'INVALID_URL_FORMAT');
|
||||
expect(urlErrors).toHaveLength(0);
|
||||
});
|
||||
@@ -194,7 +942,6 @@ describe('AI Tool Validators', () => {
|
||||
|
||||
const issues = validateHTTPRequestTool(node);
|
||||
|
||||
// Should have no errors
|
||||
const errors = issues.filter(i => i.severity === 'error');
|
||||
expect(errors).toHaveLength(0);
|
||||
});
|
||||
@@ -327,7 +1074,7 @@ return { cost: cost.toFixed(2) };`,
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
toolDescription: 'Search through product documentation',
|
||||
topK: 25 // Exceeds threshold of 20
|
||||
topK: 25
|
||||
}
|
||||
};
|
||||
|
||||
@@ -456,7 +1203,7 @@ return { cost: cost.toFixed(2) };`,
|
||||
position: [0, 0],
|
||||
parameters: {
|
||||
toolDescription: 'Performs complex research tasks',
|
||||
maxIterations: 60 // Exceeds threshold of 50
|
||||
maxIterations: 60
|
||||
}
|
||||
};
|
||||
|
||||
@@ -565,7 +1312,6 @@ return { cost: cost.toFixed(2) };`,
|
||||
|
||||
const issues = validateCalculatorTool(node);
|
||||
|
||||
// Calculator Tool has built-in description, no validation needed
|
||||
expect(issues).toHaveLength(0);
|
||||
});
|
||||
|
||||
@@ -599,7 +1345,6 @@ return { cost: cost.toFixed(2) };`,
|
||||
|
||||
const issues = validateThinkTool(node);
|
||||
|
||||
// Think Tool has built-in description, no validation needed
|
||||
expect(issues).toHaveLength(0);
|
||||
});
|
||||
|
||||
@@ -1,879 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { ConfigValidator } from '@/services/config-validator';
|
||||
import type { ValidationResult, ValidationError, ValidationWarning } from '@/services/config-validator';
|
||||
|
||||
// Mock the database
|
||||
vi.mock('better-sqlite3');
|
||||
|
||||
describe('ConfigValidator - Basic Validation', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('validate', () => {
|
||||
it('should validate required fields for Slack message post', () => {
|
||||
const nodeType = 'nodes-base.slack';
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'post'
|
||||
// Missing required 'channel' field
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
default: 'message',
|
||||
options: [
|
||||
{ name: 'Message', value: 'message' },
|
||||
{ name: 'Channel', value: 'channel' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
default: 'post',
|
||||
displayOptions: {
|
||||
show: { resource: ['message'] }
|
||||
},
|
||||
options: [
|
||||
{ name: 'Post', value: 'post' },
|
||||
{ name: 'Update', value: 'update' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'channel',
|
||||
type: 'string',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message'],
|
||||
operation: ['post']
|
||||
}
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({
|
||||
type: 'missing_required',
|
||||
property: 'channel',
|
||||
message: "Required property 'channel' is missing",
|
||||
fix: 'Add channel to your configuration'
|
||||
});
|
||||
});
|
||||
|
||||
it('should validate successfully with all required fields', () => {
|
||||
const nodeType = 'nodes-base.slack';
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'post',
|
||||
channel: '#general',
|
||||
text: 'Hello, Slack!'
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
default: 'message',
|
||||
options: [
|
||||
{ name: 'Message', value: 'message' },
|
||||
{ name: 'Channel', value: 'channel' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
default: 'post',
|
||||
displayOptions: {
|
||||
show: { resource: ['message'] }
|
||||
},
|
||||
options: [
|
||||
{ name: 'Post', value: 'post' },
|
||||
{ name: 'Update', value: 'update' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'channel',
|
||||
type: 'string',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message'],
|
||||
operation: ['post']
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'text',
|
||||
type: 'string',
|
||||
default: '',
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message'],
|
||||
operation: ['post']
|
||||
}
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle unknown node types gracefully', () => {
|
||||
const nodeType = 'nodes-base.unknown';
|
||||
const config = { field: 'value' };
|
||||
const properties: any[] = [];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
// May have warnings about unused properties
|
||||
});
|
||||
|
||||
it('should validate property types', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
numberField: 'not-a-number', // Should be number
|
||||
booleanField: 'yes' // Should be boolean
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'numberField', type: 'number' },
|
||||
{ name: 'booleanField', type: 'boolean' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors).toHaveLength(2);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'numberField' &&
|
||||
e.type === 'invalid_type'
|
||||
)).toBe(true);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'booleanField' &&
|
||||
e.type === 'invalid_type'
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate option values', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
selectField: 'invalid-option'
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'selectField',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ name: 'Option A', value: 'a' },
|
||||
{ name: 'Option B', value: 'b' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({
|
||||
type: 'invalid_value',
|
||||
property: 'selectField',
|
||||
message: expect.stringContaining('Invalid value')
|
||||
});
|
||||
});
|
||||
|
||||
it('should check property visibility based on displayOptions', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
resource: 'user',
|
||||
userField: 'visible'
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ name: 'User', value: 'user' },
|
||||
{ name: 'Post', value: 'post' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'userField',
|
||||
type: 'string',
|
||||
displayOptions: {
|
||||
show: { resource: ['user'] }
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'postField',
|
||||
type: 'string',
|
||||
displayOptions: {
|
||||
show: { resource: ['post'] }
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.visibleProperties).toContain('resource');
|
||||
expect(result.visibleProperties).toContain('userField');
|
||||
expect(result.hiddenProperties).toContain('postField');
|
||||
});
|
||||
|
||||
it('should handle empty properties array', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = { someField: 'value' };
|
||||
const properties: any[] = [];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle missing displayOptions gracefully', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = { field1: 'value1' };
|
||||
const properties = [
|
||||
{ name: 'field1', type: 'string' }
|
||||
// No displayOptions
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.visibleProperties).toContain('field1');
|
||||
});
|
||||
|
||||
it('should validate options with array format', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = { optionField: 'b' };
|
||||
const properties = [
|
||||
{
|
||||
name: 'optionField',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ name: 'Option A', value: 'a' },
|
||||
{ name: 'Option B', value: 'b' },
|
||||
{ name: 'Option C', value: 'c' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases and additional coverage', () => {
|
||||
it('should handle null and undefined config values', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
nullField: null,
|
||||
undefinedField: undefined,
|
||||
validField: 'value'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'nullField', type: 'string', required: true },
|
||||
{ name: 'undefinedField', type: 'string', required: true },
|
||||
{ name: 'validField', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors.some(e => e.property === 'nullField')).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'undefinedField')).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate nested displayOptions conditions', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
mode: 'advanced',
|
||||
resource: 'user',
|
||||
advancedUserField: 'value'
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'mode',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ name: 'Simple', value: 'simple' },
|
||||
{ name: 'Advanced', value: 'advanced' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
displayOptions: {
|
||||
show: { mode: ['advanced'] }
|
||||
},
|
||||
options: [
|
||||
{ name: 'User', value: 'user' },
|
||||
{ name: 'Post', value: 'post' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'advancedUserField',
|
||||
type: 'string',
|
||||
displayOptions: {
|
||||
show: {
|
||||
mode: ['advanced'],
|
||||
resource: ['user']
|
||||
}
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.visibleProperties).toContain('advancedUserField');
|
||||
});
|
||||
|
||||
it('should handle hide conditions in displayOptions', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
showAdvanced: false,
|
||||
hiddenField: 'should-not-be-here'
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'showAdvanced',
|
||||
type: 'boolean'
|
||||
},
|
||||
{
|
||||
name: 'hiddenField',
|
||||
type: 'string',
|
||||
displayOptions: {
|
||||
hide: { showAdvanced: [false] }
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.hiddenProperties).toContain('hiddenField');
|
||||
expect(result.warnings.some(w =>
|
||||
w.property === 'hiddenField' &&
|
||||
w.type === 'inefficient'
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle internal properties that start with underscore', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
'@version': 1,
|
||||
'_internalField': 'value',
|
||||
normalField: 'value'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'normalField', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Should not warn about @version or _internalField
|
||||
expect(result.warnings.some(w =>
|
||||
w.property === '@version' ||
|
||||
w.property === '_internalField'
|
||||
)).toBe(false);
|
||||
});
|
||||
|
||||
it('should warn about inefficient configured but hidden properties', () => {
|
||||
const nodeType = 'nodes-base.test'; // Changed from Code node
|
||||
const config = {
|
||||
mode: 'manual',
|
||||
automaticField: 'This will not be used'
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'mode',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ name: 'Manual', value: 'manual' },
|
||||
{ name: 'Automatic', value: 'automatic' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'automaticField',
|
||||
type: 'string',
|
||||
displayOptions: {
|
||||
show: { mode: ['automatic'] }
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'inefficient' &&
|
||||
w.property === 'automaticField' &&
|
||||
w.message.includes("won't be used")
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should suggest commonly used properties', () => {
|
||||
const nodeType = 'nodes-base.httpRequest';
|
||||
const config = {
|
||||
method: 'GET',
|
||||
url: 'https://api.example.com/data'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'method', type: 'options' },
|
||||
{ name: 'url', type: 'string' },
|
||||
{ name: 'headers', type: 'json' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Common properties suggestion not implemented for headers
|
||||
expect(result.suggestions.length).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('resourceLocator validation', () => {
|
||||
it('should reject string value when resourceLocator object is required', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: 'gpt-4o-mini' // Wrong - should be object with mode and value
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
displayName: 'Model',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
default: { mode: 'list', value: 'gpt-4o-mini' }
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({
|
||||
type: 'invalid_type',
|
||||
property: 'model',
|
||||
message: expect.stringContaining('must be an object with \'mode\' and \'value\' properties')
|
||||
});
|
||||
expect(result.errors[0].fix).toContain('mode');
|
||||
expect(result.errors[0].fix).toContain('value');
|
||||
});
|
||||
|
||||
it('should accept valid resourceLocator with mode and value', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'list',
|
||||
value: 'gpt-4o-mini'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
displayName: 'Model',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
default: { mode: 'list', value: 'gpt-4o-mini' }
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should reject null value for resourceLocator', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: null
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'model' &&
|
||||
e.type === 'invalid_type'
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should reject array value for resourceLocator', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: ['gpt-4o-mini']
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'model' &&
|
||||
e.type === 'invalid_type' &&
|
||||
e.message.includes('must be an object')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing mode property in resourceLocator', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
value: 'gpt-4o-mini'
|
||||
// Missing mode property
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'model.mode' &&
|
||||
e.type === 'missing_required' &&
|
||||
e.message.includes('missing required property \'mode\'')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing value property in resourceLocator', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'list'
|
||||
// Missing value property
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
displayName: 'Model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'model.value' &&
|
||||
e.type === 'missing_required' &&
|
||||
e.message.includes('missing required property \'value\'')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect invalid mode type in resourceLocator', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 123, // Should be string
|
||||
value: 'gpt-4o-mini'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'model.mode' &&
|
||||
e.type === 'invalid_type' &&
|
||||
e.message.includes('must be a string')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should accept resourceLocator with mode "id"', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'id',
|
||||
value: 'gpt-4o-2024-11-20'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should reject number value when resourceLocator is required', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: 12345 // Wrong type
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors[0].type).toBe('invalid_type');
|
||||
expect(result.errors[0].message).toContain('must be an object');
|
||||
});
|
||||
|
||||
it('should provide helpful fix suggestion for string to resourceLocator conversion', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: 'gpt-4o-mini'
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors[0].fix).toContain('{ mode: "list", value: "gpt-4o-mini" }');
|
||||
expect(result.errors[0].fix).toContain('{ mode: "id", value: "gpt-4o-mini" }');
|
||||
});
|
||||
|
||||
it('should reject invalid mode values when schema defines allowed modes', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'invalid-mode',
|
||||
value: 'gpt-4o-mini'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
// In real n8n, modes are at top level, not in typeOptions
|
||||
modes: [
|
||||
{ name: 'list', displayName: 'List' },
|
||||
{ name: 'id', displayName: 'ID' },
|
||||
{ name: 'url', displayName: 'URL' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'model.mode' &&
|
||||
e.type === 'invalid_value' &&
|
||||
e.message.includes('must be one of [list, id, url]')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle modes defined as array format', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'custom',
|
||||
value: 'gpt-4o-mini'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
// Array format at top level (real n8n structure)
|
||||
modes: [
|
||||
{ name: 'list', displayName: 'List' },
|
||||
{ name: 'id', displayName: 'ID' },
|
||||
{ name: 'custom', displayName: 'Custom' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle malformed modes schema gracefully', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'any-mode',
|
||||
value: 'gpt-4o-mini'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
modes: 'invalid-string' // Malformed schema at top level
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Should NOT crash, should skip validation
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'model.mode')).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle empty modes definition gracefully', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'any-mode',
|
||||
value: 'gpt-4o-mini'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
modes: {} // Empty object at top level
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Should skip validation with empty modes
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'model.mode')).toBe(false);
|
||||
});
|
||||
|
||||
it('should skip mode validation when modes not provided', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'custom-mode',
|
||||
value: 'gpt-4o-mini'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
// No modes property - schema doesn't define modes
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Should accept any mode when schema doesn't define them
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should accept resourceLocator with mode "url"', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'url',
|
||||
value: 'https://api.example.com/models/custom'
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should detect empty resourceLocator object', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {} // Empty object, missing both mode and value
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.length).toBeGreaterThanOrEqual(2); // Both mode and value missing
|
||||
expect(result.errors.some(e => e.property === 'model.mode')).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'model.value')).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle resourceLocator with extra properties gracefully', () => {
|
||||
const nodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
const config = {
|
||||
model: {
|
||||
mode: 'list',
|
||||
value: 'gpt-4o-mini',
|
||||
extraProperty: 'ignored' // Extra properties should be ignored
|
||||
}
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'model',
|
||||
type: 'resourceLocator',
|
||||
required: true
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true); // Should pass with extra properties
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,524 +0,0 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { ConfigValidator } from '../../../src/services/config-validator';
|
||||
|
||||
describe('ConfigValidator _cnd operators', () => {
|
||||
describe('isPropertyVisible with _cnd operators', () => {
|
||||
describe('eq operator', () => {
|
||||
it('should match when values are equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { status: [{ _cnd: { eq: 'active' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'active' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when values are not equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { status: [{ _cnd: { eq: 'active' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'inactive' })).toBe(false);
|
||||
});
|
||||
|
||||
it('should match numeric equality', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { eq: 1 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('not operator', () => {
|
||||
it('should match when values are not equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { status: [{ _cnd: { not: 'disabled' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'active' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when values are equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { status: [{ _cnd: { not: 'disabled' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'disabled' })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('gte operator (greater than or equal)', () => {
|
||||
it('should match when value is greater', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { gte: 1.1 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should match when value is equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { gte: 1.1 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.1 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when value is less', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { gte: 1.1 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.0 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('lte operator (less than or equal)', () => {
|
||||
it('should match when value is less', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { lte: 2.0 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.5 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should match when value is equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { lte: 2.0 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when value is greater', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { lte: 2.0 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.5 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('gt operator (greater than)', () => {
|
||||
it('should match when value is greater', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { count: [{ _cnd: { gt: 5 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { count: 10 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when value is equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { count: [{ _cnd: { gt: 5 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { count: 5 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('lt operator (less than)', () => {
|
||||
it('should match when value is less', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { count: [{ _cnd: { lt: 10 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { count: 5 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when value is equal', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { count: [{ _cnd: { lt: 10 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { count: 10 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('between operator', () => {
|
||||
it('should match when value is within range', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 4.3 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should match when value equals lower bound', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 4 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should match when value equals upper bound', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 4.6 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when value is below range', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 3.9 })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match when value is above range', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 5 })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match when between structure is null', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: null } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 4 })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match when between is missing from field', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: { to: 5 } } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 4 })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match when between is missing to field', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { between: { from: 3 } } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 4 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('startsWith operator', () => {
|
||||
it('should match when string starts with prefix', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { name: [{ _cnd: { startsWith: 'test' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { name: 'testUser' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when string does not start with prefix', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { name: [{ _cnd: { startsWith: 'test' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { name: 'mytest' })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match non-string values', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { value: [{ _cnd: { startsWith: 'test' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { value: 123 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('endsWith operator', () => {
|
||||
it('should match when string ends with suffix', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { email: [{ _cnd: { endsWith: '@example.com' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { email: 'user@example.com' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when string does not end with suffix', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { email: [{ _cnd: { endsWith: '@example.com' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { email: 'user@other.com' })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('includes operator', () => {
|
||||
it('should match when string contains substring', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { eventId: [{ _cnd: { includes: '_' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { eventId: 'event_123' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when string does not contain substring', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { eventId: [{ _cnd: { includes: '_' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { eventId: 'event123' })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('regex operator', () => {
|
||||
it('should match when string matches regex pattern', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { id: [{ _cnd: { regex: '^[A-Z]{3}\\d{4}$' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { id: 'ABC1234' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when string does not match regex pattern', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { id: [{ _cnd: { regex: '^[A-Z]{3}\\d{4}$' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { id: 'abc1234' })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match when regex pattern is invalid', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { id: [{ _cnd: { regex: '[invalid(regex' } }] }
|
||||
}
|
||||
};
|
||||
// Invalid regex should return false without throwing
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { id: 'test' })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match non-string values', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { value: [{ _cnd: { regex: '\\d+' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { value: 123 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('exists operator', () => {
|
||||
it('should match when field exists and is not null', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { optionalField: [{ _cnd: { exists: true } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { optionalField: 'value' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should match when field exists with value 0', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { optionalField: [{ _cnd: { exists: true } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { optionalField: 0 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should match when field exists with empty string', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { optionalField: [{ _cnd: { exists: true } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { optionalField: '' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match when field is undefined', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { optionalField: [{ _cnd: { exists: true } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { otherField: 'value' })).toBe(false);
|
||||
});
|
||||
|
||||
it('should not match when field is null', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { optionalField: [{ _cnd: { exists: true } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { optionalField: null })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('mixed plain values and _cnd conditions', () => {
|
||||
it('should match plain value in array with _cnd', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { status: ['active', { _cnd: { eq: 'pending' } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'active' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'pending' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'disabled' })).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle multiple conditions with AND logic', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: {
|
||||
'@version': [{ _cnd: { gte: 1.1 } }],
|
||||
mode: ['advanced']
|
||||
}
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0, mode: 'advanced' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0, mode: 'basic' })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.0, mode: 'advanced' })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('hide conditions with _cnd', () => {
|
||||
it('should hide property when _cnd condition matches', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
hide: { '@version': [{ _cnd: { lt: 2.0 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.5 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.5 })).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Execute Workflow Trigger scenario', () => {
|
||||
it('should show property when @version >= 1.1', () => {
|
||||
const prop = {
|
||||
name: 'inputSource',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { gte: 1.1 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.1 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.2 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0 })).toBe(true);
|
||||
});
|
||||
|
||||
it('should hide property when @version < 1.1', () => {
|
||||
const prop = {
|
||||
name: 'inputSource',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { gte: 1.1 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.0 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 0.9 })).toBe(false);
|
||||
});
|
||||
|
||||
it('should show outdated version warning only for v1', () => {
|
||||
const prop = {
|
||||
name: 'outdatedVersionWarning',
|
||||
displayOptions: {
|
||||
show: { '@version': [{ _cnd: { eq: 1 } }] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.1 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2 })).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('backward compatibility with plain values', () => {
|
||||
it('should continue to work with plain value arrays', () => {
|
||||
const prop = {
|
||||
name: 'testField',
|
||||
displayOptions: {
|
||||
show: { resource: ['user', 'message'] }
|
||||
}
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { resource: 'user' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { resource: 'message' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { resource: 'channel' })).toBe(false);
|
||||
});
|
||||
|
||||
it('should work with properties without displayOptions', () => {
|
||||
const prop = {
|
||||
name: 'testField'
|
||||
};
|
||||
expect(ConfigValidator.isPropertyVisible(prop, {})).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,387 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { ConfigValidator } from '@/services/config-validator';
|
||||
import type { ValidationResult, ValidationError, ValidationWarning } from '@/services/config-validator';
|
||||
|
||||
// Mock the database
|
||||
vi.mock('better-sqlite3');
|
||||
|
||||
describe('ConfigValidator - Edge Cases', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('Null and Undefined Handling', () => {
|
||||
it('should handle null config gracefully', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = null as any;
|
||||
const properties: any[] = [];
|
||||
|
||||
expect(() => {
|
||||
ConfigValidator.validate(nodeType, config, properties);
|
||||
}).toThrow(TypeError);
|
||||
});
|
||||
|
||||
it('should handle undefined config gracefully', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = undefined as any;
|
||||
const properties: any[] = [];
|
||||
|
||||
expect(() => {
|
||||
ConfigValidator.validate(nodeType, config, properties);
|
||||
}).toThrow(TypeError);
|
||||
});
|
||||
|
||||
it('should handle null properties array gracefully', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {};
|
||||
const properties = null as any;
|
||||
|
||||
expect(() => {
|
||||
ConfigValidator.validate(nodeType, config, properties);
|
||||
}).toThrow(TypeError);
|
||||
});
|
||||
|
||||
it('should handle undefined properties array gracefully', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {};
|
||||
const properties = undefined as any;
|
||||
|
||||
expect(() => {
|
||||
ConfigValidator.validate(nodeType, config, properties);
|
||||
}).toThrow(TypeError);
|
||||
});
|
||||
|
||||
it('should handle properties with null values in config', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
nullField: null,
|
||||
undefinedField: undefined,
|
||||
validField: 'value'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'nullField', type: 'string', required: true },
|
||||
{ name: 'undefinedField', type: 'string', required: true },
|
||||
{ name: 'validField', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Check that we have errors for both null and undefined required fields
|
||||
expect(result.errors.some(e => e.property === 'nullField')).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'undefinedField')).toBe(true);
|
||||
|
||||
// The actual error types might vary, so let's just ensure we caught the errors
|
||||
const nullFieldError = result.errors.find(e => e.property === 'nullField');
|
||||
const undefinedFieldError = result.errors.find(e => e.property === 'undefinedField');
|
||||
|
||||
expect(nullFieldError).toBeDefined();
|
||||
expect(undefinedFieldError).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Boundary Value Testing', () => {
|
||||
it('should handle empty arrays', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
arrayField: []
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'arrayField', type: 'collection' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle very large property arrays', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = { field1: 'value1' };
|
||||
const properties = Array(1000).fill(null).map((_, i) => ({
|
||||
name: `field${i}`,
|
||||
type: 'string'
|
||||
}));
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle deeply nested displayOptions', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
level1: 'a',
|
||||
level2: 'b',
|
||||
level3: 'c',
|
||||
deepField: 'value'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'level1', type: 'options', options: ['a', 'b'] },
|
||||
{ name: 'level2', type: 'options', options: ['a', 'b'], displayOptions: { show: { level1: ['a'] } } },
|
||||
{ name: 'level3', type: 'options', options: ['a', 'b', 'c'], displayOptions: { show: { level1: ['a'], level2: ['b'] } } },
|
||||
{ name: 'deepField', type: 'string', displayOptions: { show: { level1: ['a'], level2: ['b'], level3: ['c'] } } }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.visibleProperties).toContain('deepField');
|
||||
});
|
||||
|
||||
it('should handle extremely long string values', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const longString = 'a'.repeat(10000);
|
||||
const config = {
|
||||
longField: longString
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'longField', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Invalid Data Type Handling', () => {
|
||||
it('should handle NaN values', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
numberField: NaN
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'numberField', type: 'number' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// NaN is technically type 'number' in JavaScript, so type validation passes
|
||||
// The validator might not have specific NaN checking, so we check for warnings
|
||||
// or just verify it doesn't crash
|
||||
expect(result).toBeDefined();
|
||||
expect(() => result).not.toThrow();
|
||||
});
|
||||
|
||||
it('should handle Infinity values', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
numberField: Infinity
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'numberField', type: 'number' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Infinity is technically a valid number in JavaScript
|
||||
// The validator might not flag it as an error, so just verify it handles it
|
||||
expect(result).toBeDefined();
|
||||
expect(() => result).not.toThrow();
|
||||
});
|
||||
|
||||
it('should handle objects when expecting primitives', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
stringField: { nested: 'object' },
|
||||
numberField: { value: 123 }
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'stringField', type: 'string' },
|
||||
{ name: 'numberField', type: 'number' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors).toHaveLength(2);
|
||||
expect(result.errors.every(e => e.type === 'invalid_type')).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle circular references in config', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config: any = { field: 'value' };
|
||||
config.circular = config; // Create circular reference
|
||||
const properties = [
|
||||
{ name: 'field', type: 'string' },
|
||||
{ name: 'circular', type: 'json' }
|
||||
];
|
||||
|
||||
// Should not throw error
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Performance Boundaries', () => {
|
||||
it('should validate large config objects within reasonable time', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config: Record<string, any> = {};
|
||||
const properties: any[] = [];
|
||||
|
||||
// Create a large config with 1000 properties
|
||||
for (let i = 0; i < 1000; i++) {
|
||||
config[`field_${i}`] = `value_${i}`;
|
||||
properties.push({
|
||||
name: `field_${i}`,
|
||||
type: 'string'
|
||||
});
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
const endTime = Date.now();
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(endTime - startTime).toBeLessThan(1000); // Should complete within 1 second
|
||||
});
|
||||
});
|
||||
|
||||
describe('Special Characters and Encoding', () => {
|
||||
it('should handle special characters in property values', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
specialField: 'Value with special chars: <>&"\'`\n\r\t'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'specialField', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle unicode characters', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
unicodeField: '🚀 Unicode: 你好世界 مرحبا بالعالم'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'unicodeField', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Complex Validation Scenarios', () => {
|
||||
it('should handle conflicting displayOptions conditions', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
mode: 'both',
|
||||
showField: true,
|
||||
conflictField: 'value'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'mode', type: 'options', options: ['show', 'hide', 'both'] },
|
||||
{ name: 'showField', type: 'boolean' },
|
||||
{
|
||||
name: 'conflictField',
|
||||
type: 'string',
|
||||
displayOptions: {
|
||||
show: { mode: ['show'], showField: [true] },
|
||||
hide: { mode: ['hide'] }
|
||||
}
|
||||
}
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// With mode='both', the field visibility depends on implementation
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle multiple validation profiles correctly', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: 'const x = 1;'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
// Should perform node-specific validation for Code nodes
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.message.includes('No return statement found')
|
||||
)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Recovery and Resilience', () => {
|
||||
it('should continue validation after encountering errors', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = {
|
||||
field1: 'invalid-for-number',
|
||||
field2: null, // Required field missing
|
||||
field3: 'valid'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'field1', type: 'number' },
|
||||
{ name: 'field2', type: 'string', required: true },
|
||||
{ name: 'field3', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Should have errors for field1 and field2, but field3 should be validated
|
||||
expect(result.errors.length).toBeGreaterThanOrEqual(2);
|
||||
|
||||
// Check that we have errors for field1 (type error) and field2 (required field)
|
||||
const field1Error = result.errors.find(e => e.property === 'field1');
|
||||
const field2Error = result.errors.find(e => e.property === 'field2');
|
||||
|
||||
expect(field1Error).toBeDefined();
|
||||
expect(field1Error?.type).toBe('invalid_type');
|
||||
|
||||
expect(field2Error).toBeDefined();
|
||||
// field2 is null, which might be treated as invalid_type rather than missing_required
|
||||
expect(['missing_required', 'invalid_type']).toContain(field2Error?.type);
|
||||
|
||||
expect(result.visibleProperties).toContain('field3');
|
||||
});
|
||||
|
||||
it('should handle malformed property definitions gracefully', () => {
|
||||
const nodeType = 'nodes-base.test';
|
||||
const config = { field: 'value' };
|
||||
const properties = [
|
||||
{ name: 'field', type: 'string' },
|
||||
{ /* Malformed property without name */ type: 'string' } as any,
|
||||
{ name: 'field2', /* Missing type */ } as any
|
||||
];
|
||||
|
||||
// Should handle malformed properties without crashing
|
||||
// Note: null properties will cause errors in the current implementation
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
expect(result).toBeDefined();
|
||||
expect(result.valid).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateBatch method implementation', () => {
|
||||
it('should validate multiple configs in batch if method exists', () => {
|
||||
// This test is for future implementation
|
||||
const configs = [
|
||||
{ nodeType: 'nodes-base.test', config: { field: 'value1' }, properties: [] },
|
||||
{ nodeType: 'nodes-base.test', config: { field: 'value2' }, properties: [] }
|
||||
];
|
||||
|
||||
// If validateBatch method is implemented in the future
|
||||
if ('validateBatch' in ConfigValidator) {
|
||||
const results = (ConfigValidator as any).validateBatch(configs);
|
||||
expect(results).toHaveLength(2);
|
||||
} else {
|
||||
// For now, just validate individually
|
||||
const results = configs.map(c =>
|
||||
ConfigValidator.validate(c.nodeType, c.config, c.properties)
|
||||
);
|
||||
expect(results).toHaveLength(2);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,589 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { ConfigValidator } from '@/services/config-validator';
|
||||
import type { ValidationResult, ValidationError, ValidationWarning } from '@/services/config-validator';
|
||||
|
||||
// Mock the database
|
||||
vi.mock('better-sqlite3');
|
||||
|
||||
describe('ConfigValidator - Node-Specific Validation', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('HTTP Request node validation', () => {
|
||||
it('should perform HTTP Request specific validation', () => {
|
||||
const nodeType = 'nodes-base.httpRequest';
|
||||
const config = {
|
||||
method: 'POST',
|
||||
url: 'invalid-url', // Missing protocol
|
||||
sendBody: false
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'method', type: 'options' },
|
||||
{ name: 'url', type: 'string' },
|
||||
{ name: 'sendBody', type: 'boolean' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({
|
||||
type: 'invalid_value',
|
||||
property: 'url',
|
||||
message: 'URL must start with http:// or https://'
|
||||
});
|
||||
expect(result.warnings).toHaveLength(1);
|
||||
expect(result.warnings[0]).toMatchObject({
|
||||
type: 'missing_common',
|
||||
property: 'sendBody',
|
||||
message: 'POST requests typically send a body'
|
||||
});
|
||||
expect(result.autofix).toMatchObject({
|
||||
sendBody: true,
|
||||
contentType: 'json'
|
||||
});
|
||||
});
|
||||
|
||||
it('should validate HTTP Request with authentication in API URLs', () => {
|
||||
const nodeType = 'nodes-base.httpRequest';
|
||||
const config = {
|
||||
method: 'GET',
|
||||
url: 'https://api.github.com/user/repos',
|
||||
authentication: 'none'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'method', type: 'options' },
|
||||
{ name: 'url', type: 'string' },
|
||||
{ name: 'authentication', type: 'options' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'security' &&
|
||||
w.message.includes('API endpoints typically require authentication')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate JSON in HTTP Request body', () => {
|
||||
const nodeType = 'nodes-base.httpRequest';
|
||||
const config = {
|
||||
method: 'POST',
|
||||
url: 'https://api.example.com',
|
||||
contentType: 'json',
|
||||
body: '{"invalid": json}' // Invalid JSON
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'method', type: 'options' },
|
||||
{ name: 'url', type: 'string' },
|
||||
{ name: 'contentType', type: 'options' },
|
||||
{ name: 'body', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.property === 'body' &&
|
||||
e.message.includes('Invalid JSON')
|
||||
));
|
||||
});
|
||||
|
||||
it('should handle webhook-specific validation', () => {
|
||||
const nodeType = 'nodes-base.webhook';
|
||||
const config = {
|
||||
httpMethod: 'GET',
|
||||
path: 'webhook-endpoint' // Missing leading slash
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'httpMethod', type: 'options' },
|
||||
{ name: 'path', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.property === 'path' &&
|
||||
w.message.includes('should start with /')
|
||||
));
|
||||
});
|
||||
});
|
||||
|
||||
describe('Code node validation', () => {
|
||||
it('should validate Code node configurations', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: '' // Empty code
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({
|
||||
type: 'missing_required',
|
||||
property: 'jsCode',
|
||||
message: 'Code cannot be empty'
|
||||
});
|
||||
});
|
||||
|
||||
it('should validate JavaScript syntax in Code node', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
const data = { foo: "bar" };
|
||||
if (data.foo { // Missing closing parenthesis
|
||||
return [{json: data}];
|
||||
}
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors.some(e => e.message.includes('Unbalanced')));
|
||||
expect(result.warnings).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should validate n8n-specific patterns in Code node', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
// Process data without returning
|
||||
const processedData = items.map(item => ({
|
||||
...item.json,
|
||||
processed: true
|
||||
}));
|
||||
// No output provided
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// The warning should be about missing return statement
|
||||
expect(result.warnings.some(w => w.type === 'missing_common' && w.message.includes('No return statement found'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle empty code in Code node', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: ' \n \t \n ' // Just whitespace
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.type === 'missing_required' &&
|
||||
e.message.includes('Code cannot be empty')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate complex return patterns in Code node', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
return ["string1", "string2", "string3"];
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'invalid_value' &&
|
||||
w.message.includes('Items must be objects with json property')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate Code node with $helpers usage', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
const workflow = $helpers.getWorkflowStaticData();
|
||||
workflow.counter = (workflow.counter || 0) + 1;
|
||||
return [{json: {count: workflow.counter}}];
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'best_practice' &&
|
||||
w.message.includes('$helpers is only available in Code nodes')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect incorrect $helpers.getWorkflowStaticData usage', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
const data = $helpers.getWorkflowStaticData; // Missing parentheses
|
||||
return [{json: {data}}];
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.type === 'invalid_value' &&
|
||||
e.message.includes('getWorkflowStaticData requires parentheses')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate console.log usage', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
console.log('Debug info:', items);
|
||||
return items;
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'best_practice' &&
|
||||
w.message.includes('console.log output appears in n8n execution logs')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate $json usage warning', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
const data = $json.myField;
|
||||
return [{json: {processed: data}}];
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'best_practice' &&
|
||||
w.message.includes('$json only works in "Run Once for Each Item" mode')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should not warn about properties for Code nodes', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: 'return items;',
|
||||
unusedProperty: 'this should not generate a warning for Code nodes'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
// Code nodes should skip the common issues check that warns about unused properties
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'inefficient' &&
|
||||
w.property === 'unusedProperty'
|
||||
)).toBe(false);
|
||||
});
|
||||
|
||||
it('should validate crypto module usage', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
const uuid = crypto.randomUUID();
|
||||
return [{json: {id: uuid}}];
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'invalid_value' &&
|
||||
w.message.includes('Using crypto without require')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should suggest error handling for complex code', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
const apiUrl = items[0].json.url;
|
||||
const response = await fetch(apiUrl);
|
||||
const data = await response.json();
|
||||
return [{json: data}];
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.suggestions.some(s =>
|
||||
s.includes('Consider adding error handling')
|
||||
));
|
||||
});
|
||||
|
||||
it('should suggest error handling for non-trivial code', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: Array(10).fill('const x = 1;').join('\n') + '\nreturn items;'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.suggestions.some(s => s.includes('error handling')));
|
||||
});
|
||||
|
||||
it('should validate async operations without await', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'javascript',
|
||||
jsCode: `
|
||||
const promise = fetch('https://api.example.com');
|
||||
return [{json: {data: promise}}];
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'jsCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'best_practice' &&
|
||||
w.message.includes('Async operation without await')
|
||||
)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Python Code node validation', () => {
|
||||
it('should validate Python code syntax', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'python',
|
||||
pythonCode: `
|
||||
def process_data():
|
||||
return [{"json": {"test": True}] # Missing closing bracket
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'pythonCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.type === 'syntax_error' &&
|
||||
e.message.includes('Unmatched bracket')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect mixed indentation in Python code', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'python',
|
||||
pythonCode: `
|
||||
def process():
|
||||
x = 1
|
||||
y = 2 # This line uses tabs
|
||||
return [{"json": {"x": x, "y": y}}]
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'pythonCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.type === 'syntax_error' &&
|
||||
e.message.includes('Mixed indentation')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should warn about incorrect n8n return patterns', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'python',
|
||||
pythonCode: `
|
||||
result = {"data": "value"}
|
||||
return result # Should return array of objects with json key
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'pythonCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'invalid_value' &&
|
||||
w.message.includes('Must return array of objects with json key')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should warn about using external libraries in Python code', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'python',
|
||||
pythonCode: `
|
||||
import pandas as pd
|
||||
import requests
|
||||
|
||||
df = pd.DataFrame(items)
|
||||
response = requests.get('https://api.example.com')
|
||||
return [{"json": {"data": response.json()}}]
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'pythonCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'invalid_value' &&
|
||||
w.message.includes('External libraries not available')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate Python code with print statements', () => {
|
||||
const nodeType = 'nodes-base.code';
|
||||
const config = {
|
||||
language: 'python',
|
||||
pythonCode: `
|
||||
print("Debug:", items)
|
||||
processed = []
|
||||
for item in items:
|
||||
print(f"Processing: {item}")
|
||||
processed.append({"json": item["json"]})
|
||||
return processed
|
||||
`
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'language', type: 'options' },
|
||||
{ name: 'pythonCode', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'best_practice' &&
|
||||
w.message.includes('print() output appears in n8n execution logs')
|
||||
)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Database node validation', () => {
|
||||
it('should validate database query security', () => {
|
||||
const nodeType = 'nodes-base.postgres';
|
||||
const config = {
|
||||
query: 'DELETE FROM users;' // Missing WHERE clause
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'query', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'security' &&
|
||||
w.message.includes('DELETE query without WHERE clause')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should check for SQL injection vulnerabilities', () => {
|
||||
const nodeType = 'nodes-base.mysql';
|
||||
const config = {
|
||||
query: 'SELECT * FROM users WHERE id = ${userId}'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'query', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.type === 'security' &&
|
||||
w.message.includes('SQL injection')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate SQL SELECT * performance warning', () => {
|
||||
const nodeType = 'nodes-base.postgres';
|
||||
const config = {
|
||||
query: 'SELECT * FROM large_table WHERE status = "active"'
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'query', type: 'string' }
|
||||
];
|
||||
|
||||
const result = ConfigValidator.validate(nodeType, config, properties);
|
||||
|
||||
expect(result.suggestions.some(s =>
|
||||
s.includes('Consider selecting specific columns')
|
||||
)).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
667
tests/unit/services/config-validator.test.ts
Normal file
667
tests/unit/services/config-validator.test.ts
Normal file
@@ -0,0 +1,667 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { ConfigValidator } from '@/services/config-validator';
|
||||
import type { ValidationResult, ValidationError, ValidationWarning } from '@/services/config-validator';
|
||||
|
||||
// Mock the database
|
||||
vi.mock('better-sqlite3');
|
||||
|
||||
describe('ConfigValidator', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
// ─── Basic Validation ───────────────────────────────────────────────
|
||||
|
||||
describe('validate', () => {
|
||||
it('should validate required fields for Slack message post', () => {
|
||||
const config = { resource: 'message', operation: 'post' };
|
||||
const properties = [
|
||||
{ name: 'resource', type: 'options', required: true, default: 'message', options: [{ name: 'Message', value: 'message' }, { name: 'Channel', value: 'channel' }] },
|
||||
{ name: 'operation', type: 'options', required: true, default: 'post', displayOptions: { show: { resource: ['message'] } }, options: [{ name: 'Post', value: 'post' }, { name: 'Update', value: 'update' }] },
|
||||
{ name: 'channel', type: 'string', required: true, displayOptions: { show: { resource: ['message'], operation: ['post'] } } }
|
||||
];
|
||||
const result = ConfigValidator.validate('nodes-base.slack', config, properties);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({ type: 'missing_required', property: 'channel', message: "Required property 'channel' is missing", fix: 'Add channel to your configuration' });
|
||||
});
|
||||
|
||||
it('should validate successfully with all required fields', () => {
|
||||
const config = { resource: 'message', operation: 'post', channel: '#general', text: 'Hello, Slack!' };
|
||||
const properties = [
|
||||
{ name: 'resource', type: 'options', required: true, default: 'message', options: [{ name: 'Message', value: 'message' }, { name: 'Channel', value: 'channel' }] },
|
||||
{ name: 'operation', type: 'options', required: true, default: 'post', displayOptions: { show: { resource: ['message'] } }, options: [{ name: 'Post', value: 'post' }, { name: 'Update', value: 'update' }] },
|
||||
{ name: 'channel', type: 'string', required: true, displayOptions: { show: { resource: ['message'], operation: ['post'] } } },
|
||||
{ name: 'text', type: 'string', default: '', displayOptions: { show: { resource: ['message'], operation: ['post'] } } }
|
||||
];
|
||||
const result = ConfigValidator.validate('nodes-base.slack', config, properties);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle unknown node types gracefully', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.unknown', { field: 'value' }, []);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should validate property types', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { numberField: 'not-a-number', booleanField: 'yes' }, [{ name: 'numberField', type: 'number' }, { name: 'booleanField', type: 'boolean' }]);
|
||||
expect(result.errors).toHaveLength(2);
|
||||
expect(result.errors.some(e => e.property === 'numberField' && e.type === 'invalid_type')).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'booleanField' && e.type === 'invalid_type')).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate option values', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { selectField: 'invalid-option' }, [{ name: 'selectField', type: 'options', options: [{ name: 'Option A', value: 'a' }, { name: 'Option B', value: 'b' }] }]);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({ type: 'invalid_value', property: 'selectField', message: expect.stringContaining('Invalid value') });
|
||||
});
|
||||
|
||||
it('should check property visibility based on displayOptions', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { resource: 'user', userField: 'visible' }, [
|
||||
{ name: 'resource', type: 'options', options: [{ name: 'User', value: 'user' }, { name: 'Post', value: 'post' }] },
|
||||
{ name: 'userField', type: 'string', displayOptions: { show: { resource: ['user'] } } },
|
||||
{ name: 'postField', type: 'string', displayOptions: { show: { resource: ['post'] } } }
|
||||
]);
|
||||
expect(result.visibleProperties).toContain('resource');
|
||||
expect(result.visibleProperties).toContain('userField');
|
||||
expect(result.hiddenProperties).toContain('postField');
|
||||
});
|
||||
|
||||
it('should handle empty properties array', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { someField: 'value' }, []);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle missing displayOptions gracefully', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { field1: 'value1' }, [{ name: 'field1', type: 'string' }]);
|
||||
expect(result.visibleProperties).toContain('field1');
|
||||
});
|
||||
|
||||
it('should validate options with array format', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { optionField: 'b' }, [{ name: 'optionField', type: 'options', options: [{ name: 'Option A', value: 'a' }, { name: 'Option B', value: 'b' }, { name: 'Option C', value: 'c' }] }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
});

// ─── Edge Cases and Additional Coverage ─────────────────────────────

describe('edge cases and additional coverage', () => {
|
||||
it('should handle null and undefined config values', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { nullField: null, undefinedField: undefined, validField: 'value' }, [
|
||||
{ name: 'nullField', type: 'string', required: true },
|
||||
{ name: 'undefinedField', type: 'string', required: true },
|
||||
{ name: 'validField', type: 'string' }
|
||||
]);
|
||||
expect(result.errors.find(e => e.property === 'nullField')).toBeDefined();
|
||||
expect(result.errors.find(e => e.property === 'undefinedField')).toBeDefined();
|
||||
});
|
||||
|
||||
it('should validate nested displayOptions conditions', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { mode: 'advanced', resource: 'user', advancedUserField: 'value' }, [
|
||||
{ name: 'mode', type: 'options', options: [{ name: 'Simple', value: 'simple' }, { name: 'Advanced', value: 'advanced' }] },
|
||||
{ name: 'resource', type: 'options', displayOptions: { show: { mode: ['advanced'] } }, options: [{ name: 'User', value: 'user' }, { name: 'Post', value: 'post' }] },
|
||||
{ name: 'advancedUserField', type: 'string', displayOptions: { show: { mode: ['advanced'], resource: ['user'] } } }
|
||||
]);
|
||||
expect(result.visibleProperties).toContain('advancedUserField');
|
||||
});
|
||||
|
||||
it('should handle hide conditions in displayOptions', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { showAdvanced: false, hiddenField: 'should-not-be-here' }, [
|
||||
{ name: 'showAdvanced', type: 'boolean' },
|
||||
{ name: 'hiddenField', type: 'string', displayOptions: { hide: { showAdvanced: [false] } } }
|
||||
]);
|
||||
expect(result.hiddenProperties).toContain('hiddenField');
|
||||
expect(result.warnings.some(w => w.property === 'hiddenField' && w.type === 'inefficient')).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle internal properties that start with underscore', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { '@version': 1, '_internalField': 'value', normalField: 'value' }, [{ name: 'normalField', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.property === '@version' || w.property === '_internalField')).toBe(false);
|
||||
});
|
||||
|
||||
it('should warn about inefficient configured but hidden properties', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { mode: 'manual', automaticField: 'This will not be used' }, [
|
||||
{ name: 'mode', type: 'options', options: [{ name: 'Manual', value: 'manual' }, { name: 'Automatic', value: 'automatic' }] },
|
||||
{ name: 'automaticField', type: 'string', displayOptions: { show: { mode: ['automatic'] } } }
|
||||
]);
|
||||
expect(result.warnings.some(w => w.type === 'inefficient' && w.property === 'automaticField' && w.message.includes("won't be used"))).toBe(true);
|
||||
});
|
||||
|
||||
it('should suggest commonly used properties', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.httpRequest', { method: 'GET', url: 'https://api.example.com/data' }, [{ name: 'method', type: 'options' }, { name: 'url', type: 'string' }, { name: 'headers', type: 'json' }]);
|
||||
expect(result.suggestions.length).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
});

// ─── ResourceLocator Validation ─────────────────────────────────────

describe('resourceLocator validation', () => {
|
||||
const rlNodeType = '@n8n/n8n-nodes-langchain.lmChatOpenAi';
|
||||
|
||||
it('should reject string value when resourceLocator object is required', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: 'gpt-4o-mini' }, [{ name: 'model', displayName: 'Model', type: 'resourceLocator', required: true, default: { mode: 'list', value: 'gpt-4o-mini' } }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({ type: 'invalid_type', property: 'model', message: expect.stringContaining('must be an object with \'mode\' and \'value\' properties') });
|
||||
expect(result.errors[0].fix).toContain('mode');
|
||||
expect(result.errors[0].fix).toContain('value');
|
||||
});
|
||||
|
||||
it('should accept valid resourceLocator with mode and value', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'list', value: 'gpt-4o-mini' } }, [{ name: 'model', displayName: 'Model', type: 'resourceLocator', required: true, default: { mode: 'list', value: 'gpt-4o-mini' } }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should reject null value for resourceLocator', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: null }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.property === 'model' && e.type === 'invalid_type')).toBe(true);
|
||||
});
|
||||
|
||||
it('should reject array value for resourceLocator', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: ['gpt-4o-mini'] }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.property === 'model' && e.type === 'invalid_type' && e.message.includes('must be an object'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing mode property in resourceLocator', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { value: 'gpt-4o-mini' } }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.property === 'model.mode' && e.type === 'missing_required' && e.message.includes('missing required property \'mode\''))).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing value property in resourceLocator', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'list' } }, [{ name: 'model', displayName: 'Model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.property === 'model.value' && e.type === 'missing_required' && e.message.includes('missing required property \'value\''))).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect invalid mode type in resourceLocator', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 123, value: 'gpt-4o-mini' } }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.property === 'model.mode' && e.type === 'invalid_type' && e.message.includes('must be a string'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should accept resourceLocator with mode "id"', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'id', value: 'gpt-4o-2024-11-20' } }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should reject number value when resourceLocator is required', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: 12345 }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors[0].type).toBe('invalid_type');
|
||||
expect(result.errors[0].message).toContain('must be an object');
|
||||
});
|
||||
|
||||
it('should provide helpful fix suggestion for string to resourceLocator conversion', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: 'gpt-4o-mini' }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.errors[0].fix).toContain('{ mode: "list", value: "gpt-4o-mini" }');
|
||||
expect(result.errors[0].fix).toContain('{ mode: "id", value: "gpt-4o-mini" }');
|
||||
});
|
||||
|
||||
it('should reject invalid mode values when schema defines allowed modes', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'invalid-mode', value: 'gpt-4o-mini' } }, [{ name: 'model', type: 'resourceLocator', required: true, modes: [{ name: 'list', displayName: 'List' }, { name: 'id', displayName: 'ID' }, { name: 'url', displayName: 'URL' }] }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.property === 'model.mode' && e.type === 'invalid_value' && e.message.includes('must be one of [list, id, url]'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle modes defined as array format', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'custom', value: 'gpt-4o-mini' } }, [{ name: 'model', type: 'resourceLocator', required: true, modes: [{ name: 'list', displayName: 'List' }, { name: 'id', displayName: 'ID' }, { name: 'custom', displayName: 'Custom' }] }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle malformed modes schema gracefully', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'any-mode', value: 'gpt-4o-mini' } }, [{ name: 'model', type: 'resourceLocator', required: true, modes: 'invalid-string' }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'model.mode')).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle empty modes definition gracefully', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'any-mode', value: 'gpt-4o-mini' } }, [{ name: 'model', type: 'resourceLocator', required: true, modes: {} }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'model.mode')).toBe(false);
|
||||
});
|
||||
|
||||
it('should skip mode validation when modes not provided', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'custom-mode', value: 'gpt-4o-mini' } }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should accept resourceLocator with mode "url"', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'url', value: 'https://api.example.com/models/custom' } }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should detect empty resourceLocator object', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: {} }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.length).toBeGreaterThanOrEqual(2);
|
||||
expect(result.errors.some(e => e.property === 'model.mode')).toBe(true);
|
||||
expect(result.errors.some(e => e.property === 'model.value')).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle resourceLocator with extra properties gracefully', () => {
|
||||
const result = ConfigValidator.validate(rlNodeType, { model: { mode: 'list', value: 'gpt-4o-mini', extraProperty: 'ignored' } }, [{ name: 'model', type: 'resourceLocator', required: true }]);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
});

// ─── _cnd Operators (from config-validator-cnd) ─────────────────────

describe('_cnd operators', () => {
|
||||
describe('eq operator', () => {
|
||||
it('should match when values are equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'testField', displayOptions: { show: { status: [{ _cnd: { eq: 'active' } }] } } }, { status: 'active' })).toBe(true);
|
||||
});
|
||||
it('should not match when values are not equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'testField', displayOptions: { show: { status: [{ _cnd: { eq: 'active' } }] } } }, { status: 'inactive' })).toBe(false);
|
||||
});
|
||||
it('should match numeric equality', () => {
|
||||
const prop = { name: 'testField', displayOptions: { show: { '@version': [{ _cnd: { eq: 1 } }] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('not operator', () => {
|
||||
it('should match when values are not equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'testField', displayOptions: { show: { status: [{ _cnd: { not: 'disabled' } }] } } }, { status: 'active' })).toBe(true);
|
||||
});
|
||||
it('should not match when values are equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'testField', displayOptions: { show: { status: [{ _cnd: { not: 'disabled' } }] } } }, { status: 'disabled' })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('gte operator', () => {
|
||||
it('should match when value is greater', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { gte: 1.1 } }] } } }, { '@version': 2.0 })).toBe(true);
|
||||
});
|
||||
it('should match when value is equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { gte: 1.1 } }] } } }, { '@version': 1.1 })).toBe(true);
|
||||
});
|
||||
it('should not match when value is less', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { gte: 1.1 } }] } } }, { '@version': 1.0 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('lte operator', () => {
|
||||
it('should match when value is less', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { lte: 2.0 } }] } } }, { '@version': 1.5 })).toBe(true);
|
||||
});
|
||||
it('should match when value is equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { lte: 2.0 } }] } } }, { '@version': 2.0 })).toBe(true);
|
||||
});
|
||||
it('should not match when value is greater', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { lte: 2.0 } }] } } }, { '@version': 2.5 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('gt operator', () => {
|
||||
it('should match when value is greater', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { count: [{ _cnd: { gt: 5 } }] } } }, { count: 10 })).toBe(true);
|
||||
});
|
||||
it('should not match when value is equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { count: [{ _cnd: { gt: 5 } }] } } }, { count: 5 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('lt operator', () => {
|
||||
it('should match when value is less', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { count: [{ _cnd: { lt: 10 } }] } } }, { count: 5 })).toBe(true);
|
||||
});
|
||||
it('should not match when value is equal', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { count: [{ _cnd: { lt: 10 } }] } } }, { count: 10 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('between operator', () => {
|
||||
it('should match when value is within range', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] } } }, { '@version': 4.3 })).toBe(true);
|
||||
});
|
||||
it('should match when value equals lower bound', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] } } }, { '@version': 4 })).toBe(true);
|
||||
});
|
||||
it('should match when value equals upper bound', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] } } }, { '@version': 4.6 })).toBe(true);
|
||||
});
|
||||
it('should not match when value is below range', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] } } }, { '@version': 3.9 })).toBe(false);
|
||||
});
|
||||
it('should not match when value is above range', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: { from: 4, to: 4.6 } } }] } } }, { '@version': 5 })).toBe(false);
|
||||
});
|
||||
it('should not match when between structure is null', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: null } }] } } }, { '@version': 4 })).toBe(false);
|
||||
});
|
||||
it('should not match when between is missing from field', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: { to: 5 } } }] } } }, { '@version': 4 })).toBe(false);
|
||||
});
|
||||
it('should not match when between is missing to field', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { '@version': [{ _cnd: { between: { from: 3 } } }] } } }, { '@version': 4 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('startsWith operator', () => {
|
||||
it('should match when string starts with prefix', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { name: [{ _cnd: { startsWith: 'test' } }] } } }, { name: 'testUser' })).toBe(true); });
|
||||
it('should not match when string does not start with prefix', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { name: [{ _cnd: { startsWith: 'test' } }] } } }, { name: 'mytest' })).toBe(false); });
|
||||
it('should not match non-string values', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { value: [{ _cnd: { startsWith: 'test' } }] } } }, { value: 123 })).toBe(false); });
|
||||
});
|
||||
|
||||
describe('endsWith operator', () => {
|
||||
it('should match when string ends with suffix', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { email: [{ _cnd: { endsWith: '@example.com' } }] } } }, { email: 'user@example.com' })).toBe(true); });
|
||||
it('should not match when string does not end with suffix', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { email: [{ _cnd: { endsWith: '@example.com' } }] } } }, { email: 'user@other.com' })).toBe(false); });
|
||||
});
|
||||
|
||||
describe('includes operator', () => {
|
||||
it('should match when string contains substring', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { eventId: [{ _cnd: { includes: '_' } }] } } }, { eventId: 'event_123' })).toBe(true); });
|
||||
it('should not match when string does not contain substring', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { eventId: [{ _cnd: { includes: '_' } }] } } }, { eventId: 'event123' })).toBe(false); });
|
||||
});
|
||||
|
||||
describe('regex operator', () => {
|
||||
it('should match when string matches regex pattern', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { id: [{ _cnd: { regex: '^[A-Z]{3}\\d{4}$' } }] } } }, { id: 'ABC1234' })).toBe(true); });
|
||||
it('should not match when string does not match regex pattern', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { id: [{ _cnd: { regex: '^[A-Z]{3}\\d{4}$' } }] } } }, { id: 'abc1234' })).toBe(false); });
|
||||
it('should not match when regex pattern is invalid', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { id: [{ _cnd: { regex: '[invalid(regex' } }] } } }, { id: 'test' })).toBe(false); });
|
||||
it('should not match non-string values', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { value: [{ _cnd: { regex: '\\d+' } }] } } }, { value: 123 })).toBe(false); });
|
||||
});
|
||||
|
||||
describe('exists operator', () => {
|
||||
it('should match when field exists and is not null', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { optionalField: [{ _cnd: { exists: true } }] } } }, { optionalField: 'value' })).toBe(true); });
|
||||
it('should match when field exists with value 0', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { optionalField: [{ _cnd: { exists: true } }] } } }, { optionalField: 0 })).toBe(true); });
|
||||
it('should match when field exists with empty string', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { optionalField: [{ _cnd: { exists: true } }] } } }, { optionalField: '' })).toBe(true); });
|
||||
it('should not match when field is undefined', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { optionalField: [{ _cnd: { exists: true } }] } } }, { otherField: 'value' })).toBe(false); });
|
||||
it('should not match when field is null', () => { expect(ConfigValidator.isPropertyVisible({ name: 'f', displayOptions: { show: { optionalField: [{ _cnd: { exists: true } }] } } }, { optionalField: null })).toBe(false); });
|
||||
});
|
||||
|
||||
describe('mixed plain values and _cnd conditions', () => {
|
||||
it('should match plain value in array with _cnd', () => {
|
||||
const prop = { name: 'f', displayOptions: { show: { status: ['active', { _cnd: { eq: 'pending' } }] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'active' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'pending' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { status: 'disabled' })).toBe(false);
|
||||
});
|
||||
it('should handle multiple conditions with AND logic', () => {
|
||||
const prop = { name: 'f', displayOptions: { show: { '@version': [{ _cnd: { gte: 1.1 } }], mode: ['advanced'] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0, mode: 'advanced' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0, mode: 'basic' })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.0, mode: 'advanced' })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('hide conditions with _cnd', () => {
|
||||
it('should hide property when _cnd condition matches', () => {
|
||||
const prop = { name: 'f', displayOptions: { hide: { '@version': [{ _cnd: { lt: 2.0 } }] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.5 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.5 })).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Execute Workflow Trigger scenario', () => {
|
||||
it('should show property when @version >= 1.1', () => {
|
||||
const prop = { name: 'inputSource', displayOptions: { show: { '@version': [{ _cnd: { gte: 1.1 } }] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.1 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.2 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2.0 })).toBe(true);
|
||||
});
|
||||
it('should hide property when @version < 1.1', () => {
|
||||
const prop = { name: 'inputSource', displayOptions: { show: { '@version': [{ _cnd: { gte: 1.1 } }] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.0 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 0.9 })).toBe(false);
|
||||
});
|
||||
it('should show outdated version warning only for v1', () => {
|
||||
const prop = { name: 'outdatedVersionWarning', displayOptions: { show: { '@version': [{ _cnd: { eq: 1 } }] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1 })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 1.1 })).toBe(false);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { '@version': 2 })).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('backward compatibility with plain values', () => {
|
||||
it('should continue to work with plain value arrays', () => {
|
||||
const prop = { name: 'f', displayOptions: { show: { resource: ['user', 'message'] } } };
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { resource: 'user' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { resource: 'message' })).toBe(true);
|
||||
expect(ConfigValidator.isPropertyVisible(prop, { resource: 'channel' })).toBe(false);
|
||||
});
|
||||
it('should work with properties without displayOptions', () => {
|
||||
expect(ConfigValidator.isPropertyVisible({ name: 'f' }, {})).toBe(true);
|
||||
});
|
||||
});
|
||||
});

// ─── Null/Undefined Handling (from edge-cases) ─────────────────────

describe('null and undefined handling', () => {
|
||||
it('should handle null config gracefully', () => { expect(() => { ConfigValidator.validate('nodes-base.test', null as any, []); }).toThrow(TypeError); });
|
||||
it('should handle undefined config gracefully', () => { expect(() => { ConfigValidator.validate('nodes-base.test', undefined as any, []); }).toThrow(TypeError); });
|
||||
it('should handle null properties array gracefully', () => { expect(() => { ConfigValidator.validate('nodes-base.test', {}, null as any); }).toThrow(TypeError); });
|
||||
it('should handle undefined properties array gracefully', () => { expect(() => { ConfigValidator.validate('nodes-base.test', {}, undefined as any); }).toThrow(TypeError); });
|
||||
});

// ─── Boundary Value Testing (from edge-cases) ─────────────────────

describe('boundary value testing', () => {
|
||||
it('should handle empty arrays', () => { expect(ConfigValidator.validate('nodes-base.test', { arrayField: [] }, [{ name: 'arrayField', type: 'collection' }]).valid).toBe(true); });
|
||||
it('should handle very large property arrays', () => { expect(ConfigValidator.validate('nodes-base.test', { field1: 'value1' }, Array(1000).fill(null).map((_, i) => ({ name: `field${i}`, type: 'string' }))).valid).toBe(true); });
|
||||
it('should handle deeply nested displayOptions', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { level1: 'a', level2: 'b', level3: 'c', deepField: 'value' }, [
|
||||
{ name: 'level1', type: 'options', options: ['a', 'b'] },
|
||||
{ name: 'level2', type: 'options', options: ['a', 'b'], displayOptions: { show: { level1: ['a'] } } },
|
||||
{ name: 'level3', type: 'options', options: ['a', 'b', 'c'], displayOptions: { show: { level1: ['a'], level2: ['b'] } } },
|
||||
{ name: 'deepField', type: 'string', displayOptions: { show: { level1: ['a'], level2: ['b'], level3: ['c'] } } }
|
||||
]);
|
||||
expect(result.visibleProperties).toContain('deepField');
|
||||
});
|
||||
it('should handle extremely long string values', () => { expect(ConfigValidator.validate('nodes-base.test', { longField: 'a'.repeat(10000) }, [{ name: 'longField', type: 'string' }]).valid).toBe(true); });
|
||||
});

// ─── Invalid Data Type Handling (from edge-cases) ─────────────────

describe('invalid data type handling', () => {
|
||||
it('should handle NaN values', () => { expect(ConfigValidator.validate('nodes-base.test', { numberField: NaN }, [{ name: 'numberField', type: 'number' }])).toBeDefined(); });
|
||||
it('should handle Infinity values', () => { expect(ConfigValidator.validate('nodes-base.test', { numberField: Infinity }, [{ name: 'numberField', type: 'number' }])).toBeDefined(); });
|
||||
it('should handle objects when expecting primitives', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { stringField: { nested: 'object' }, numberField: { value: 123 } }, [{ name: 'stringField', type: 'string' }, { name: 'numberField', type: 'number' }]);
|
||||
expect(result.errors).toHaveLength(2);
|
||||
expect(result.errors.every(e => e.type === 'invalid_type')).toBe(true);
|
||||
});
|
||||
it('should handle circular references in config', () => {
|
||||
const config: any = { field: 'value' };
|
||||
config.circular = config;
|
||||
expect(ConfigValidator.validate('nodes-base.test', config, [{ name: 'field', type: 'string' }, { name: 'circular', type: 'json' }])).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Performance Boundaries (from edge-cases) ─────────────────────
|
||||
|
||||
describe('performance boundaries', () => {
|
||||
it('should validate large config objects within reasonable time', () => {
|
||||
const config: Record<string, any> = {};
|
||||
const properties: any[] = [];
|
||||
for (let i = 0; i < 1000; i++) { config[`field_${i}`] = `value_${i}`; properties.push({ name: `field_${i}`, type: 'string' }); }
|
||||
const startTime = Date.now();
|
||||
const result = ConfigValidator.validate('nodes-base.test', config, properties);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(Date.now() - startTime).toBeLessThan(1000);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Special Characters (from edge-cases) ─────────────────────────
|
||||
|
||||
describe('special characters and encoding', () => {
|
||||
it('should handle special characters in property values', () => { expect(ConfigValidator.validate('nodes-base.test', { specialField: 'Value with special chars: <>&"\'`\n\r\t' }, [{ name: 'specialField', type: 'string' }]).valid).toBe(true); });
|
||||
it('should handle unicode characters', () => { expect(ConfigValidator.validate('nodes-base.test', { unicodeField: 'Unicode: \u4F60\u597D\u4E16\u754C' }, [{ name: 'unicodeField', type: 'string' }]).valid).toBe(true); });
|
||||
});
|
||||
|
||||
// ─── Complex Validation Scenarios (from edge-cases) ───────────────
|
||||
|
||||
describe('complex validation scenarios', () => {
|
||||
it('should handle conflicting displayOptions conditions', () => {
|
||||
expect(ConfigValidator.validate('nodes-base.test', { mode: 'both', showField: true, conflictField: 'value' }, [
|
||||
{ name: 'mode', type: 'options', options: ['show', 'hide', 'both'] },
|
||||
{ name: 'showField', type: 'boolean' },
|
||||
{ name: 'conflictField', type: 'string', displayOptions: { show: { mode: ['show'], showField: [true] }, hide: { mode: ['hide'] } } }
|
||||
])).toBeDefined();
|
||||
});
|
||||
it('should handle multiple validation profiles correctly', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'const x = 1;' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.message.includes('No return statement found'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Error Recovery (from edge-cases) ─────────────────────────────
|
||||
|
||||
describe('error recovery and resilience', () => {
|
||||
it('should continue validation after encountering errors', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { field1: 'invalid-for-number', field2: null, field3: 'valid' }, [{ name: 'field1', type: 'number' }, { name: 'field2', type: 'string', required: true }, { name: 'field3', type: 'string' }]);
|
||||
expect(result.errors.length).toBeGreaterThanOrEqual(2);
|
||||
expect(result.errors.find(e => e.property === 'field1')?.type).toBe('invalid_type');
|
||||
expect(result.errors.find(e => e.property === 'field2')).toBeDefined();
|
||||
expect(result.visibleProperties).toContain('field3');
|
||||
});
|
||||
it('should handle malformed property definitions gracefully', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.test', { field: 'value' }, [{ name: 'field', type: 'string' }, { type: 'string' } as any, { name: 'field2' } as any]);
|
||||
expect(result).toBeDefined();
|
||||
expect(result.valid).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Batch Validation (from edge-cases) ───────────────────────────
|
||||
|
||||
describe('validateBatch method implementation', () => {
|
||||
it('should validate multiple configs in batch if method exists', () => {
|
||||
const configs = [{ nodeType: 'nodes-base.test', config: { field: 'value1' }, properties: [] as any[] }, { nodeType: 'nodes-base.test', config: { field: 'value2' }, properties: [] as any[] }];
|
||||
if ('validateBatch' in ConfigValidator) { expect((ConfigValidator as any).validateBatch(configs)).toHaveLength(2); }
|
||||
else { expect(configs.map(c => ConfigValidator.validate(c.nodeType, c.config, c.properties))).toHaveLength(2); }
|
||||
});
|
||||
});
|
||||
|
||||
// ─── HTTP Request Node (from node-specific) ──────────────────────
|
||||
|
||||
describe('HTTP Request node validation', () => {
|
||||
it('should perform HTTP Request specific validation', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.httpRequest', { method: 'POST', url: 'invalid-url', sendBody: false }, [{ name: 'method', type: 'options' }, { name: 'url', type: 'string' }, { name: 'sendBody', type: 'boolean' }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({ type: 'invalid_value', property: 'url', message: 'URL must start with http:// or https://' });
|
||||
expect(result.warnings).toHaveLength(1);
|
||||
expect(result.warnings[0]).toMatchObject({ type: 'missing_common', property: 'sendBody', message: 'POST requests typically send a body' });
|
||||
expect(result.autofix).toMatchObject({ sendBody: true, contentType: 'json' });
|
||||
});
|
||||
it('should validate JSON in HTTP Request body', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.httpRequest', { method: 'POST', url: 'https://api.example.com', contentType: 'json', body: '{"invalid": json}' }, [{ name: 'method', type: 'options' }, { name: 'url', type: 'string' }, { name: 'contentType', type: 'options' }, { name: 'body', type: 'string' }]);
|
||||
expect(result.errors.some(e => e.property === 'body' && e.message.includes('Invalid JSON')));
|
||||
});
|
||||
it('should handle webhook-specific validation', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.webhook', { httpMethod: 'GET', path: 'webhook-endpoint' }, [{ name: 'httpMethod', type: 'options' }, { name: 'path', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.property === 'path' && w.message.includes('should start with /')));
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Code Node (from node-specific) ──────────────────────────────
|
||||
|
||||
describe('Code node validation', () => {
|
||||
it('should validate Code node configurations', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: '' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toHaveLength(1);
|
||||
expect(result.errors[0]).toMatchObject({ type: 'missing_required', property: 'jsCode', message: 'Code cannot be empty' });
|
||||
});
|
||||
it('should validate JavaScript syntax in Code node', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'const data = { foo: "bar" };\nif (data.foo {\n return [{json: data}];\n}' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.errors.some(e => e.message.includes('Unbalanced')));
|
||||
expect(result.warnings).toHaveLength(1);
|
||||
});
|
||||
it('should validate n8n-specific patterns in Code node', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'const processedData = items.map(item => ({...item.json, processed: true}));' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'missing_common' && w.message.includes('No return statement found'))).toBe(true);
|
||||
});
|
||||
it('should handle empty code in Code node', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: ' \n \t \n ' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.type === 'missing_required' && e.message.includes('Code cannot be empty'))).toBe(true);
|
||||
});
|
||||
it('should validate complex return patterns in Code node', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'return ["string1", "string2", "string3"];' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'invalid_value' && w.message.includes('Items must be objects with json property'))).toBe(true);
|
||||
});
|
||||
it('should validate Code node with $helpers usage', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'const workflow = $helpers.getWorkflowStaticData();\nworkflow.counter = (workflow.counter || 0) + 1;\nreturn [{json: {count: workflow.counter}}];' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'best_practice' && w.message.includes('$helpers is only available in Code nodes'))).toBe(true);
|
||||
});
|
||||
it('should detect incorrect $helpers.getWorkflowStaticData usage', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'const data = $helpers.getWorkflowStaticData;\nreturn [{json: {data}}];' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.errors.some(e => e.type === 'invalid_value' && e.message.includes('getWorkflowStaticData requires parentheses'))).toBe(true);
|
||||
});
|
||||
it('should validate console.log usage', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: "console.log('Debug info:', items);\nreturn items;" }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'best_practice' && w.message.includes('console.log output appears in n8n execution logs'))).toBe(true);
|
||||
});
|
||||
it('should validate $json usage warning', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'const data = $json.myField;\nreturn [{json: {processed: data}}];' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'best_practice' && w.message.includes('$json only works in "Run Once for Each Item" mode'))).toBe(true);
|
||||
});
|
||||
it('should not warn about properties for Code nodes', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: 'return items;', unusedProperty: 'test' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'inefficient' && w.property === 'unusedProperty')).toBe(false);
|
||||
});
|
||||
it('should suggest error handling for complex code', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: "const apiUrl = items[0].json.url;\nconst response = await fetch(apiUrl);\nconst data = await response.json();\nreturn [{json: data}];" }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.suggestions.some(s => s.includes('Consider adding error handling')));
|
||||
});
|
||||
it('should suggest error handling for non-trivial code', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: Array(10).fill('const x = 1;').join('\n') + '\nreturn items;' }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.suggestions.some(s => s.includes('error handling')));
|
||||
});
|
||||
it('should validate async operations without await', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'javascript', jsCode: "const promise = fetch('https://api.example.com');\nreturn [{json: {data: promise}}];" }, [{ name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'best_practice' && w.message.includes('Async operation without await'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Python Code Node (from node-specific) ──────────────────────
|
||||
|
||||
describe('Python Code node validation', () => {
|
||||
it('should validate Python code syntax', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'python', pythonCode: 'def process_data():\n return [{"json": {"test": True}]' }, [{ name: 'language', type: 'options' }, { name: 'pythonCode', type: 'string' }]);
|
||||
expect(result.errors.some(e => e.type === 'syntax_error' && e.message.includes('Unmatched bracket'))).toBe(true);
|
||||
});
|
||||
it('should detect mixed indentation in Python code', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'python', pythonCode: 'def process():\n x = 1\n\ty = 2\n return [{"json": {"x": x, "y": y}}]' }, [{ name: 'language', type: 'options' }, { name: 'pythonCode', type: 'string' }]);
|
||||
expect(result.errors.some(e => e.type === 'syntax_error' && e.message.includes('Mixed indentation'))).toBe(true);
|
||||
});
|
||||
it('should warn about incorrect n8n return patterns', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'python', pythonCode: 'result = {"data": "value"}\nreturn result' }, [{ name: 'language', type: 'options' }, { name: 'pythonCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'invalid_value' && w.message.includes('Must return array of objects with json key'))).toBe(true);
|
||||
});
|
||||
it('should warn about using external libraries in Python code', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'python', pythonCode: 'import pandas as pd\nimport requests\ndf = pd.DataFrame(items)\nresponse = requests.get("https://api.example.com")\nreturn [{"json": {"data": response.json()}}]' }, [{ name: 'language', type: 'options' }, { name: 'pythonCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'invalid_value' && w.message.includes('External libraries not available'))).toBe(true);
|
||||
});
|
||||
it('should validate Python code with print statements', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.code', { language: 'python', pythonCode: 'print("Debug:", items)\nprocessed = []\nfor item in items:\n print(f"Processing: {item}")\n processed.append({"json": item["json"]})\nreturn processed' }, [{ name: 'language', type: 'options' }, { name: 'pythonCode', type: 'string' }]);
|
||||
expect(result.warnings.some(w => w.type === 'best_practice' && w.message.includes('print() output appears in n8n execution logs'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Database Node (from node-specific, non-security) ────────────
|
||||
|
||||
describe('Database node validation', () => {
|
||||
it('should validate SQL SELECT * performance warning', () => {
|
||||
const result = ConfigValidator.validate('nodes-base.postgres', { query: 'SELECT * FROM large_table WHERE status = "active"' }, [{ name: 'query', type: 'string' }]);
|
||||
expect(result.suggestions.some(s => s.includes('Consider selecting specific columns'))).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,714 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
import { ResourceSimilarityService } from '@/services/resource-similarity-service';
|
||||
import { OperationSimilarityService } from '@/services/operation-similarity-service';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
|
||||
// Mock similarity services
|
||||
vi.mock('@/services/resource-similarity-service');
|
||||
vi.mock('@/services/operation-similarity-service');
|
||||
|
||||
describe('EnhancedConfigValidator - Integration Tests', () => {
|
||||
let mockResourceService: any;
|
||||
let mockOperationService: any;
|
||||
let mockRepository: any;
|
||||
|
||||
beforeEach(() => {
|
||||
mockRepository = {
|
||||
getNode: vi.fn(),
|
||||
getNodeOperations: vi.fn().mockReturnValue([]),
|
||||
getNodeResources: vi.fn().mockReturnValue([]),
|
||||
getOperationsForResource: vi.fn().mockReturnValue([]),
|
||||
getDefaultOperationForResource: vi.fn().mockReturnValue(undefined),
|
||||
getNodePropertyDefaults: vi.fn().mockReturnValue({})
|
||||
};
|
||||
|
||||
mockResourceService = {
|
||||
findSimilarResources: vi.fn().mockReturnValue([])
|
||||
};
|
||||
|
||||
mockOperationService = {
|
||||
findSimilarOperations: vi.fn().mockReturnValue([])
|
||||
};
|
||||
|
||||
// Mock the constructors to return our mock services
|
||||
vi.mocked(ResourceSimilarityService).mockImplementation(() => mockResourceService);
|
||||
vi.mocked(OperationSimilarityService).mockImplementation(() => mockOperationService);
|
||||
|
||||
// Initialize the similarity services (this will create the service instances)
|
||||
EnhancedConfigValidator.initializeSimilarityServices(mockRepository);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('similarity service integration', () => {
|
||||
it('should initialize similarity services when initializeSimilarityServices is called', () => {
|
||||
// Services should be created when initializeSimilarityServices was called in beforeEach
|
||||
expect(ResourceSimilarityService).toHaveBeenCalled();
|
||||
expect(OperationSimilarityService).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should use resource similarity service for invalid resource errors', () => {
|
||||
const config = {
|
||||
resource: 'invalidResource',
|
||||
operation: 'send'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' },
|
||||
{ value: 'channel', name: 'Channel' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock resource similarity suggestions
|
||||
mockResourceService.findSimilarResources.mockReturnValue([
|
||||
{
|
||||
value: 'message',
|
||||
confidence: 0.8,
|
||||
reason: 'Similar resource name',
|
||||
availableOperations: ['send', 'update']
|
||||
}
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(mockResourceService.findSimilarResources).toHaveBeenCalledWith(
|
||||
'nodes-base.slack',
|
||||
'invalidResource',
|
||||
expect.any(Number)
|
||||
);
|
||||
|
||||
// Should have suggestions in the result
|
||||
expect(result.suggestions).toBeDefined();
|
||||
expect(result.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should use operation similarity service for invalid operation errors', () => {
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'invalidOperation'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send Message' },
|
||||
{ value: 'update', name: 'Update Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock operation similarity suggestions
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([
|
||||
{
|
||||
value: 'send',
|
||||
confidence: 0.9,
|
||||
reason: 'Very similar - likely a typo',
|
||||
resource: 'message'
|
||||
}
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(mockOperationService.findSimilarOperations).toHaveBeenCalledWith(
|
||||
'nodes-base.slack',
|
||||
'invalidOperation',
|
||||
'message',
|
||||
expect.any(Number)
|
||||
);
|
||||
|
||||
// Should have suggestions in the result
|
||||
expect(result.suggestions).toBeDefined();
|
||||
expect(result.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should handle similarity service errors gracefully', () => {
|
||||
const config = {
|
||||
resource: 'invalidResource',
|
||||
operation: 'send'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock service to throw error
|
||||
mockResourceService.findSimilarResources.mockImplementation(() => {
|
||||
throw new Error('Service error');
|
||||
});
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should not crash and still provide basic validation
|
||||
expect(result).toBeDefined();
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should not call similarity services for valid configurations', () => {
|
||||
// Mock repository to return valid resources for this test
|
||||
mockRepository.getNodeResources.mockReturnValue([
|
||||
{ value: 'message', name: 'Message' },
|
||||
{ value: 'channel', name: 'Channel' }
|
||||
]);
|
||||
// Mock getNodeOperations to return valid operations
|
||||
mockRepository.getNodeOperations.mockReturnValue([
|
||||
{ value: 'send', name: 'Send Message' }
|
||||
]);
|
||||
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'send',
|
||||
channel: '#general', // Add required field for Slack send
|
||||
text: 'Test message' // Add required field for Slack send
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should not call similarity services for valid config
|
||||
expect(mockResourceService.findSimilarResources).not.toHaveBeenCalled();
|
||||
expect(mockOperationService.findSimilarOperations).not.toHaveBeenCalled();
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should limit suggestion count when calling similarity services', () => {
|
||||
const config = {
|
||||
resource: 'invalidResource'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(mockResourceService.findSimilarResources).toHaveBeenCalledWith(
|
||||
'nodes-base.slack',
|
||||
'invalidResource',
|
||||
3 // Should limit to 3 suggestions
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('error enhancement with suggestions', () => {
|
||||
it('should enhance resource validation errors with suggestions', () => {
|
||||
const config = {
|
||||
resource: 'msgs' // Typo for 'message'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' },
|
||||
{ value: 'channel', name: 'Channel' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock high-confidence suggestion
|
||||
mockResourceService.findSimilarResources.mockReturnValue([
|
||||
{
|
||||
value: 'message',
|
||||
confidence: 0.85,
|
||||
reason: 'Very similar - likely a typo',
|
||||
availableOperations: ['send', 'update', 'delete']
|
||||
}
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should have enhanced error with suggestion
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.suggestion).toBeDefined();
|
||||
expect(resourceError!.suggestion).toContain('message');
|
||||
});
|
||||
|
||||
it('should enhance operation validation errors with suggestions', () => {
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'sned' // Typo for 'send'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send Message' },
|
||||
{ value: 'update', name: 'Update Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock high-confidence suggestion
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([
|
||||
{
|
||||
value: 'send',
|
||||
confidence: 0.9,
|
||||
reason: 'Almost exact match - likely a typo',
|
||||
resource: 'message',
|
||||
description: 'Send Message'
|
||||
}
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should have enhanced error with suggestion
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.suggestion).toBeDefined();
|
||||
expect(operationError!.suggestion).toContain('send');
|
||||
});
|
||||
|
||||
it('should not enhance errors when no good suggestions are available', () => {
|
||||
const config = {
|
||||
resource: 'completelyWrongValue'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock low-confidence suggestions
|
||||
mockResourceService.findSimilarResources.mockReturnValue([
|
||||
{
|
||||
value: 'message',
|
||||
confidence: 0.2, // Too low confidence
|
||||
reason: 'Possibly related resource'
|
||||
}
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should not enhance error due to low confidence
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.suggestion).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should provide multiple operation suggestions when resource is known', () => {
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'invalidOp'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send Message' },
|
||||
{ value: 'update', name: 'Update Message' },
|
||||
{ value: 'delete', name: 'Delete Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock multiple suggestions
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([
|
||||
{ value: 'send', confidence: 0.7, reason: 'Similar operation' },
|
||||
{ value: 'update', confidence: 0.6, reason: 'Similar operation' },
|
||||
{ value: 'delete', confidence: 0.5, reason: 'Similar operation' }
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should include multiple suggestions in the result
|
||||
expect(result.suggestions.length).toBeGreaterThan(2);
|
||||
const operationSuggestions = result.suggestions.filter(s =>
|
||||
s.includes('send') || s.includes('update') || s.includes('delete')
|
||||
);
|
||||
expect(operationSuggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('confidence thresholds and filtering', () => {
|
||||
it('should only use high confidence resource suggestions', () => {
|
||||
const config = {
|
||||
resource: 'invalidResource'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock mixed confidence suggestions
|
||||
mockResourceService.findSimilarResources.mockReturnValue([
|
||||
{ value: 'message1', confidence: 0.9, reason: 'High confidence' },
|
||||
{ value: 'message2', confidence: 0.4, reason: 'Low confidence' },
|
||||
{ value: 'message3', confidence: 0.7, reason: 'Medium confidence' }
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should only use suggestions above threshold
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError?.suggestion).toBeDefined();
|
||||
// Should prefer high confidence suggestion
|
||||
expect(resourceError!.suggestion).toContain('message1');
|
||||
});
|
||||
|
||||
it('should only use high confidence operation suggestions', () => {
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'invalidOperation'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Mock mixed confidence suggestions
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([
|
||||
{ value: 'send', confidence: 0.95, reason: 'Very high confidence' },
|
||||
{ value: 'post', confidence: 0.3, reason: 'Low confidence' }
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should only use high confidence suggestion
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError?.suggestion).toBeDefined();
|
||||
expect(operationError!.suggestion).toContain('send');
|
||||
expect(operationError!.suggestion).not.toContain('post');
|
||||
});
|
||||
});
|
||||
|
||||
describe('integration with existing validation logic', () => {
|
||||
it('should work with minimal validation mode', () => {
|
||||
// Mock repository to return empty resources
|
||||
mockRepository.getNodeResources.mockReturnValue([]);
|
||||
|
||||
const config = {
|
||||
resource: 'invalidResource'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
mockResourceService.findSimilarResources.mockReturnValue([
|
||||
{ value: 'message', confidence: 0.8, reason: 'Similar' }
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'minimal',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should still enhance errors in minimal mode
|
||||
expect(mockResourceService.findSimilarResources).toHaveBeenCalled();
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should work with strict validation profile', () => {
|
||||
// Mock repository to return valid resource but no operations
|
||||
mockRepository.getNodeResources.mockReturnValue([
|
||||
{ value: 'message', name: 'Message' }
|
||||
]);
|
||||
mockRepository.getOperationsForResource.mockReturnValue([]);
|
||||
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'invalidOp'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([
|
||||
{ value: 'send', confidence: 0.8, reason: 'Similar' }
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'strict'
|
||||
);
|
||||
|
||||
// Should enhance errors regardless of profile
|
||||
expect(mockOperationService.findSimilarOperations).toHaveBeenCalled();
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError?.suggestion).toBeDefined();
|
||||
});
|
||||
|
||||
it('should preserve original error properties when enhancing', () => {
|
||||
const config = {
|
||||
resource: 'invalidResource'
|
||||
};
|
||||
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'message', name: 'Message' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
mockResourceService.findSimilarResources.mockReturnValue([
|
||||
{ value: 'message', confidence: 0.8, reason: 'Similar' }
|
||||
]);
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
|
||||
// Should preserve original error properties
|
||||
expect(resourceError?.type).toBeDefined();
|
||||
expect(resourceError?.property).toBe('resource');
|
||||
expect(resourceError?.message).toBeDefined();
|
||||
|
||||
// Should add suggestion without overriding other properties
|
||||
expect(resourceError?.suggestion).toBeDefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,421 +0,0 @@
|
||||
/**
|
||||
* Tests for EnhancedConfigValidator operation and resource validation
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { EnhancedConfigValidator } from '../../../src/services/enhanced-config-validator';
|
||||
import { NodeRepository } from '../../../src/database/node-repository';
|
||||
import { createTestDatabase } from '../../utils/database-utils';
|
||||
|
||||
describe('EnhancedConfigValidator - Operation and Resource Validation', () => {
|
||||
let repository: NodeRepository;
|
||||
let testDb: any;
|
||||
|
||||
beforeEach(async () => {
|
||||
testDb = await createTestDatabase();
|
||||
repository = testDb.nodeRepository;
|
||||
|
||||
// Initialize similarity services
|
||||
EnhancedConfigValidator.initializeSimilarityServices(repository);
|
||||
|
||||
// Add Google Drive test node
|
||||
const googleDriveNode = {
|
||||
nodeType: 'nodes-base.googleDrive',
|
||||
packageName: 'n8n-nodes-base',
|
||||
displayName: 'Google Drive',
|
||||
description: 'Access Google Drive',
|
||||
category: 'transform',
|
||||
style: 'declarative' as const,
|
||||
isAITool: false,
|
||||
isTrigger: false,
|
||||
isWebhook: false,
|
||||
isVersioned: true,
|
||||
version: '1',
|
||||
properties: [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'file', name: 'File' },
|
||||
{ value: 'folder', name: 'Folder' },
|
||||
{ value: 'fileFolder', name: 'File & Folder' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['file']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'copy', name: 'Copy' },
|
||||
{ value: 'delete', name: 'Delete' },
|
||||
{ value: 'download', name: 'Download' },
|
||||
{ value: 'list', name: 'List' },
|
||||
{ value: 'share', name: 'Share' },
|
||||
{ value: 'update', name: 'Update' },
|
||||
{ value: 'upload', name: 'Upload' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['folder']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'create', name: 'Create' },
|
||||
{ value: 'delete', name: 'Delete' },
|
||||
{ value: 'share', name: 'Share' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['fileFolder']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'search', name: 'Search' }
|
||||
]
|
||||
}
|
||||
],
|
||||
operations: [],
|
||||
credentials: []
|
||||
};
|
||||
|
||||
repository.saveNode(googleDriveNode);
|
||||
|
||||
// Add Slack test node
|
||||
const slackNode = {
|
||||
nodeType: 'nodes-base.slack',
|
||||
packageName: 'n8n-nodes-base',
|
||||
displayName: 'Slack',
|
||||
description: 'Send messages to Slack',
|
||||
category: 'communication',
|
||||
style: 'declarative' as const,
|
||||
isAITool: false,
|
||||
isTrigger: false,
|
||||
isWebhook: false,
|
||||
isVersioned: true,
|
||||
version: '2',
|
||||
properties: [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'options',
|
||||
required: true,
|
||||
options: [
|
||||
{ value: 'channel', name: 'Channel' },
|
||||
{ value: 'message', name: 'Message' },
|
||||
{ value: 'user', name: 'User' }
|
||||
]
|
||||
},
|
||||
{
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
required: true,
|
||||
displayOptions: {
|
||||
show: {
|
||||
resource: ['message']
|
||||
}
|
||||
},
|
||||
options: [
|
||||
{ value: 'send', name: 'Send' },
|
||||
{ value: 'update', name: 'Update' },
|
||||
{ value: 'delete', name: 'Delete' }
|
||||
]
|
||||
}
|
||||
],
|
||||
operations: [],
|
||||
credentials: []
|
||||
};
|
||||
|
||||
repository.saveNode(slackNode);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
// Clean up database
|
||||
if (testDb) {
|
||||
await testDb.cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
describe('Invalid Operations', () => {
|
||||
it('should detect invalid operation "listFiles" for Google Drive', () => {
|
||||
const config = {
|
||||
resource: 'fileFolder',
|
||||
operation: 'listFiles'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
// Should have an error for invalid operation
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.message).toContain('Invalid operation "listFiles"');
|
||||
expect(operationError!.message).toContain('Did you mean');
|
||||
expect(operationError!.fix).toContain('search'); // Should suggest 'search' for fileFolder resource
|
||||
});
|
||||
|
||||
it('should provide suggestions for typos in operations', () => {
|
||||
const config = {
|
||||
resource: 'file',
|
||||
operation: 'downlod' // Typo: missing 'a'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.message).toContain('Did you mean "download"');
|
||||
});
|
||||
|
||||
it('should list valid operations for the resource', () => {
|
||||
const config = {
|
||||
resource: 'folder',
|
||||
operation: 'upload' // Invalid for folder resource
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.fix).toContain('Valid operations for resource "folder"');
|
||||
expect(operationError!.fix).toContain('create');
|
||||
expect(operationError!.fix).toContain('delete');
|
||||
expect(operationError!.fix).toContain('share');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Invalid Resources', () => {
|
||||
it('should detect plural resource "files" and suggest singular', () => {
|
||||
const config = {
|
||||
resource: 'files', // Should be 'file'
|
||||
operation: 'list'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.message).toContain('Invalid resource "files"');
|
||||
expect(resourceError!.message).toContain('Did you mean "file"');
|
||||
expect(resourceError!.fix).toContain('Use singular');
|
||||
});
|
||||
|
||||
it('should suggest similar resources for typos', () => {
|
||||
const config = {
|
||||
resource: 'flie', // Typo
|
||||
operation: 'download'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.message).toContain('Did you mean "file"');
|
||||
});
|
||||
|
||||
it('should list valid resources when no match found', () => {
|
||||
const config = {
|
||||
resource: 'document', // Not a valid resource
|
||||
operation: 'create'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.fix).toContain('Valid resources:');
|
||||
expect(resourceError!.fix).toContain('file');
|
||||
expect(resourceError!.fix).toContain('folder');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Combined Resource and Operation Validation', () => {
|
||||
it('should validate both resource and operation together', () => {
|
||||
const config = {
|
||||
resource: 'files', // Invalid: should be singular
|
||||
operation: 'listFiles' // Invalid: should be 'list' or 'search'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.length).toBeGreaterThanOrEqual(2);
|
||||
|
||||
// Should have error for resource
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.message).toContain('files');
|
||||
|
||||
// Should have error for operation
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.message).toContain('listFiles');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Slack Node Validation', () => {
|
||||
it('should suggest "send" instead of "sendMessage"', () => {
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'sendMessage' // Common mistake
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.slack');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.message).toContain('Did you mean "send"');
|
||||
});
|
||||
|
||||
it('should suggest singular "channel" instead of "channels"', () => {
|
||||
const config = {
|
||||
resource: 'channels', // Should be singular
|
||||
operation: 'create'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.slack');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.message).toContain('Did you mean "channel"');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Valid Configurations', () => {
|
||||
it('should accept valid Google Drive configuration', () => {
|
||||
const config = {
|
||||
resource: 'file',
|
||||
operation: 'download'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleDrive',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should not have errors for resource or operation
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(resourceError).toBeUndefined();
|
||||
expect(operationError).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should accept valid Slack configuration', () => {
|
||||
const config = {
|
||||
resource: 'message',
|
||||
operation: 'send'
|
||||
};
|
||||
|
||||
const node = repository.getNode('nodes-base.slack');
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.slack',
|
||||
config,
|
||||
node.properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Should not have errors for resource or operation
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(resourceError).toBeUndefined();
|
||||
expect(operationError).toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,684 +0,0 @@
|
||||
/**
|
||||
* Tests for EnhancedConfigValidator - Type Structure Validation
|
||||
*
|
||||
* Tests the integration of TypeStructureService into EnhancedConfigValidator
|
||||
* for validating complex types: filter, resourceMapper, assignmentCollection, resourceLocator
|
||||
*
|
||||
* @group unit
|
||||
* @group services
|
||||
* @group validation
|
||||
*/
|
||||
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
describe('EnhancedConfigValidator - Type Structure Validation', () => {
|
||||
describe('Filter Type Validation', () => {
|
||||
it('should validate valid filter configuration', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
leftValue: '{{ $json.name }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'John',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'conditions',
|
||||
type: 'filter',
|
||||
required: true,
|
||||
displayName: 'Conditions',
|
||||
default: {},
|
||||
},
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should validate filter with multiple conditions', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'or',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
leftValue: '{{ $json.age }}',
|
||||
operator: { type: 'number', operation: 'gt' },
|
||||
rightValue: 18,
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
leftValue: '{{ $json.country }}',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
rightValue: 'US',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'conditions', type: 'filter', required: true },
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing combinator in filter', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
leftValue: 'test',
|
||||
rightValue: 'value',
|
||||
},
|
||||
],
|
||||
// Missing combinator
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toContainEqual(
|
||||
expect.objectContaining({
|
||||
property: expect.stringMatching(/conditions/),
|
||||
type: 'invalid_configuration',
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should detect invalid combinator value', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'invalid', // Should be 'and' or 'or'
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
leftValue: 'test',
|
||||
rightValue: 'value',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Filter Operation Validation', () => {
|
||||
it('should validate string operations correctly', () => {
|
||||
const validOperations = [
|
||||
'equals',
|
||||
'notEquals',
|
||||
'contains',
|
||||
'notContains',
|
||||
'startsWith',
|
||||
'endsWith',
|
||||
'regex',
|
||||
];
|
||||
|
||||
for (const operation of validOperations) {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'string', operation },
|
||||
leftValue: 'test',
|
||||
rightValue: 'value',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it('should reject invalid operation for string type', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'string', operation: 'gt' }, // 'gt' is for numbers
|
||||
leftValue: 'test',
|
||||
rightValue: 'value',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toContainEqual(
|
||||
expect.objectContaining({
|
||||
property: expect.stringContaining('operator.operation'),
|
||||
message: expect.stringContaining('not valid for type'),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should validate number operations correctly', () => {
|
||||
const validOperations = ['equals', 'notEquals', 'gt', 'lt', 'gte', 'lte'];
|
||||
|
||||
for (const operation of validOperations) {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'number', operation },
|
||||
leftValue: 10,
|
||||
rightValue: 20,
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it('should reject string operations for number type', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'number', operation: 'contains' }, // 'contains' is for strings
|
||||
leftValue: 10,
|
||||
rightValue: 20,
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
});
|
||||
|
||||
it('should validate boolean operations', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'boolean', operation: 'true' },
|
||||
leftValue: '{{ $json.isActive }}',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate dateTime operations', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'dateTime', operation: 'after' },
|
||||
leftValue: '{{ $json.createdAt }}',
|
||||
rightValue: '2024-01-01',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate array operations', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'array', operation: 'contains' },
|
||||
leftValue: '{{ $json.tags }}',
|
||||
rightValue: 'urgent',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('ResourceMapper Type Validation', () => {
|
||||
it('should validate valid resourceMapper configuration', () => {
|
||||
const config = {
|
||||
mapping: {
|
||||
mappingMode: 'defineBelow',
|
||||
value: {
|
||||
name: '{{ $json.fullName }}',
|
||||
email: '{{ $json.emailAddress }}',
|
||||
status: 'active',
|
||||
},
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'mapping', type: 'resourceMapper', required: true },
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.httpRequest',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate autoMapInputData mode', () => {
|
||||
const config = {
|
||||
mapping: {
|
||||
mappingMode: 'autoMapInputData',
|
||||
value: {},
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'mapping', type: 'resourceMapper', required: true },
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.httpRequest',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('AssignmentCollection Type Validation', () => {
|
||||
it('should validate valid assignmentCollection configuration', () => {
|
||||
const config = {
|
||||
assignments: {
|
||||
assignments: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'userName',
|
||||
value: '{{ $json.name }}',
|
||||
type: 'string',
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'userAge',
|
||||
value: 30,
|
||||
type: 'number',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'assignments', type: 'assignmentCollection', required: true },
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.set',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing assignments array', () => {
|
||||
const config = {
|
||||
assignments: {
|
||||
// Missing assignments array
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'assignments', type: 'assignmentCollection', required: true },
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.set',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('ResourceLocator Type Validation', () => {
|
||||
// TODO: Debug why resourceLocator tests fail - issue appears to be with base validator, not the new validation logic
|
||||
it.skip('should validate valid resourceLocator by ID', () => {
|
||||
const config = {
|
||||
resource: {
|
||||
mode: 'id',
|
||||
value: 'abc123',
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
displayName: 'Resource',
|
||||
default: { mode: 'list', value: '' },
|
||||
},
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleSheets',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
if (!result.valid) {
|
||||
console.log('DEBUG - ResourceLocator validation failed:');
|
||||
console.log('Errors:', JSON.stringify(result.errors, null, 2));
|
||||
}
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it.skip('should validate resourceLocator by URL', () => {
|
||||
const config = {
|
||||
resource: {
|
||||
mode: 'url',
|
||||
value: 'https://example.com/resource/123',
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
displayName: 'Resource',
|
||||
default: { mode: 'list', value: '' },
|
||||
},
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleSheets',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it.skip('should validate resourceLocator by list', () => {
|
||||
const config = {
|
||||
resource: {
|
||||
mode: 'list',
|
||||
value: 'item-from-dropdown',
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{
|
||||
name: 'resource',
|
||||
type: 'resourceLocator',
|
||||
required: true,
|
||||
displayName: 'Resource',
|
||||
default: { mode: 'list', value: '' },
|
||||
},
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.googleSheets',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Edge Cases', () => {
|
||||
it('should handle null values gracefully', () => {
|
||||
const config = {
|
||||
conditions: null,
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: false }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
// Null is acceptable for non-required fields
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle undefined values gracefully', () => {
|
||||
const config = {};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: false }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle multiple special types in same config', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'string', operation: 'equals' },
|
||||
leftValue: 'test',
|
||||
rightValue: 'value',
|
||||
},
|
||||
],
|
||||
},
|
||||
assignments: {
|
||||
assignments: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'result',
|
||||
value: 'processed',
|
||||
type: 'string',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [
|
||||
{ name: 'conditions', type: 'filter', required: true },
|
||||
{ name: 'assignments', type: 'assignmentCollection', required: true },
|
||||
];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.custom',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'ai-friendly'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Validation Profiles', () => {
|
||||
it('should respect strict profile for type validation', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [
|
||||
{
|
||||
id: '1',
|
||||
operator: { type: 'string', operation: 'gt' }, // Invalid operation
|
||||
leftValue: 'test',
|
||||
rightValue: 'value',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'strict'
|
||||
);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.profile).toBe('strict');
|
||||
});
|
||||
|
||||
it('should respect minimal profile (less strict)', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [], // Empty but valid
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
|
||||
const result = EnhancedConfigValidator.validateWithMode(
|
||||
'nodes-base.filter',
|
||||
config,
|
||||
properties,
|
||||
'operation',
|
||||
'minimal'
|
||||
);
|
||||
|
||||
expect(result.profile).toBe('minimal');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -2,7 +2,15 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { EnhancedConfigValidator, ValidationMode, ValidationProfile } from '@/services/enhanced-config-validator';
|
||||
import { ValidationError } from '@/services/config-validator';
|
||||
import { NodeSpecificValidators } from '@/services/node-specific-validators';
|
||||
import { ResourceSimilarityService } from '@/services/resource-similarity-service';
|
||||
import { OperationSimilarityService } from '@/services/operation-similarity-service';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { nodeFactory } from '@tests/fixtures/factories/node.factory';
|
||||
import { createTestDatabase } from '@tests/utils/database-utils';
|
||||
|
||||
// Mock similarity services
|
||||
vi.mock('@/services/resource-similarity-service');
|
||||
vi.mock('@/services/operation-similarity-service');
|
||||
|
||||
// Mock node-specific validators
|
||||
vi.mock('@/services/node-specific-validators', () => ({
|
||||
@@ -15,7 +23,8 @@ vi.mock('@/services/node-specific-validators', () => ({
|
||||
validateWebhook: vi.fn(),
|
||||
validatePostgres: vi.fn(),
|
||||
validateMySQL: vi.fn(),
|
||||
validateAIAgent: vi.fn()
|
||||
validateAIAgent: vi.fn(),
|
||||
validateSet: vi.fn()
|
||||
}
|
||||
}));
|
||||
|
||||
@@ -1168,4 +1177,506 @@ describe('EnhancedConfigValidator', () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Type Structure Validation (from enhanced-config-validator-type-structures) ───
|
||||
|
||||
describe('type structure validation', () => {
|
||||
describe('Filter Type Validation', () => {
|
||||
it('should validate valid filter configuration', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'and',
|
||||
conditions: [{ id: '1', leftValue: '{{ $json.name }}', operator: { type: 'string', operation: 'equals' }, rightValue: 'John' }],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true, displayName: 'Conditions', default: {} }];
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should validate filter with multiple conditions', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'or',
|
||||
conditions: [
|
||||
{ id: '1', leftValue: '{{ $json.age }}', operator: { type: 'number', operation: 'gt' }, rightValue: 18 },
|
||||
{ id: '2', leftValue: '{{ $json.country }}', operator: { type: 'string', operation: 'equals' }, rightValue: 'US' },
|
||||
],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing combinator in filter', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
conditions: [{ id: '1', operator: { type: 'string', operation: 'equals' }, leftValue: 'test', rightValue: 'value' }],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toContainEqual(expect.objectContaining({ property: expect.stringMatching(/conditions/), type: 'invalid_configuration' }));
|
||||
});
|
||||
|
||||
it('should detect invalid combinator value', () => {
|
||||
const config = {
|
||||
conditions: {
|
||||
combinator: 'invalid',
|
||||
conditions: [{ id: '1', operator: { type: 'string', operation: 'equals' }, leftValue: 'test', rightValue: 'value' }],
|
||||
},
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }];
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Filter Operation Validation', () => {
|
||||
it('should validate string operations correctly', () => {
|
||||
for (const operation of ['equals', 'notEquals', 'contains', 'notContains', 'startsWith', 'endsWith', 'regex']) {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'string', operation }, leftValue: 'test', rightValue: 'value' }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it('should reject invalid operation for string type', () => {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'string', operation: 'gt' }, leftValue: 'test', rightValue: 'value' }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors).toContainEqual(expect.objectContaining({ property: expect.stringContaining('operator.operation'), message: expect.stringContaining('not valid for type') }));
|
||||
});
|
||||
|
||||
it('should validate number operations correctly', () => {
|
||||
for (const operation of ['equals', 'notEquals', 'gt', 'lt', 'gte', 'lte']) {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'number', operation }, leftValue: 10, rightValue: 20 }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it('should reject string operations for number type', () => {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'number', operation: 'contains' }, leftValue: 10, rightValue: 20 }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
});
|
||||
|
||||
it('should validate boolean operations', () => {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'boolean', operation: 'true' }, leftValue: '{{ $json.isActive }}' }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate dateTime operations', () => {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'dateTime', operation: 'after' }, leftValue: '{{ $json.createdAt }}', rightValue: '2024-01-01' }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate array operations', () => {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'array', operation: 'contains' }, leftValue: '{{ $json.tags }}', rightValue: 'urgent' }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('ResourceMapper Type Validation', () => {
|
||||
it('should validate valid resourceMapper configuration', () => {
|
||||
const config = { mapping: { mappingMode: 'defineBelow', value: { name: '{{ $json.fullName }}', email: '{{ $json.emailAddress }}', status: 'active' } } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.httpRequest', config, [{ name: 'mapping', type: 'resourceMapper', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should validate autoMapInputData mode', () => {
|
||||
const config = { mapping: { mappingMode: 'autoMapInputData', value: {} } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.httpRequest', config, [{ name: 'mapping', type: 'resourceMapper', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('AssignmentCollection Type Validation', () => {
|
||||
it('should validate valid assignmentCollection configuration', () => {
|
||||
const config = { assignments: { assignments: [{ id: '1', name: 'userName', value: '{{ $json.name }}', type: 'string' }, { id: '2', name: 'userAge', value: 30, type: 'number' }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.set', config, [{ name: 'assignments', type: 'assignmentCollection', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect missing assignments array', () => {
|
||||
const config = { assignments: {} };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.set', config, [{ name: 'assignments', type: 'assignmentCollection', required: true }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('ResourceLocator Type Validation', () => {
|
||||
it.skip('should validate valid resourceLocator by ID', () => {
|
||||
const config = { resource: { mode: 'id', value: 'abc123' } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleSheets', config, [{ name: 'resource', type: 'resourceLocator', required: true, displayName: 'Resource', default: { mode: 'list', value: '' } }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it.skip('should validate resourceLocator by URL', () => {
|
||||
const config = { resource: { mode: 'url', value: 'https://example.com/resource/123' } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleSheets', config, [{ name: 'resource', type: 'resourceLocator', required: true, displayName: 'Resource', default: { mode: 'list', value: '' } }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it.skip('should validate resourceLocator by list', () => {
|
||||
const config = { resource: { mode: 'list', value: 'item-from-dropdown' } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleSheets', config, [{ name: 'resource', type: 'resourceLocator', required: true, displayName: 'Resource', default: { mode: 'list', value: '' } }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Type Structure Edge Cases', () => {
|
||||
it('should handle null values gracefully', () => {
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', { conditions: null }, [{ name: 'conditions', type: 'filter', required: false }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle undefined values gracefully', () => {
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', {}, [{ name: 'conditions', type: 'filter', required: false }], 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle multiple special types in same config', () => {
|
||||
const config = {
|
||||
conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'string', operation: 'equals' }, leftValue: 'test', rightValue: 'value' }] },
|
||||
assignments: { assignments: [{ id: '1', name: 'result', value: 'processed', type: 'string' }] },
|
||||
};
|
||||
const properties = [{ name: 'conditions', type: 'filter', required: true }, { name: 'assignments', type: 'assignmentCollection', required: true }];
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.custom', config, properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Validation Profiles for Type Structures', () => {
|
||||
it('should respect strict profile for type validation', () => {
|
||||
const config = { conditions: { combinator: 'and', conditions: [{ id: '1', operator: { type: 'string', operation: 'gt' }, leftValue: 'test', rightValue: 'value' }] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'strict');
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.profile).toBe('strict');
|
||||
});
|
||||
|
||||
it('should respect minimal profile (less strict)', () => {
|
||||
const config = { conditions: { combinator: 'and', conditions: [] } };
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.filter', config, [{ name: 'conditions', type: 'filter', required: true }], 'operation', 'minimal');
|
||||
expect(result.profile).toBe('minimal');
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Integration Tests (from enhanced-config-validator-integration) ─────────
|
||||
|
||||
describe('EnhancedConfigValidator - Integration Tests', () => {
|
||||
let mockResourceService: any;
|
||||
let mockOperationService: any;
|
||||
let mockRepository: any;
|
||||
|
||||
beforeEach(() => {
|
||||
mockRepository = {
|
||||
getNode: vi.fn(),
|
||||
getNodeOperations: vi.fn().mockReturnValue([]),
|
||||
getNodeResources: vi.fn().mockReturnValue([]),
|
||||
getOperationsForResource: vi.fn().mockReturnValue([]),
|
||||
getDefaultOperationForResource: vi.fn().mockReturnValue(undefined),
|
||||
getNodePropertyDefaults: vi.fn().mockReturnValue({})
|
||||
};
|
||||
|
||||
mockResourceService = { findSimilarResources: vi.fn().mockReturnValue([]) };
|
||||
mockOperationService = { findSimilarOperations: vi.fn().mockReturnValue([]) };
|
||||
|
||||
vi.mocked(ResourceSimilarityService).mockImplementation(() => mockResourceService);
|
||||
vi.mocked(OperationSimilarityService).mockImplementation(() => mockOperationService);
|
||||
|
||||
EnhancedConfigValidator.initializeSimilarityServices(mockRepository);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('similarity service integration', () => {
|
||||
it('should initialize similarity services when initializeSimilarityServices is called', () => {
|
||||
expect(ResourceSimilarityService).toHaveBeenCalled();
|
||||
expect(OperationSimilarityService).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should use resource similarity service for invalid resource errors', () => {
|
||||
mockResourceService.findSimilarResources.mockReturnValue([{ value: 'message', confidence: 0.8, reason: 'Similar resource name', availableOperations: ['send', 'update'] }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'invalidResource', operation: 'send' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }, { value: 'channel', name: 'Channel' }] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send Message' }] }], 'operation', 'ai-friendly');
|
||||
expect(mockResourceService.findSimilarResources).toHaveBeenCalledWith('nodes-base.slack', 'invalidResource', expect.any(Number));
|
||||
expect(result.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should use operation similarity service for invalid operation errors', () => {
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([{ value: 'send', confidence: 0.9, reason: 'Very similar - likely a typo', resource: 'message' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'invalidOperation' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send Message' }, { value: 'update', name: 'Update Message' }] }], 'operation', 'ai-friendly');
|
||||
expect(mockOperationService.findSimilarOperations).toHaveBeenCalledWith('nodes-base.slack', 'invalidOperation', 'message', expect.any(Number));
|
||||
expect(result.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should handle similarity service errors gracefully', () => {
|
||||
mockResourceService.findSimilarResources.mockImplementation(() => { throw new Error('Service error'); });
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'invalidResource', operation: 'send' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }], 'operation', 'ai-friendly');
|
||||
expect(result).toBeDefined();
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should not call similarity services for valid configurations', () => {
|
||||
mockRepository.getNodeResources.mockReturnValue([{ value: 'message', name: 'Message' }, { value: 'channel', name: 'Channel' }]);
|
||||
mockRepository.getNodeOperations.mockReturnValue([{ value: 'send', name: 'Send Message' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'send', channel: '#general', text: 'Test message' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send Message' }] }], 'operation', 'ai-friendly');
|
||||
expect(mockResourceService.findSimilarResources).not.toHaveBeenCalled();
|
||||
expect(mockOperationService.findSimilarOperations).not.toHaveBeenCalled();
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
|
||||
it('should limit suggestion count when calling similarity services', () => {
|
||||
EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'invalidResource' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }], 'operation', 'ai-friendly');
|
||||
expect(mockResourceService.findSimilarResources).toHaveBeenCalledWith('nodes-base.slack', 'invalidResource', 3);
|
||||
});
|
||||
});
|
||||
|
||||
describe('error enhancement with suggestions', () => {
|
||||
it('should enhance resource validation errors with suggestions', () => {
|
||||
mockResourceService.findSimilarResources.mockReturnValue([{ value: 'message', confidence: 0.85, reason: 'Very similar - likely a typo', availableOperations: ['send', 'update', 'delete'] }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'msgs' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }, { value: 'channel', name: 'Channel' }] }], 'operation', 'ai-friendly');
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.suggestion).toBeDefined();
|
||||
expect(resourceError!.suggestion).toContain('message');
|
||||
});
|
||||
|
||||
it('should enhance operation validation errors with suggestions', () => {
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([{ value: 'send', confidence: 0.9, reason: 'Almost exact match - likely a typo', resource: 'message', description: 'Send Message' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'sned' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send Message' }, { value: 'update', name: 'Update Message' }] }], 'operation', 'ai-friendly');
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.suggestion).toBeDefined();
|
||||
expect(operationError!.suggestion).toContain('send');
|
||||
});
|
||||
|
||||
it('should not enhance errors when no good suggestions are available', () => {
|
||||
mockResourceService.findSimilarResources.mockReturnValue([{ value: 'message', confidence: 0.2, reason: 'Possibly related resource' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'completelyWrongValue' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }], 'operation', 'ai-friendly');
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.suggestion).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should provide multiple operation suggestions when resource is known', () => {
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([{ value: 'send', confidence: 0.7, reason: 'Similar operation' }, { value: 'update', confidence: 0.6, reason: 'Similar operation' }, { value: 'delete', confidence: 0.5, reason: 'Similar operation' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'invalidOp' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send Message' }, { value: 'update', name: 'Update Message' }, { value: 'delete', name: 'Delete Message' }] }], 'operation', 'ai-friendly');
|
||||
expect(result.suggestions.length).toBeGreaterThan(2);
|
||||
expect(result.suggestions.filter(s => s.includes('send') || s.includes('update') || s.includes('delete')).length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('confidence thresholds and filtering', () => {
|
||||
it('should only use high confidence resource suggestions', () => {
|
||||
mockResourceService.findSimilarResources.mockReturnValue([{ value: 'message1', confidence: 0.9, reason: 'High confidence' }, { value: 'message2', confidence: 0.4, reason: 'Low confidence' }, { value: 'message3', confidence: 0.7, reason: 'Medium confidence' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'invalidResource' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }], 'operation', 'ai-friendly');
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError?.suggestion).toBeDefined();
|
||||
expect(resourceError!.suggestion).toContain('message1');
|
||||
});
|
||||
|
||||
it('should only use high confidence operation suggestions', () => {
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([{ value: 'send', confidence: 0.95, reason: 'Very high confidence' }, { value: 'post', confidence: 0.3, reason: 'Low confidence' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'invalidOperation' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send Message' }] }], 'operation', 'ai-friendly');
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError?.suggestion).toBeDefined();
|
||||
expect(operationError!.suggestion).toContain('send');
|
||||
expect(operationError!.suggestion).not.toContain('post');
|
||||
});
|
||||
});
|
||||
|
||||
describe('integration with existing validation logic', () => {
|
||||
it('should work with minimal validation mode', () => {
|
||||
mockRepository.getNodeResources.mockReturnValue([]);
|
||||
mockResourceService.findSimilarResources.mockReturnValue([{ value: 'message', confidence: 0.8, reason: 'Similar' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'invalidResource' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }], 'minimal', 'ai-friendly');
|
||||
expect(mockResourceService.findSimilarResources).toHaveBeenCalled();
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should work with strict validation profile', () => {
|
||||
mockRepository.getNodeResources.mockReturnValue([{ value: 'message', name: 'Message' }]);
|
||||
mockRepository.getOperationsForResource.mockReturnValue([]);
|
||||
mockOperationService.findSimilarOperations.mockReturnValue([{ value: 'send', confidence: 0.8, reason: 'Similar' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'invalidOp' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send Message' }] }], 'operation', 'strict');
|
||||
expect(mockOperationService.findSimilarOperations).toHaveBeenCalled();
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError?.suggestion).toBeDefined();
|
||||
});
|
||||
|
||||
it('should preserve original error properties when enhancing', () => {
|
||||
mockResourceService.findSimilarResources.mockReturnValue([{ value: 'message', confidence: 0.8, reason: 'Similar' }]);
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'invalidResource' }, [{ name: 'resource', type: 'options', required: true, options: [{ value: 'message', name: 'Message' }] }], 'operation', 'ai-friendly');
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError?.type).toBeDefined();
|
||||
expect(resourceError?.property).toBe('resource');
|
||||
expect(resourceError?.message).toBeDefined();
|
||||
expect(resourceError?.suggestion).toBeDefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Operation and Resource Validation (from enhanced-config-validator-operations) ───
|
||||
|
||||
describe('EnhancedConfigValidator - Operation and Resource Validation', () => {
|
||||
let repository: NodeRepository;
|
||||
let testDb: any;
|
||||
|
||||
beforeEach(async () => {
|
||||
testDb = await createTestDatabase();
|
||||
repository = testDb.nodeRepository;
|
||||
|
||||
// Configure mocked similarity services to return empty arrays by default
|
||||
vi.mocked(ResourceSimilarityService).mockImplementation(() => ({
|
||||
findSimilarResources: vi.fn().mockReturnValue([])
|
||||
}) as any);
|
||||
vi.mocked(OperationSimilarityService).mockImplementation(() => ({
|
||||
findSimilarOperations: vi.fn().mockReturnValue([])
|
||||
}) as any);
|
||||
|
||||
EnhancedConfigValidator.initializeSimilarityServices(repository);
|
||||
|
||||
repository.saveNode({
|
||||
nodeType: 'nodes-base.googleDrive', packageName: 'n8n-nodes-base', displayName: 'Google Drive', description: 'Access Google Drive', category: 'transform', style: 'declarative' as const, isAITool: false, isTrigger: false, isWebhook: false, isVersioned: true, version: '1',
|
||||
properties: [
|
||||
{ name: 'resource', type: 'options', required: true, options: [{ value: 'file', name: 'File' }, { value: 'folder', name: 'Folder' }, { value: 'fileFolder', name: 'File & Folder' }] },
|
||||
{ name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['file'] } }, options: [{ value: 'copy', name: 'Copy' }, { value: 'delete', name: 'Delete' }, { value: 'download', name: 'Download' }, { value: 'list', name: 'List' }, { value: 'share', name: 'Share' }, { value: 'update', name: 'Update' }, { value: 'upload', name: 'Upload' }] },
|
||||
{ name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['folder'] } }, options: [{ value: 'create', name: 'Create' }, { value: 'delete', name: 'Delete' }, { value: 'share', name: 'Share' }] },
|
||||
{ name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['fileFolder'] } }, options: [{ value: 'search', name: 'Search' }] }
|
||||
],
|
||||
operations: [], credentials: []
|
||||
});
|
||||
|
||||
repository.saveNode({
|
||||
nodeType: 'nodes-base.slack', packageName: 'n8n-nodes-base', displayName: 'Slack', description: 'Send messages to Slack', category: 'communication', style: 'declarative' as const, isAITool: false, isTrigger: false, isWebhook: false, isVersioned: true, version: '2',
|
||||
properties: [
|
||||
{ name: 'resource', type: 'options', required: true, options: [{ value: 'channel', name: 'Channel' }, { value: 'message', name: 'Message' }, { value: 'user', name: 'User' }] },
|
||||
{ name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [{ value: 'send', name: 'Send' }, { value: 'update', name: 'Update' }, { value: 'delete', name: 'Delete' }] }
|
||||
],
|
||||
operations: [], credentials: []
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (testDb) { await testDb.cleanup(); }
|
||||
});
|
||||
|
||||
describe('Invalid Operations', () => {
|
||||
it('should detect invalid operation for Google Drive fileFolder resource', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'fileFolder', operation: 'listFiles' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.message).toContain('listFiles');
|
||||
});
|
||||
|
||||
it('should detect typos in operations', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'file', operation: 'downlod' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
});
|
||||
|
||||
it('should list valid operations for the resource', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'folder', operation: 'upload' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
expect(operationError!.fix).toContain('Valid operations for resource "folder"');
|
||||
expect(operationError!.fix).toContain('create');
|
||||
expect(operationError!.fix).toContain('delete');
|
||||
expect(operationError!.fix).toContain('share');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Invalid Resources', () => {
|
||||
it('should detect invalid plural resource "files"', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'files', operation: 'list' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.message).toContain('files');
|
||||
});
|
||||
|
||||
it('should detect typos in resources', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'flie', operation: 'download' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
});
|
||||
|
||||
it('should list valid resources when no match found', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'document', operation: 'create' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
expect(resourceError!.fix).toContain('Valid resources:');
|
||||
expect(resourceError!.fix).toContain('file');
|
||||
expect(resourceError!.fix).toContain('folder');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Combined Resource and Operation Validation', () => {
|
||||
it('should validate both resource and operation together', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'files', operation: 'listFiles' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.length).toBeGreaterThanOrEqual(2);
|
||||
expect(result.errors.find(e => e.property === 'resource')).toBeDefined();
|
||||
expect(result.errors.find(e => e.property === 'operation')).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Slack Node Validation', () => {
|
||||
it('should detect invalid operation "sendMessage" for Slack', () => {
|
||||
const node = repository.getNode('nodes-base.slack');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'sendMessage' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const operationError = result.errors.find(e => e.property === 'operation');
|
||||
expect(operationError).toBeDefined();
|
||||
});
|
||||
|
||||
it('should detect invalid plural resource "channels" for Slack', () => {
|
||||
const node = repository.getNode('nodes-base.slack');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'channels', operation: 'create' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.valid).toBe(false);
|
||||
const resourceError = result.errors.find(e => e.property === 'resource');
|
||||
expect(resourceError).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Valid Configurations', () => {
|
||||
it('should accept valid Google Drive configuration', () => {
|
||||
const node = repository.getNode('nodes-base.googleDrive');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.googleDrive', { resource: 'file', operation: 'download' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.errors.find(e => e.property === 'resource')).toBeUndefined();
|
||||
expect(result.errors.find(e => e.property === 'operation')).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should accept valid Slack configuration', () => {
|
||||
const node = repository.getNode('nodes-base.slack');
|
||||
const result = EnhancedConfigValidator.validateWithMode('nodes-base.slack', { resource: 'message', operation: 'send' }, node.properties, 'operation', 'ai-friendly');
|
||||
expect(result.errors.find(e => e.property === 'resource')).toBeUndefined();
|
||||
expect(result.errors.find(e => e.property === 'operation')).toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,865 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
// Mock dependencies
vi.mock('@/database/node-repository');
vi.mock('@/services/enhanced-config-validator');

/**
 * Edge-case coverage for the SplitInBatches "loop output" validation in
 * WorkflowValidator: nodes without output metadata, malformed connection
 * structures, loop-back detection limits, and large-workflow performance.
 * The repository is mocked; by default getNode() returns SplitInBatches
 * with its two outputs (done, loop) and a bare definition for anything else.
 */
describe('Loop Output Fix - Edge Cases', () => {
  let validator: WorkflowValidator;
  let mockNodeRepository: any;
  let mockNodeValidator: any;

  beforeEach(() => {
    vi.clearAllMocks();

    mockNodeRepository = {
      getNode: vi.fn((nodeType: string) => {
        // Default return
        if (nodeType === 'nodes-base.splitInBatches') {
          return {
            nodeType: 'nodes-base.splitInBatches',
            outputs: [
              { displayName: 'Done', name: 'done' },
              { displayName: 'Loop', name: 'loop' }
            ],
            outputNames: ['done', 'loop'],
            properties: []
          };
        }
        return {
          nodeType,
          properties: []
        };
      })
    };

    // Per-node config validation is stubbed out — these tests only care
    // about connection/output validation.
    mockNodeValidator = {
      validateWithMode: vi.fn().mockReturnValue({
        errors: [],
        warnings: []
      })
    };

    validator = new WorkflowValidator(mockNodeRepository, mockNodeValidator);
  });

  // Output metadata may be null/undefined/empty — the validator must not crash.
  describe('Nodes without outputs', () => {
    it('should handle nodes with null outputs gracefully', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.httpRequest',
        outputs: null,
        outputNames: null,
        properties: []
      });

      const workflow = {
        name: 'No Outputs Workflow',
        nodes: [
          {
            id: '1',
            name: 'HTTP Request',
            type: 'n8n-nodes-base.httpRequest',
            position: [100, 100],
            parameters: { url: 'https://example.com' }
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'HTTP Request': {
            main: [
              [{ node: 'Set', type: 'main', index: 0 }]
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should not crash or produce output-related errors
      expect(result).toBeDefined();
      const outputErrors = result.errors.filter(e =>
        e.message?.includes('output') && !e.message?.includes('Connection')
      );
      expect(outputErrors).toHaveLength(0);
    });

    it('should handle nodes with undefined outputs gracefully', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.webhook',
        // outputs and outputNames are undefined
        properties: []
      });

      const workflow = {
        name: 'Undefined Outputs Workflow',
        nodes: [
          {
            id: '1',
            name: 'Webhook',
            type: 'n8n-nodes-base.webhook',
            position: [100, 100],
            parameters: {}
          }
        ],
        connections: {}
      };

      const result = await validator.validateWorkflow(workflow as any);

      expect(result).toBeDefined();
      expect(result.valid).toBeTruthy(); // Empty workflow with webhook should be valid
    });

    it('should handle nodes with empty outputs array', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.customNode',
        outputs: [],
        outputNames: [],
        properties: []
      });

      const workflow = {
        name: 'Empty Outputs Workflow',
        nodes: [
          {
            id: '1',
            name: 'Custom Node',
            type: 'n8n-nodes-base.customNode',
            position: [100, 100],
            parameters: {}
          }
        ],
        connections: {
          'Custom Node': {
            main: [
              [{ node: 'Custom Node', type: 'main', index: 0 }] // Self-reference
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should warn about self-reference but not crash
      const selfRefWarnings = result.warnings.filter(w =>
        w.message?.includes('self-referencing')
      );
      expect(selfRefWarnings).toHaveLength(1);
    });
  });

  // Out-of-range indices must produce errors (negative) or be tolerated (large).
  describe('Invalid connection indices', () => {
    it('should handle negative connection indices', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Negative Index Workflow',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              [{ node: 'Set', type: 'main', index: -1 }] // Invalid negative index
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      const negativeIndexErrors = result.errors.filter(e =>
        e.message?.includes('Invalid connection index -1')
      );
      expect(negativeIndexErrors).toHaveLength(1);
      expect(negativeIndexErrors[0].message).toContain('must be non-negative');
    });

    it('should handle very large connection indices', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.switch',
        outputs: [
          { displayName: 'Output 1' },
          { displayName: 'Output 2' }
        ],
        properties: []
      });

      const workflow = {
        name: 'Large Index Workflow',
        nodes: [
          {
            id: '1',
            name: 'Switch',
            type: 'n8n-nodes-base.switch',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Switch': {
            main: [
              [{ node: 'Set', type: 'main', index: 999 }] // Very large index
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should validate without crashing (n8n allows large indices)
      expect(result).toBeDefined();
    });
  });

  // Structurally broken connection objects must be tolerated, not crash.
  describe('Malformed connection structures', () => {
    it('should handle null connection objects', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Null Connections Workflow',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              null, // Null output
              [{ node: 'NonExistent', type: 'main', index: 0 }]
            ] as any
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should handle gracefully without crashing
      expect(result).toBeDefined();
    });

    it('should handle missing connection properties', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Malformed Connections Workflow',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              [
                { node: 'Set' } as any, // Missing type and index
                { type: 'main', index: 0 } as any, // Missing node
                {} as any // Empty object
              ]
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should handle malformed connections but report errors
      expect(result).toBeDefined();
      expect(result.errors.length).toBeGreaterThan(0);
    });
  });

  // Loop-back traversal is depth-limited and must terminate on cycles.
  describe('Deep loop back detection limits', () => {
    it('should respect maxDepth limit in checkForLoopBack', async () => {
      // Use default mock that includes outputs for SplitInBatches

      // Create a very deep chain that exceeds maxDepth (50)
      const nodes = [
        {
          id: '1',
          name: 'Split In Batches',
          type: 'n8n-nodes-base.splitInBatches',
          position: [100, 100],
          parameters: {}
        }
      ];

      const connections: any = {
        'Split In Batches': {
          main: [
            [], // Done output
            [{ node: 'Node1', type: 'main', index: 0 }] // Loop output
          ]
        }
      };

      // Create chain of 60 nodes (exceeds maxDepth of 50)
      for (let i = 1; i <= 60; i++) {
        nodes.push({
          id: (i + 1).toString(),
          name: `Node${i}`,
          type: 'n8n-nodes-base.set',
          position: [100 + i * 50, 100],
          parameters: {}
        });

        if (i < 60) {
          connections[`Node${i}`] = {
            main: [[{ node: `Node${i + 1}`, type: 'main', index: 0 }]]
          };
        } else {
          // Last node connects back to Split In Batches
          connections[`Node${i}`] = {
            main: [[{ node: 'Split In Batches', type: 'main', index: 0 }]]
          };
        }
      }

      const workflow = {
        name: 'Deep Chain Workflow',
        nodes,
        connections
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should warn about missing loop back because depth limit prevents detection
      const loopBackWarnings = result.warnings.filter(w =>
        w.message?.includes('doesn\'t connect back')
      );
      expect(loopBackWarnings).toHaveLength(1);
    });

    it('should handle circular references without infinite loops', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Circular Reference Workflow',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'NodeA',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          },
          {
            id: '3',
            name: 'NodeB',
            type: 'n8n-nodes-base.function',
            position: [500, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              [],
              [{ node: 'NodeA', type: 'main', index: 0 }]
            ]
          },
          'NodeA': {
            main: [
              [{ node: 'NodeB', type: 'main', index: 0 }]
            ]
          },
          'NodeB': {
            main: [
              [{ node: 'NodeA', type: 'main', index: 0 }] // Circular: B -> A -> B -> A ...
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should complete without hanging and warn about missing loop back
      expect(result).toBeDefined();
      const loopBackWarnings = result.warnings.filter(w =>
        w.message?.includes('doesn\'t connect back')
      );
      expect(loopBackWarnings).toHaveLength(1);
    });

    it('should handle self-referencing nodes in loop back detection', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Self Reference Workflow',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'SelfRef',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              [],
              [{ node: 'SelfRef', type: 'main', index: 0 }]
            ]
          },
          'SelfRef': {
            main: [
              [{ node: 'SelfRef', type: 'main', index: 0 }] // Self-reference instead of loop back
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should warn about missing loop back and self-reference
      const loopBackWarnings = result.warnings.filter(w =>
        w.message?.includes('doesn\'t connect back')
      );
      const selfRefWarnings = result.warnings.filter(w =>
        w.message?.includes('self-referencing')
      );

      expect(loopBackWarnings).toHaveLength(1);
      expect(selfRefWarnings).toHaveLength(1);
    });
  });

  // Unusual but legal output shapes: many outputs, mixed connection types.
  describe('Complex output structures', () => {
    it('should handle nodes with many outputs', async () => {
      const manyOutputs = Array.from({ length: 20 }, (_, i) => ({
        displayName: `Output ${i + 1}`,
        name: `output${i + 1}`,
        description: `Output number ${i + 1}`
      }));

      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.complexSwitch',
        outputs: manyOutputs,
        outputNames: manyOutputs.map(o => o.name),
        properties: []
      });

      const workflow = {
        name: 'Many Outputs Workflow',
        nodes: [
          {
            id: '1',
            name: 'Complex Switch',
            type: 'n8n-nodes-base.complexSwitch',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Complex Switch': {
            main: Array.from({ length: 20 }, () => [
              { node: 'Set', type: 'main', index: 0 }
            ])
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should handle without performance issues
      expect(result).toBeDefined();
    });

    it('should handle mixed output types (main, error, ai_tool)', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.complexNode',
        outputs: [
          { displayName: 'Main', type: 'main' },
          { displayName: 'Error', type: 'error' }
        ],
        properties: []
      });

      const workflow = {
        name: 'Mixed Output Types Workflow',
        nodes: [
          {
            id: '1',
            name: 'Complex Node',
            type: 'n8n-nodes-base.complexNode',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Main Handler',
            type: 'n8n-nodes-base.set',
            position: [300, 50],
            parameters: {}
          },
          {
            id: '3',
            name: 'Error Handler',
            type: 'n8n-nodes-base.set',
            position: [300, 150],
            parameters: {}
          },
          {
            id: '4',
            name: 'Tool',
            type: 'n8n-nodes-base.httpRequest',
            position: [500, 100],
            parameters: {}
          }
        ],
        connections: {
          'Complex Node': {
            main: [
              [{ node: 'Main Handler', type: 'main', index: 0 }]
            ],
            error: [
              [{ node: 'Error Handler', type: 'main', index: 0 }]
            ],
            ai_tool: [
              [{ node: 'Tool', type: 'main', index: 0 }]
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should validate all connection types
      expect(result).toBeDefined();
      expect(result.statistics.validConnections).toBe(3);
    });
  });

  // SplitInBatches-specific rules: loop-back expectations and reversed outputs.
  describe('SplitInBatches specific edge cases', () => {
    it('should handle SplitInBatches with no connections', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Isolated SplitInBatches',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          }
        ],
        connections: {}
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should not produce SplitInBatches-specific warnings for isolated node
      const splitWarnings = result.warnings.filter(w =>
        w.message?.includes('SplitInBatches') ||
        w.message?.includes('loop') ||
        w.message?.includes('done')
      );
      expect(splitWarnings).toHaveLength(0);
    });

    it('should handle SplitInBatches with only one output connected', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Single Output SplitInBatches',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Final Action',
            type: 'n8n-nodes-base.emailSend',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              [{ node: 'Final Action', type: 'main', index: 0 }], // Only done output connected
              [] // Loop output empty
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should NOT warn about empty loop output (it's only a problem if loop connects to something but doesn't loop back)
      // An empty loop output is valid - it just means no looping occurs
      const loopWarnings = result.warnings.filter(w =>
        w.message?.includes('loop') && w.message?.includes('connect back')
      );
      expect(loopWarnings).toHaveLength(0);
    });

    it('should handle SplitInBatches with both outputs to same node', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Same Target SplitInBatches',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Multi Purpose',
            type: 'n8n-nodes-base.set',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              [{ node: 'Multi Purpose', type: 'main', index: 0 }], // Done -> Multi Purpose
              [{ node: 'Multi Purpose', type: 'main', index: 0 }] // Loop -> Multi Purpose
            ]
          },
          'Multi Purpose': {
            main: [
              [{ node: 'Split In Batches', type: 'main', index: 0 }] // Loop back
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Both outputs go to same node which loops back - should be valid
      // No warnings about loop back since it does connect back
      const loopWarnings = result.warnings.filter(w =>
        w.message?.includes('loop') && w.message?.includes('connect back')
      );
      expect(loopWarnings).toHaveLength(0);
    });

    it('should detect reversed outputs with processing node on done output', async () => {
      // Use default mock that includes outputs for SplitInBatches

      const workflow = {
        name: 'Reversed SplitInBatches with Function Node',
        nodes: [
          {
            id: '1',
            name: 'Split In Batches',
            type: 'n8n-nodes-base.splitInBatches',
            position: [100, 100],
            parameters: {}
          },
          {
            id: '2',
            name: 'Process Function',
            type: 'n8n-nodes-base.function',
            position: [300, 100],
            parameters: {}
          }
        ],
        connections: {
          'Split In Batches': {
            main: [
              [{ node: 'Process Function', type: 'main', index: 0 }], // Done -> Function (this is wrong)
              [] // Loop output empty
            ]
          },
          'Process Function': {
            main: [
              [{ node: 'Split In Batches', type: 'main', index: 0 }] // Function connects back (indicates it should be on loop)
            ]
          }
        }
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should error about reversed outputs since function node on done output connects back
      const reversedErrors = result.errors.filter(e =>
        e.message?.includes('SplitInBatches outputs appear reversed')
      );
      expect(reversedErrors).toHaveLength(1);
    });

    it('should handle non-existent node type gracefully', async () => {
      // Node doesn't exist in repository
      mockNodeRepository.getNode.mockReturnValue(null);

      const workflow = {
        name: 'Unknown Node Type',
        nodes: [
          {
            id: '1',
            name: 'Unknown Node',
            type: 'n8n-nodes-base.unknownNode',
            position: [100, 100],
            parameters: {}
          }
        ],
        connections: {}
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should report unknown node type error
      const unknownNodeErrors = result.errors.filter(e =>
        e.message?.includes('Unknown node type')
      );
      expect(unknownNodeErrors).toHaveLength(1);
    });
  });

  // Large synthetic workflows exercise validator throughput, not correctness.
  describe('Performance edge cases', () => {
    it('should handle very large workflows efficiently', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.set',
        properties: []
      });

      // Create workflow with 1000 nodes
      const nodes = Array.from({ length: 1000 }, (_, i) => ({
        id: `node${i}`,
        name: `Node ${i}`,
        type: 'n8n-nodes-base.set',
        position: [100 + (i % 50) * 50, 100 + Math.floor(i / 50) * 50],
        parameters: {}
      }));

      // Create simple linear connections
      const connections: any = {};
      for (let i = 0; i < 999; i++) {
        connections[`Node ${i}`] = {
          main: [[{ node: `Node ${i + 1}`, type: 'main', index: 0 }]]
        };
      }

      const workflow = {
        name: 'Large Workflow',
        nodes,
        connections
      };

      const startTime = Date.now();
      const result = await validator.validateWorkflow(workflow as any);
      const duration = Date.now() - startTime;

      // Should complete within reasonable time (< 5 seconds)
      expect(duration).toBeLessThan(5000);
      expect(result).toBeDefined();
      expect(result.statistics.totalNodes).toBe(1000);
    });

    it('should handle workflows with many SplitInBatches nodes', async () => {
      // Use default mock that includes outputs for SplitInBatches

      // Create 100 SplitInBatches nodes
      const nodes = Array.from({ length: 100 }, (_, i) => ({
        id: `split${i}`,
        name: `Split ${i}`,
        type: 'n8n-nodes-base.splitInBatches',
        position: [100 + (i % 10) * 100, 100 + Math.floor(i / 10) * 100],
        parameters: {}
      }));

      const connections: any = {};
      // Each split connects to the next one
      for (let i = 0; i < 99; i++) {
        connections[`Split ${i}`] = {
          main: [
            [{ node: `Split ${i + 1}`, type: 'main', index: 0 }], // Done -> next split
            [] // Empty loop
          ]
        };
      }

      const workflow = {
        name: 'Many SplitInBatches Workflow',
        nodes,
        connections
      };

      const result = await validator.validateWorkflow(workflow as any);

      // Should validate all nodes without performance issues
      expect(result).toBeDefined();
      expect(result.statistics.totalNodes).toBe(100);
    });
  });
});
|
||||
@@ -1,532 +0,0 @@
|
||||
import { describe, test, expect } from 'vitest';
|
||||
import { validateWorkflowStructure } from '@/services/n8n-validation';
|
||||
import type { Workflow } from '@/types/n8n-api';
|
||||
|
||||
describe('n8n-validation - Sticky Notes Bug Fix', () => {
|
||||
describe('sticky notes should be excluded from disconnected nodes validation', () => {
|
||||
test('should allow workflow with sticky notes and connected functional nodes', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Documentation Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'This is a documentation note' }
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [[{ node: 'HTTP Request', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Should have no errors - sticky note should be ignored
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
|
||||
test('should handle multiple sticky notes without errors', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Documented Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
// 10 sticky notes for documentation
|
||||
...Array.from({ length: 10 }, (_, i) => ({
|
||||
id: `sticky${i}`,
|
||||
name: `📝 Note ${i}`,
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [100 + i * 50, 100] as [number, number],
|
||||
parameters: { content: `Documentation note ${i}` }
|
||||
}))
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [[{ node: 'Process', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
|
||||
test('should handle all sticky note type variations', () => {
|
||||
const stickyTypes = [
|
||||
'n8n-nodes-base.stickyNote',
|
||||
'nodes-base.stickyNote',
|
||||
'@n8n/n8n-nodes-base.stickyNote'
|
||||
];
|
||||
|
||||
stickyTypes.forEach((stickyType, index) => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: `sticky${index}`,
|
||||
name: `Note ${index}`,
|
||||
type: stickyType,
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: `Note ${index}` }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Sticky note should be ignored regardless of type variation
|
||||
expect(errors.every(e => !e.includes(`Note ${index}`))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
test('should handle complex workflow with multiple sticky notes (real-world scenario)', () => {
|
||||
// Simulates workflow like "POST /auth/login" with 4 sticky notes
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'POST /auth/login',
|
||||
nodes: [
|
||||
{
|
||||
id: 'webhook1',
|
||||
name: 'Webhook Trigger',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/auth/login', httpMethod: 'POST' }
|
||||
},
|
||||
{
|
||||
id: 'http1',
|
||||
name: 'Authenticate',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'respond1',
|
||||
name: 'Return Success',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [650, 250],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'respond2',
|
||||
name: 'Return Error',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [650, 350],
|
||||
parameters: {}
|
||||
},
|
||||
// 4 sticky notes for documentation
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: '📝 Webhook Trigger',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 150],
|
||||
parameters: { content: 'Receives login request' }
|
||||
},
|
||||
{
|
||||
id: 'sticky2',
|
||||
name: '📝 Authenticate with Supabase',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [450, 150],
|
||||
parameters: { content: 'Validates credentials' }
|
||||
},
|
||||
{
|
||||
id: 'sticky3',
|
||||
name: '📝 Return Tokens',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [650, 150],
|
||||
parameters: { content: 'Returns access and refresh tokens' }
|
||||
},
|
||||
{
|
||||
id: 'sticky4',
|
||||
name: '📝 Return Error',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [650, 450],
|
||||
parameters: { content: 'Returns error message' }
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook Trigger': {
|
||||
main: [[{ node: 'Authenticate', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Authenticate': {
|
||||
main: [
|
||||
[{ node: 'Return Success', type: 'main', index: 0 }],
|
||||
[{ node: 'Return Error', type: 'main', index: 0 }]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Should have no errors - all sticky notes should be ignored
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validation should still detect truly disconnected functional nodes', () => {
|
||||
test('should detect disconnected HTTP node but ignore sticky note', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Disconnected HTTP',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Sticky Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note' }
|
||||
}
|
||||
],
|
||||
connections: {} // No connections
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Should error on HTTP node, but NOT on sticky note
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
const disconnectedError = errors.find(e => e.includes('Disconnected'));
|
||||
expect(disconnectedError).toBeDefined();
|
||||
expect(disconnectedError).toContain('Disconnected HTTP');
|
||||
expect(disconnectedError).not.toContain('Sticky Note');
|
||||
});
|
||||
|
||||
test('should detect multiple disconnected functional nodes but ignore sticky notes', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Disconnected HTTP',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Disconnected Set',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3,
|
||||
position: [650, 300],
|
||||
parameters: {}
|
||||
},
|
||||
// Multiple sticky notes that should be ignored
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Note 1',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note 1' }
|
||||
},
|
||||
{
|
||||
id: 'sticky2',
|
||||
name: 'Note 2',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [450, 100],
|
||||
parameters: { content: 'Note 2' }
|
||||
}
|
||||
],
|
||||
connections: {} // No connections
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Should error because there are no connections
|
||||
// When there are NO connections, validation shows "Multi-node workflow has no connections"
|
||||
// This is the expected behavior - it suggests connecting any two executable nodes
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
const connectionError = errors.find(e => e.includes('no connections') || e.includes('Disconnected'));
|
||||
expect(connectionError).toBeDefined();
|
||||
// Error should NOT mention sticky notes
|
||||
expect(connectionError).not.toContain('Note 1');
|
||||
expect(connectionError).not.toContain('Note 2');
|
||||
});
|
||||
|
||||
test('should allow sticky notes but still validate functional node connections', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Connected HTTP',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Disconnected Set',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3,
|
||||
position: [650, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Sticky Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note' }
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [[{ node: 'Connected HTTP', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Should error only on disconnected Set node
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
const disconnectedError = errors.find(e => e.includes('Disconnected'));
|
||||
expect(disconnectedError).toBeDefined();
|
||||
expect(disconnectedError).toContain('Disconnected Set');
|
||||
expect(disconnectedError).not.toContain('Connected HTTP');
|
||||
expect(disconnectedError).not.toContain('Sticky Note');
|
||||
});
|
||||
});
|
||||
|
||||
describe('regression tests - ensure sticky notes work like in n8n UI', () => {
|
||||
test('single webhook with sticky notes should be valid (matches n8n UI behavior)', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Webhook Only with Notes',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Usage Instructions',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Call this webhook to trigger the workflow' }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Webhook-only workflows are valid in n8n
|
||||
// Sticky notes should not affect this
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
|
||||
test('workflow with only sticky notes should be invalid (no executable nodes)', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Only Notes',
|
||||
nodes: [
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Note 1',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note 1' }
|
||||
},
|
||||
{
|
||||
id: 'sticky2',
|
||||
name: 'Note 2',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [450, 100],
|
||||
parameters: { content: 'Note 2' }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Should fail because there are no executable nodes
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
expect(errors.some(e => e.includes('at least one executable node'))).toBe(true);
|
||||
});
|
||||
|
||||
test('complex production workflow structure should validate correctly', () => {
|
||||
// Tests a realistic production workflow structure
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Production API Endpoint',
|
||||
nodes: [
|
||||
// Functional nodes
|
||||
{
|
||||
id: 'webhook1',
|
||||
name: 'API Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/api/endpoint' }
|
||||
},
|
||||
{
|
||||
id: 'validate1',
|
||||
name: 'Validate Input',
|
||||
type: 'n8n-nodes-base.code',
|
||||
typeVersion: 2,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'branch1',
|
||||
name: 'Check Valid',
|
||||
type: 'n8n-nodes-base.if',
|
||||
typeVersion: 2,
|
||||
position: [650, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'process1',
|
||||
name: 'Process Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [850, 250],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'success1',
|
||||
name: 'Return Success',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [1050, 250],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'error1',
|
||||
name: 'Return Error',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [850, 350],
|
||||
parameters: {}
|
||||
},
|
||||
// Documentation sticky notes (11 notes like in real workflow)
|
||||
...Array.from({ length: 11 }, (_, i) => ({
|
||||
id: `sticky${i}`,
|
||||
name: `📝 Documentation ${i}`,
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250 + i * 100, 100] as [number, number],
|
||||
parameters: { content: `Documentation section ${i}` }
|
||||
}))
|
||||
],
|
||||
connections: {
|
||||
'API Webhook': {
|
||||
main: [[{ node: 'Validate Input', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Validate Input': {
|
||||
main: [[{ node: 'Check Valid', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Check Valid': {
|
||||
main: [
|
||||
[{ node: 'Process Request', type: 'main', index: 0 }],
|
||||
[{ node: 'Return Error', type: 'main', index: 0 }]
|
||||
]
|
||||
},
|
||||
'Process Request': {
|
||||
main: [[{ node: 'Return Success', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
// Should be valid - all functional nodes connected, sticky notes ignored
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1830,4 +1830,513 @@ describe('n8n-validation', () => {
|
||||
expect(validateWorkflowStructure(forUpdate)).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Sticky Notes Bug Fix', () => {
|
||||
describe('sticky notes should be excluded from disconnected nodes validation', () => {
|
||||
it('should allow workflow with sticky notes and connected functional nodes', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Documentation Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'This is a documentation note' }
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [[{ node: 'HTTP Request', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
|
||||
it('should handle multiple sticky notes without errors', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Documented Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
...Array.from({ length: 10 }, (_, i) => ({
|
||||
id: `sticky${i}`,
|
||||
name: `Note ${i}`,
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [100 + i * 50, 100] as [number, number],
|
||||
parameters: { content: `Documentation note ${i}` }
|
||||
}))
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [[{ node: 'Process', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
|
||||
it('should handle all sticky note type variations', () => {
|
||||
const stickyTypes = [
|
||||
'n8n-nodes-base.stickyNote',
|
||||
'nodes-base.stickyNote',
|
||||
'@n8n/n8n-nodes-base.stickyNote'
|
||||
];
|
||||
|
||||
stickyTypes.forEach((stickyType, index) => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: `sticky${index}`,
|
||||
name: `Note ${index}`,
|
||||
type: stickyType,
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: `Note ${index}` }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors.every(e => !e.includes(`Note ${index}`))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle complex workflow with multiple sticky notes (real-world scenario)', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'POST /auth/login',
|
||||
nodes: [
|
||||
{
|
||||
id: 'webhook1',
|
||||
name: 'Webhook Trigger',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/auth/login', httpMethod: 'POST' }
|
||||
},
|
||||
{
|
||||
id: 'http1',
|
||||
name: 'Authenticate',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'respond1',
|
||||
name: 'Return Success',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [650, 250],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'respond2',
|
||||
name: 'Return Error',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [650, 350],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Webhook Trigger Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 150],
|
||||
parameters: { content: 'Receives login request' }
|
||||
},
|
||||
{
|
||||
id: 'sticky2',
|
||||
name: 'Authenticate with Supabase Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [450, 150],
|
||||
parameters: { content: 'Validates credentials' }
|
||||
},
|
||||
{
|
||||
id: 'sticky3',
|
||||
name: 'Return Tokens Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [650, 150],
|
||||
parameters: { content: 'Returns access and refresh tokens' }
|
||||
},
|
||||
{
|
||||
id: 'sticky4',
|
||||
name: 'Return Error Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [650, 450],
|
||||
parameters: { content: 'Returns error message' }
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook Trigger': {
|
||||
main: [[{ node: 'Authenticate', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Authenticate': {
|
||||
main: [
|
||||
[{ node: 'Return Success', type: 'main', index: 0 }],
|
||||
[{ node: 'Return Error', type: 'main', index: 0 }]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validation should still detect truly disconnected functional nodes', () => {
|
||||
it('should detect disconnected HTTP node but ignore sticky note', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Disconnected HTTP',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Sticky Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note' }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
const disconnectedError = errors.find(e => e.includes('Disconnected'));
|
||||
expect(disconnectedError).toBeDefined();
|
||||
expect(disconnectedError).toContain('Disconnected HTTP');
|
||||
expect(disconnectedError).not.toContain('Sticky Note');
|
||||
});
|
||||
|
||||
it('should detect multiple disconnected functional nodes but ignore sticky notes', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Disconnected HTTP',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Disconnected Set',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3,
|
||||
position: [650, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Note 1',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note 1' }
|
||||
},
|
||||
{
|
||||
id: 'sticky2',
|
||||
name: 'Note 2',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [450, 100],
|
||||
parameters: { content: 'Note 2' }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
const connectionError = errors.find(e => e.includes('no connections') || e.includes('Disconnected'));
|
||||
expect(connectionError).toBeDefined();
|
||||
expect(connectionError).not.toContain('Note 1');
|
||||
expect(connectionError).not.toContain('Note 2');
|
||||
});
|
||||
|
||||
it('should allow sticky notes but still validate functional node connections', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Test Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Connected HTTP',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Disconnected Set',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3,
|
||||
position: [650, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Sticky Note',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note' }
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [[{ node: 'Connected HTTP', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
const disconnectedError = errors.find(e => e.includes('Disconnected'));
|
||||
expect(disconnectedError).toBeDefined();
|
||||
expect(disconnectedError).toContain('Disconnected Set');
|
||||
expect(disconnectedError).not.toContain('Connected HTTP');
|
||||
expect(disconnectedError).not.toContain('Sticky Note');
|
||||
});
|
||||
});
|
||||
|
||||
describe('regression tests - ensure sticky notes work like in n8n UI', () => {
|
||||
it('single webhook with sticky notes should be valid (matches n8n UI behavior)', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Webhook Only with Notes',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/test' }
|
||||
},
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Usage Instructions',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Call this webhook to trigger the workflow' }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
|
||||
it('workflow with only sticky notes should be invalid (no executable nodes)', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Only Notes',
|
||||
nodes: [
|
||||
{
|
||||
id: 'sticky1',
|
||||
name: 'Note 1',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250, 100],
|
||||
parameters: { content: 'Note 1' }
|
||||
},
|
||||
{
|
||||
id: 'sticky2',
|
||||
name: 'Note 2',
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [450, 100],
|
||||
parameters: { content: 'Note 2' }
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
expect(errors.some(e => e.includes('at least one executable node'))).toBe(true);
|
||||
});
|
||||
|
||||
it('complex production workflow structure should validate correctly', () => {
|
||||
const workflow: Partial<Workflow> = {
|
||||
name: 'Production API Endpoint',
|
||||
nodes: [
|
||||
{
|
||||
id: 'webhook1',
|
||||
name: 'API Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [250, 300],
|
||||
parameters: { path: '/api/endpoint' }
|
||||
},
|
||||
{
|
||||
id: 'validate1',
|
||||
name: 'Validate Input',
|
||||
type: 'n8n-nodes-base.code',
|
||||
typeVersion: 2,
|
||||
position: [450, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'branch1',
|
||||
name: 'Check Valid',
|
||||
type: 'n8n-nodes-base.if',
|
||||
typeVersion: 2,
|
||||
position: [650, 300],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'process1',
|
||||
name: 'Process Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 3,
|
||||
position: [850, 250],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'success1',
|
||||
name: 'Return Success',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [1050, 250],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'error1',
|
||||
name: 'Return Error',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [850, 350],
|
||||
parameters: {}
|
||||
},
|
||||
...Array.from({ length: 11 }, (_, i) => ({
|
||||
id: `sticky${i}`,
|
||||
name: `Documentation ${i}`,
|
||||
type: 'n8n-nodes-base.stickyNote',
|
||||
typeVersion: 1,
|
||||
position: [250 + i * 100, 100] as [number, number],
|
||||
parameters: { content: `Documentation section ${i}` }
|
||||
}))
|
||||
],
|
||||
connections: {
|
||||
'API Webhook': {
|
||||
main: [[{ node: 'Validate Input', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Validate Input': {
|
||||
main: [[{ node: 'Check Valid', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Check Valid': {
|
||||
main: [
|
||||
[{ node: 'Process Request', type: 'main', index: 0 }],
|
||||
[{ node: 'Return Error', type: 'main', index: 0 }]
|
||||
]
|
||||
},
|
||||
'Process Request': {
|
||||
main: [[{ node: 'Return Success', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errors = validateWorkflowStructure(workflow);
|
||||
|
||||
expect(errors).toEqual([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,217 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/database/node-repository');
|
||||
vi.mock('@/services/enhanced-config-validator');
|
||||
vi.mock('@/services/expression-validator');
|
||||
vi.mock('@/utils/logger');
|
||||
|
||||
describe('WorkflowValidator - AI Sub-Node Main Connection Detection', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockNodeRepository: NodeRepository;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
mockNodeRepository = new NodeRepository({} as any) as any;
|
||||
|
||||
if (!mockNodeRepository.getAllNodes) {
|
||||
mockNodeRepository.getAllNodes = vi.fn();
|
||||
}
|
||||
if (!mockNodeRepository.getNode) {
|
||||
mockNodeRepository.getNode = vi.fn();
|
||||
}
|
||||
|
||||
const nodeTypes: Record<string, any> = {
|
||||
'nodes-base.manualTrigger': {
|
||||
type: 'nodes-base.manualTrigger',
|
||||
displayName: 'Manual Trigger',
|
||||
package: 'n8n-nodes-base',
|
||||
isTrigger: true,
|
||||
outputs: ['main'],
|
||||
properties: [],
|
||||
},
|
||||
'nodes-base.set': {
|
||||
type: 'nodes-base.set',
|
||||
displayName: 'Set',
|
||||
package: 'n8n-nodes-base',
|
||||
outputs: ['main'],
|
||||
properties: [],
|
||||
},
|
||||
'nodes-langchain.lmChatGoogleGemini': {
|
||||
type: 'nodes-langchain.lmChatGoogleGemini',
|
||||
displayName: 'Google Gemini Chat Model',
|
||||
package: '@n8n/n8n-nodes-langchain',
|
||||
outputs: ['ai_languageModel'],
|
||||
properties: [],
|
||||
},
|
||||
'nodes-langchain.memoryBufferWindow': {
|
||||
type: 'nodes-langchain.memoryBufferWindow',
|
||||
displayName: 'Window Buffer Memory',
|
||||
package: '@n8n/n8n-nodes-langchain',
|
||||
outputs: ['ai_memory'],
|
||||
properties: [],
|
||||
},
|
||||
'nodes-langchain.embeddingsOpenAi': {
|
||||
type: 'nodes-langchain.embeddingsOpenAi',
|
||||
displayName: 'Embeddings OpenAI',
|
||||
package: '@n8n/n8n-nodes-langchain',
|
||||
outputs: ['ai_embedding'],
|
||||
properties: [],
|
||||
},
|
||||
'nodes-langchain.agent': {
|
||||
type: 'nodes-langchain.agent',
|
||||
displayName: 'AI Agent',
|
||||
package: '@n8n/n8n-nodes-langchain',
|
||||
isAITool: true,
|
||||
outputs: ['main'],
|
||||
properties: [],
|
||||
},
|
||||
'nodes-langchain.openAi': {
|
||||
type: 'nodes-langchain.openAi',
|
||||
displayName: 'OpenAI',
|
||||
package: '@n8n/n8n-nodes-langchain',
|
||||
outputs: ['main'],
|
||||
properties: [],
|
||||
},
|
||||
'nodes-langchain.textClassifier': {
|
||||
type: 'nodes-langchain.textClassifier',
|
||||
displayName: 'Text Classifier',
|
||||
package: '@n8n/n8n-nodes-langchain',
|
||||
outputs: ['={{}}'], // Dynamic expression-based outputs
|
||||
properties: [],
|
||||
},
|
||||
'nodes-langchain.vectorStoreInMemory': {
|
||||
type: 'nodes-langchain.vectorStoreInMemory',
|
||||
displayName: 'In-Memory Vector Store',
|
||||
package: '@n8n/n8n-nodes-langchain',
|
||||
outputs: ['={{$parameter["mode"] === "retrieve" ? "main" : "ai_vectorStore"}}'],
|
||||
properties: [],
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(mockNodeRepository.getNode).mockImplementation((nodeType: string) => {
|
||||
return nodeTypes[nodeType] || null;
|
||||
});
|
||||
vi.mocked(mockNodeRepository.getAllNodes).mockReturnValue(Object.values(nodeTypes));
|
||||
|
||||
validator = new WorkflowValidator(
|
||||
mockNodeRepository,
|
||||
EnhancedConfigValidator as any
|
||||
);
|
||||
});
|
||||
|
||||
function makeWorkflow(sourceType: string, sourceName: string, connectionKey: string = 'main') {
|
||||
return {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Manual Trigger', type: 'n8n-nodes-base.manualTrigger', position: [0, 0], parameters: {} },
|
||||
{ id: '2', name: sourceName, type: sourceType, position: [200, 0], parameters: {} },
|
||||
{ id: '3', name: 'Set', type: 'n8n-nodes-base.set', position: [400, 0], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Manual Trigger': {
|
||||
main: [[{ node: sourceName, type: 'main', index: 0 }]]
|
||||
},
|
||||
[sourceName]: {
|
||||
[connectionKey]: [[{ node: 'Set', type: connectionKey, index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
it('should flag LLM node (lmChatGoogleGemini) connected via main', async () => {
|
||||
const workflow = makeWorkflow(
|
||||
'n8n-nodes-langchain.lmChatGoogleGemini',
|
||||
'Google Gemini'
|
||||
);
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
const error = result.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION');
|
||||
expect(error).toBeDefined();
|
||||
expect(error!.message).toContain('ai_languageModel');
|
||||
expect(error!.message).toContain('AI sub-node');
|
||||
expect(error!.nodeName).toBe('Google Gemini');
|
||||
});
|
||||
|
||||
it('should flag memory node (memoryBufferWindow) connected via main', async () => {
|
||||
const workflow = makeWorkflow(
|
||||
'n8n-nodes-langchain.memoryBufferWindow',
|
||||
'Window Buffer Memory'
|
||||
);
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
const error = result.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION');
|
||||
expect(error).toBeDefined();
|
||||
expect(error!.message).toContain('ai_memory');
|
||||
});
|
||||
|
||||
it('should flag embeddings node connected via main', async () => {
|
||||
const workflow = makeWorkflow(
|
||||
'n8n-nodes-langchain.embeddingsOpenAi',
|
||||
'Embeddings OpenAI'
|
||||
);
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
const error = result.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION');
|
||||
expect(error).toBeDefined();
|
||||
expect(error!.message).toContain('ai_embedding');
|
||||
});
|
||||
|
||||
it('should NOT flag regular langchain nodes (agent, openAi) connected via main', async () => {
|
||||
const workflow1 = makeWorkflow('n8n-nodes-langchain.agent', 'AI Agent');
|
||||
const workflow2 = makeWorkflow('n8n-nodes-langchain.openAi', 'OpenAI');
|
||||
|
||||
const result1 = await validator.validateWorkflow(workflow1 as any);
|
||||
const result2 = await validator.validateWorkflow(workflow2 as any);
|
||||
|
||||
expect(result1.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION')).toBeUndefined();
|
||||
expect(result2.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION')).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should NOT flag dynamic-output nodes (expression-based outputs)', async () => {
|
||||
const workflow1 = makeWorkflow('n8n-nodes-langchain.textClassifier', 'Text Classifier');
|
||||
const workflow2 = makeWorkflow('n8n-nodes-langchain.vectorStoreInMemory', 'Vector Store');
|
||||
|
||||
const result1 = await validator.validateWorkflow(workflow1 as any);
|
||||
const result2 = await validator.validateWorkflow(workflow2 as any);
|
||||
|
||||
expect(result1.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION')).toBeUndefined();
|
||||
expect(result2.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION')).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should NOT flag AI sub-node connected via correct AI type', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Manual Trigger', type: 'n8n-nodes-base.manualTrigger', position: [0, 0], parameters: {} },
|
||||
{ id: '2', name: 'AI Agent', type: 'n8n-nodes-langchain.agent', position: [200, 0], parameters: {} },
|
||||
{ id: '3', name: 'Google Gemini', type: 'n8n-nodes-langchain.lmChatGoogleGemini', position: [200, 200], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Manual Trigger': {
|
||||
main: [[{ node: 'AI Agent', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Google Gemini': {
|
||||
ai_languageModel: [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION')).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should NOT flag unknown/community nodes not in database', async () => {
|
||||
const workflow = makeWorkflow('n8n-nodes-community.someNode', 'Community Node');
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.errors.find(e => e.code === 'AI_SUBNODE_MAIN_CONNECTION')).toBeUndefined();
|
||||
});
|
||||
});
|
||||
File diff suppressed because it is too large
Load Diff
@@ -915,4 +915,269 @@ describe('WorkflowValidator - Connection Validation (#620)', () => {
|
||||
expect(warning!.message).toContain('"unmatched" branch has no effect');
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Error Output Validation (absorbed from workflow-validator-error-outputs) ──
|
||||
|
||||
describe('Error Output Configuration', () => {
|
||||
it('should detect incorrect configuration - multiple nodes in same array', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Validate Input', type: 'n8n-nodes-base.set', typeVersion: 3.4, position: [-400, 64], parameters: {} },
|
||||
{ id: '2', name: 'Filter URLs', type: 'n8n-nodes-base.filter', typeVersion: 2.2, position: [-176, 64], parameters: {} },
|
||||
{ id: '3', name: 'Error Response1', type: 'n8n-nodes-base.respondToWebhook', typeVersion: 1.5, position: [-160, 240], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Validate Input': {
|
||||
main: [[
|
||||
{ node: 'Filter URLs', type: 'main', index: 0 },
|
||||
{ node: 'Error Response1', type: 'main', index: 0 },
|
||||
]],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration') &&
|
||||
e.message.includes('Error Response1') &&
|
||||
e.message.includes('appear to be error handlers but are in main[0]'),
|
||||
)).toBe(true);
|
||||
const errorMsg = result.errors.find(e => e.message.includes('Incorrect error output configuration'));
|
||||
expect(errorMsg?.message).toContain('INCORRECT (current)');
|
||||
expect(errorMsg?.message).toContain('CORRECT (should be)');
|
||||
});
|
||||
|
||||
it('should validate correct configuration - separate arrays', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Validate Input', type: 'n8n-nodes-base.set', typeVersion: 3.4, position: [-400, 64], parameters: {}, onError: 'continueErrorOutput' },
|
||||
{ id: '2', name: 'Filter URLs', type: 'n8n-nodes-base.filter', typeVersion: 2.2, position: [-176, 64], parameters: {} },
|
||||
{ id: '3', name: 'Error Response1', type: 'n8n-nodes-base.respondToWebhook', typeVersion: 1.5, position: [-160, 240], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Validate Input': {
|
||||
main: [
|
||||
[{ node: 'Filter URLs', type: 'main', index: 0 }],
|
||||
[{ node: 'Error Response1', type: 'main', index: 0 }],
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Incorrect error output configuration'))).toBe(false);
|
||||
});
|
||||
|
||||
it('should detect onError without error connections', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', typeVersion: 4, position: [100, 100], parameters: {}, onError: 'continueErrorOutput' },
|
||||
{ id: '2', name: 'Process Data', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request': { main: [[{ node: 'Process Data', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e =>
|
||||
e.nodeName === 'HTTP Request' &&
|
||||
e.message.includes("has onError: 'continueErrorOutput' but no error output connections"),
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should warn about error connections without onError', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', typeVersion: 4, position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Process Data', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Error Handler', type: 'n8n-nodes-base.set', position: [300, 300], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request': {
|
||||
main: [
|
||||
[{ node: 'Process Data', type: 'main', index: 0 }],
|
||||
[{ node: 'Error Handler', type: 'main', index: 0 }],
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.warnings.some(w =>
|
||||
w.nodeName === 'HTTP Request' &&
|
||||
w.message.includes('error output connections in main[1] but missing onError'),
|
||||
)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handler Detection', () => {
|
||||
it('should detect error handler nodes by name', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'API Call', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Process Success', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Handle Error', type: 'n8n-nodes-base.set', position: [300, 300], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'API Call': { main: [[{ node: 'Process Success', type: 'main', index: 0 }, { node: 'Handle Error', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Handle Error') && e.message.includes('appear to be error handlers'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect error handler nodes by type', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Process', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Respond', type: 'n8n-nodes-base.respondToWebhook', position: [300, 300], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Webhook': { main: [[{ node: 'Process', type: 'main', index: 0 }, { node: 'Respond', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Respond') && e.message.includes('appear to be error handlers'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should not flag non-error nodes in main[0]', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Start', type: 'n8n-nodes-base.manualTrigger', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'First Process', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Second Process', type: 'n8n-nodes-base.set', position: [300, 200], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Start': { main: [[{ node: 'First Process', type: 'main', index: 0 }, { node: 'Second Process', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Incorrect error output configuration'))).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Complex Error Patterns', () => {
|
||||
it('should handle multiple error handlers correctly in main[1]', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: {}, onError: 'continueErrorOutput' },
|
||||
{ id: '2', name: 'Process', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Log Error', type: 'n8n-nodes-base.set', position: [300, 200], parameters: {} },
|
||||
{ id: '4', name: 'Send Error Email', type: 'n8n-nodes-base.emailSend', position: [300, 300], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request': {
|
||||
main: [
|
||||
[{ node: 'Process', type: 'main', index: 0 }],
|
||||
[{ node: 'Log Error', type: 'main', index: 0 }, { node: 'Send Error Email', type: 'main', index: 0 }],
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Incorrect error output configuration'))).toBe(false);
|
||||
});
|
||||
|
||||
it('should detect mixed success and error handlers in main[0]', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'API Request', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Transform Data', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Store Data', type: 'n8n-nodes-base.set', position: [500, 100], parameters: {} },
|
||||
{ id: '4', name: 'Error Notification', type: 'n8n-nodes-base.emailSend', position: [300, 300], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'API Request': {
|
||||
main: [[
|
||||
{ node: 'Transform Data', type: 'main', index: 0 },
|
||||
{ node: 'Store Data', type: 'main', index: 0 },
|
||||
{ node: 'Error Notification', type: 'main', index: 0 },
|
||||
]],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Error Notification') && e.message.includes('appear to be error handlers but are in main[0]'),
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle nested error handling (error handlers with their own errors)', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Primary API', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: {}, onError: 'continueErrorOutput' },
|
||||
{ id: '2', name: 'Success Handler', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Error Logger', type: 'n8n-nodes-base.httpRequest', position: [300, 200], parameters: {}, onError: 'continueErrorOutput' },
|
||||
{ id: '4', name: 'Fallback Error', type: 'n8n-nodes-base.set', position: [500, 250], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Primary API': { main: [[{ node: 'Success Handler', type: 'main', index: 0 }], [{ node: 'Error Logger', type: 'main', index: 0 }]] },
|
||||
'Error Logger': { main: [[], [{ node: 'Fallback Error', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Incorrect error output configuration'))).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle workflows with only error outputs (no success path)', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Risky Operation', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: {}, onError: 'continueErrorOutput' },
|
||||
{ id: '2', name: 'Error Handler Only', type: 'n8n-nodes-base.set', position: [300, 200], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Risky Operation': { main: [[], [{ node: 'Error Handler Only', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Incorrect error output configuration'))).toBe(false);
|
||||
expect(result.errors.some(e => e.message.includes("has onError: 'continueErrorOutput' but no error output connections"))).toBe(false);
|
||||
});
|
||||
|
||||
it('should not flag legitimate parallel processing nodes', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Data Source', type: 'n8n-nodes-base.webhook', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Process A', type: 'n8n-nodes-base.set', position: [300, 50], parameters: {} },
|
||||
{ id: '3', name: 'Process B', type: 'n8n-nodes-base.set', position: [300, 150], parameters: {} },
|
||||
{ id: '4', name: 'Transform Data', type: 'n8n-nodes-base.set', position: [300, 250], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Data Source': { main: [[{ node: 'Process A', type: 'main', index: 0 }, { node: 'Process B', type: 'main', index: 0 }, { node: 'Transform Data', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Incorrect error output configuration'))).toBe(false);
|
||||
});
|
||||
|
||||
it('should detect all variations of error-related node names', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Source', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Handle Failure', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
{ id: '3', name: 'Catch Exception', type: 'n8n-nodes-base.set', position: [300, 200], parameters: {} },
|
||||
{ id: '4', name: 'Success Path', type: 'n8n-nodes-base.set', position: [500, 100], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Source': { main: [[{ node: 'Handle Failure', type: 'main', index: 0 }, { node: 'Catch Exception', type: 'main', index: 0 }, { node: 'Success Path', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Handle Failure') && e.message.includes('Catch Exception') && e.message.includes('appear to be error handlers but are in main[0]'),
|
||||
)).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,576 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
import type { WorkflowValidationResult } from '@/services/workflow-validator';
|
||||
|
||||
// NOTE: Mocking EnhancedConfigValidator is challenging because:
|
||||
// 1. WorkflowValidator expects the class itself, not an instance
|
||||
// 2. The class has static methods that are called directly
|
||||
// 3. vi.mock() hoisting makes it difficult to mock properly
|
||||
//
|
||||
// For properly mocked tests, see workflow-validator-with-mocks.test.ts
|
||||
// These tests use a partially mocked approach that may still access the database
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/database/node-repository');
|
||||
vi.mock('@/services/expression-validator');
|
||||
vi.mock('@/utils/logger');
|
||||
|
||||
// Mock EnhancedConfigValidator with static methods
|
||||
vi.mock('@/services/enhanced-config-validator', () => ({
|
||||
EnhancedConfigValidator: {
|
||||
validate: vi.fn().mockReturnValue({
|
||||
valid: true,
|
||||
errors: [],
|
||||
warnings: [],
|
||||
suggestions: [],
|
||||
visibleProperties: [],
|
||||
hiddenProperties: []
|
||||
}),
|
||||
validateWithMode: vi.fn().mockReturnValue({
|
||||
valid: true,
|
||||
errors: [],
|
||||
warnings: [],
|
||||
fixedConfig: null
|
||||
})
|
||||
}
|
||||
}));
|
||||
|
||||
describe('WorkflowValidator - Edge Cases', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockNodeRepository: any;
|
||||
let mockEnhancedConfigValidator: any;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Create mock repository that returns node info for test nodes and common n8n nodes
|
||||
mockNodeRepository = {
|
||||
getNode: vi.fn().mockImplementation((type: string) => {
|
||||
if (type === 'test.node' || type === 'test.agent' || type === 'test.tool') {
|
||||
return {
|
||||
name: 'Test Node',
|
||||
type: type,
|
||||
typeVersion: 1,
|
||||
properties: [],
|
||||
package: 'test-package',
|
||||
version: 1,
|
||||
displayName: 'Test Node',
|
||||
isVersioned: false
|
||||
};
|
||||
}
|
||||
// Handle common n8n node types
|
||||
if (type.startsWith('n8n-nodes-base.') || type.startsWith('nodes-base.')) {
|
||||
const nodeName = type.split('.')[1];
|
||||
return {
|
||||
name: nodeName,
|
||||
type: type,
|
||||
typeVersion: 1,
|
||||
properties: [],
|
||||
package: 'n8n-nodes-base',
|
||||
version: 1,
|
||||
displayName: nodeName.charAt(0).toUpperCase() + nodeName.slice(1),
|
||||
isVersioned: ['set', 'httpRequest'].includes(nodeName)
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}),
|
||||
findByType: vi.fn().mockReturnValue({
|
||||
name: 'Test Node',
|
||||
type: 'test.node',
|
||||
typeVersion: 1,
|
||||
properties: []
|
||||
}),
|
||||
searchNodes: vi.fn().mockReturnValue([])
|
||||
};
|
||||
|
||||
// Ensure EnhancedConfigValidator.validate always returns a valid result
|
||||
vi.mocked(EnhancedConfigValidator.validate).mockReturnValue({
|
||||
valid: true,
|
||||
errors: [],
|
||||
warnings: [],
|
||||
suggestions: [],
|
||||
visibleProperties: [],
|
||||
hiddenProperties: []
|
||||
});
|
||||
|
||||
// Create validator instance with mocked dependencies
|
||||
validator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator);
|
||||
});
|
||||
|
||||
describe('Null and Undefined Handling', () => {
|
||||
it('should handle null workflow gracefully', async () => {
|
||||
const result = await validator.validateWorkflow(null as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.message.includes('Invalid workflow structure'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle undefined workflow gracefully', async () => {
|
||||
const result = await validator.validateWorkflow(undefined as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.message.includes('Invalid workflow structure'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle workflow with null nodes array', async () => {
|
||||
const workflow = {
|
||||
nodes: null,
|
||||
connections: {}
|
||||
};
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.message.includes('nodes must be an array'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle workflow with null connections', async () => {
|
||||
const workflow = {
|
||||
nodes: [],
|
||||
connections: null
|
||||
};
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e => e.message.includes('connections must be an object'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle nodes with null/undefined properties', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: null,
|
||||
type: 'test.node',
|
||||
position: [0, 0],
|
||||
parameters: undefined
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Boundary Value Testing', () => {
|
||||
it('should handle empty workflow', async () => {
|
||||
const workflow = {
|
||||
nodes: [],
|
||||
connections: {}
|
||||
};
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.warnings.some(w => w.message.includes('empty'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle very large workflows', async () => {
|
||||
const nodes = Array(1000).fill(null).map((_, i) => ({
|
||||
id: `node${i}`,
|
||||
name: `Node ${i}`,
|
||||
type: 'test.node',
|
||||
position: [i * 100, 0] as [number, number],
|
||||
parameters: {}
|
||||
}));
|
||||
|
||||
const connections: any = {};
|
||||
for (let i = 0; i < 999; i++) {
|
||||
connections[`Node ${i}`] = {
|
||||
main: [[{ node: `Node ${i + 1}`, type: 'main', index: 0 }]]
|
||||
};
|
||||
}
|
||||
|
||||
const workflow = { nodes, connections };
|
||||
|
||||
const start = Date.now();
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const duration = Date.now() - start;
|
||||
|
||||
expect(result).toBeDefined();
|
||||
// Use longer timeout for CI environments
|
||||
const isCI = process.env.CI === 'true' || process.env.GITHUB_ACTIONS === 'true';
|
||||
const timeout = isCI ? 10000 : 5000; // 10 seconds for CI, 5 seconds for local
|
||||
expect(duration).toBeLessThan(timeout);
|
||||
});
|
||||
|
||||
it('should handle deeply nested connections', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Start', type: 'test.node', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Middle', type: 'test.node', position: [100, 0] as [number, number], parameters: {} },
|
||||
{ id: '3', name: 'End', type: 'test.node', position: [200, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Start': {
|
||||
main: [[{ node: 'Middle', type: 'main', index: 0 }]],
|
||||
error: [[{ node: 'End', type: 'main', index: 0 }]],
|
||||
ai_tool: [[{ node: 'Middle', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.statistics.invalidConnections).toBe(0);
|
||||
});
|
||||
|
||||
it.skip('should handle nodes at extreme positions - FIXME: mock issues', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'FarLeft', type: 'n8n-nodes-base.set', position: [-999999, -999999] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'FarRight', type: 'n8n-nodes-base.set', position: [999999, 999999] as [number, number], parameters: {} },
|
||||
{ id: '3', name: 'Zero', type: 'n8n-nodes-base.set', position: [0, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'FarLeft': {
|
||||
main: [[{ node: 'FarRight', type: 'main', index: 0 }]]
|
||||
},
|
||||
'FarRight': {
|
||||
main: [[{ node: 'Zero', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Invalid Data Type Handling', () => {
|
||||
it('should handle non-array nodes', async () => {
|
||||
const workflow = {
|
||||
nodes: 'not-an-array',
|
||||
connections: {}
|
||||
};
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors[0].message).toContain('nodes must be an array');
|
||||
});
|
||||
|
||||
it('should handle non-object connections', async () => {
|
||||
const workflow = {
|
||||
nodes: [],
|
||||
connections: []
|
||||
};
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors[0].message).toContain('connections must be an object');
|
||||
});
|
||||
|
||||
it('should handle invalid position values', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'InvalidPos', type: 'test.node', position: 'invalid' as any, parameters: {} },
|
||||
{ id: '2', name: 'NaNPos', type: 'test.node', position: [NaN, NaN] as [number, number], parameters: {} },
|
||||
{ id: '3', name: 'InfinityPos', type: 'test.node', position: [Infinity, -Infinity] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should handle circular references in workflow object', async () => {
|
||||
const workflow: any = {
|
||||
nodes: [],
|
||||
connections: {}
|
||||
};
|
||||
workflow.circular = workflow;
|
||||
|
||||
await expect(validator.validateWorkflow(workflow)).resolves.toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Connection Validation Edge Cases', () => {
|
||||
it('should detect self-referencing nodes', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'SelfLoop', type: 'test.node', position: [0, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'SelfLoop': {
|
||||
main: [[{ node: 'SelfLoop', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.warnings.some(w => w.message.includes('self-referencing'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle non-existent node references', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Node1', type: 'test.node', position: [0, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Node1': {
|
||||
main: [[{ node: 'NonExistent', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('non-existent'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle invalid connection formats', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Node1', type: 'test.node', position: [0, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Node1': {
|
||||
main: 'invalid-format' as any
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should handle missing connection properties', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Node1', type: 'test.node', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Node2', type: 'test.node', position: [100, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Node1': {
|
||||
main: [[{ node: 'Node2' }]] // Missing type and index
|
||||
}
|
||||
} as any
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
// Should still work as type and index can have defaults
|
||||
expect(result.statistics.validConnections).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should handle negative output indices', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Node1', type: 'test.node', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Node2', type: 'test.node', position: [100, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Node1': {
|
||||
main: [[{ node: 'Node2', type: 'main', index: -1 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.some(e => e.message.includes('Invalid'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Special Characters and Unicode', () => {
|
||||
// Note: These tests are skipped because WorkflowValidator also needs special character
|
||||
// normalization (similar to WorkflowDiffEngine fix in #270). Will be addressed in a future PR.
|
||||
it.skip('should handle apostrophes in node names - TODO: needs WorkflowValidator normalization', async () => {
|
||||
// Test default n8n Manual Trigger node name with apostrophes
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: "When clicking 'Execute workflow'", type: 'n8n-nodes-base.manualTrigger', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', position: [100, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
"When clicking 'Execute workflow'": {
|
||||
main: [[{ node: 'HTTP Request', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it.skip('should handle special characters in node names - TODO: needs WorkflowValidator normalization', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Node@#$%', type: 'n8n-nodes-base.set', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Node 中文', type: 'n8n-nodes-base.set', position: [100, 0] as [number, number], parameters: {} },
|
||||
{ id: '3', name: 'Node😊', type: 'n8n-nodes-base.set', position: [200, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Node@#$%': {
|
||||
main: [[{ node: 'Node 中文', type: 'main', index: 0 }]]
|
||||
},
|
||||
'Node 中文': {
|
||||
main: [[{ node: 'Node😊', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.valid).toBe(true);
|
||||
expect(result.errors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle very long node names', async () => {
|
||||
const longName = 'A'.repeat(1000);
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: longName, type: 'test.node', position: [0, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.warnings.some(w => w.message.includes('very long'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Batch Validation', () => {
|
||||
it.skip('should handle batch validation with mixed valid/invalid workflows - FIXME: mock issues', async () => {
|
||||
const workflows = [
|
||||
{
|
||||
nodes: [
|
||||
{ id: '1', name: 'Node1', type: 'n8n-nodes-base.set', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Node2', type: 'n8n-nodes-base.set', position: [100, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Node1': {
|
||||
main: [[{ node: 'Node2', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
},
|
||||
null as any,
|
||||
{
|
||||
nodes: 'invalid' as any,
|
||||
connections: {}
|
||||
}
|
||||
];
|
||||
|
||||
const promises = workflows.map(w => validator.validateWorkflow(w));
|
||||
const results = await Promise.all(promises);
|
||||
|
||||
expect(results[0].valid).toBe(true);
|
||||
expect(results[1].valid).toBe(false);
|
||||
expect(results[2].valid).toBe(false);
|
||||
});
|
||||
|
||||
it.skip('should handle concurrent validation requests - FIXME: mock issues', async () => {
|
||||
const workflow = {
|
||||
nodes: [{ id: '1', name: 'Test', type: 'n8n-nodes-base.webhook', position: [0, 0] as [number, number], parameters: {} }],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const promises = Array(10).fill(null).map(() => validator.validateWorkflow(workflow));
|
||||
const results = await Promise.all(promises);
|
||||
|
||||
expect(results.every(r => r.valid)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Expression Validation Edge Cases', () => {
|
||||
it('should skip expression validation when option is false', async () => {
|
||||
const workflow = {
|
||||
nodes: [{
|
||||
id: '1',
|
||||
name: 'Node1',
|
||||
type: 'test.node',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
value: '{{ $json.invalid.expression }}'
|
||||
}
|
||||
}],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow, {
|
||||
validateExpressions: false
|
||||
});
|
||||
|
||||
expect(result.statistics.expressionsValidated).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Connection Type Validation', () => {
|
||||
it('should validate different connection types', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Agent', type: 'test.agent', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Tool', type: 'test.tool', position: [100, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Tool': {
|
||||
ai_tool: [[{ node: 'Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.statistics.validConnections).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Recovery', () => {
|
||||
it('should continue validation after encountering errors', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: null as any, type: 'test.node', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Valid', type: 'test.node', position: [100, 0] as [number, number], parameters: {} },
|
||||
{ id: '3', name: 'AlsoValid', type: 'test.node', position: [200, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Valid': {
|
||||
main: [[{ node: 'AlsoValid', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
expect(result.statistics.validConnections).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Static Method Alternatives', () => {
|
||||
it('should validate workflow connections only', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{ id: '1', name: 'Node1', type: 'test.node', position: [0, 0] as [number, number], parameters: {} },
|
||||
{ id: '2', name: 'Node2', type: 'test.node', position: [100, 0] as [number, number], parameters: {} }
|
||||
],
|
||||
connections: {
|
||||
'Node1': {
|
||||
main: [[{ node: 'Node2', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow, {
|
||||
validateNodes: false,
|
||||
validateExpressions: false,
|
||||
validateConnections: true
|
||||
});
|
||||
|
||||
expect(result.statistics.validConnections).toBe(1);
|
||||
});
|
||||
|
||||
it('should validate workflow expressions only', async () => {
|
||||
const workflow = {
|
||||
nodes: [{
|
||||
id: '1',
|
||||
name: 'Node1',
|
||||
type: 'test.node',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
value: '{{ $json.data }}'
|
||||
}
|
||||
}],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow, {
|
||||
validateNodes: false,
|
||||
validateExpressions: true,
|
||||
validateConnections: false
|
||||
});
|
||||
|
||||
expect(result.statistics.expressionsValidated).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,793 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
vi.mock('@/utils/logger');
|
||||
|
||||
describe('WorkflowValidator - Error Output Validation', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockNodeRepository: any;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Create mock repository
|
||||
mockNodeRepository = {
|
||||
getNode: vi.fn((type: string) => {
|
||||
// Return mock node info for common node types
|
||||
if (type.includes('httpRequest') || type.includes('webhook') || type.includes('set')) {
|
||||
return {
|
||||
node_type: type,
|
||||
display_name: 'Mock Node',
|
||||
isVersioned: true,
|
||||
version: 1
|
||||
};
|
||||
}
|
||||
return null;
|
||||
})
|
||||
};
|
||||
|
||||
validator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator);
|
||||
});
|
||||
|
||||
describe('Error Output Configuration', () => {
|
||||
it('should detect incorrect configuration - multiple nodes in same array', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Validate Input',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3.4,
|
||||
position: [-400, 64],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Filter URLs',
|
||||
type: 'n8n-nodes-base.filter',
|
||||
typeVersion: 2.2,
|
||||
position: [-176, 64],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Error Response1',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1.5,
|
||||
position: [-160, 240],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Validate Input': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Filter URLs', type: 'main', index: 0 },
|
||||
{ node: 'Error Response1', type: 'main', index: 0 } // WRONG! Both in main[0]
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration') &&
|
||||
e.message.includes('Error Response1') &&
|
||||
e.message.includes('appear to be error handlers but are in main[0]')
|
||||
)).toBe(true);
|
||||
|
||||
// Check that the error message includes the fix
|
||||
const errorMsg = result.errors.find(e => e.message.includes('Incorrect error output configuration'));
|
||||
expect(errorMsg?.message).toContain('INCORRECT (current)');
|
||||
expect(errorMsg?.message).toContain('CORRECT (should be)');
|
||||
expect(errorMsg?.message).toContain('main[1] = error output');
|
||||
});
|
||||
|
||||
it('should validate correct configuration - separate arrays', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Validate Input',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 3.4,
|
||||
position: [-400, 64],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Filter URLs',
|
||||
type: 'n8n-nodes-base.filter',
|
||||
typeVersion: 2.2,
|
||||
position: [-176, 64],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Error Response1',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1.5,
|
||||
position: [-160, 240],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Validate Input': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Filter URLs', type: 'main', index: 0 }
|
||||
],
|
||||
[
|
||||
{ node: 'Error Response1', type: 'main', index: 0 } // Correctly in main[1]
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not have the specific error about incorrect configuration
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
});
|
||||
|
||||
it('should detect onError without error connections', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 4,
|
||||
position: [100, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput' // Has onError
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process Data',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Process Data', type: 'main', index: 0 }
|
||||
]
|
||||
// No main[1] for error output
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.nodeName === 'HTTP Request' &&
|
||||
e.message.includes("has onError: 'continueErrorOutput' but no error output connections")
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should warn about error connections without onError', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 4,
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
// Missing onError property
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process Data',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Error Handler',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 300],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Process Data', type: 'main', index: 0 }
|
||||
],
|
||||
[
|
||||
{ node: 'Error Handler', type: 'main', index: 0 } // Has error connection
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.warnings.some(w =>
|
||||
w.nodeName === 'HTTP Request' &&
|
||||
w.message.includes('error output connections in main[1] but missing onError')
|
||||
)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handler Detection', () => {
|
||||
it('should detect error handler nodes by name', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'API Call',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process Success',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Handle Error', // Contains 'error'
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 300],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'API Call': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Process Success', type: 'main', index: 0 },
|
||||
{ node: 'Handle Error', type: 'main', index: 0 } // Wrong placement
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Handle Error') &&
|
||||
e.message.includes('appear to be error handlers')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect error handler nodes by type', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Respond',
|
||||
type: 'n8n-nodes-base.respondToWebhook', // Common error handler type
|
||||
position: [300, 300],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Process', type: 'main', index: 0 },
|
||||
{ node: 'Respond', type: 'main', index: 0 } // Wrong placement
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Respond') &&
|
||||
e.message.includes('appear to be error handlers')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should not flag non-error nodes in main[0]', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Start',
|
||||
type: 'n8n-nodes-base.manualTrigger',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'First Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Second Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 200],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Start': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'First Process', type: 'main', index: 0 },
|
||||
{ node: 'Second Process', type: 'main', index: 0 } // Both are valid success paths
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not have error about incorrect error configuration
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Complex Error Patterns', () => {
|
||||
it('should handle multiple error handlers correctly', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Log Error',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 200],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '4',
|
||||
name: 'Send Error Email',
|
||||
type: 'n8n-nodes-base.emailSend',
|
||||
position: [300, 300],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Process', type: 'main', index: 0 }
|
||||
],
|
||||
[
|
||||
{ node: 'Log Error', type: 'main', index: 0 },
|
||||
{ node: 'Send Error Email', type: 'main', index: 0 } // Multiple error handlers OK in main[1]
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not have errors about the configuration
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
});
|
||||
|
||||
it('should detect mixed success and error handlers in main[0]', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'API Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Transform Data',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Store Data',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [500, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '4',
|
||||
name: 'Error Notification',
|
||||
type: 'n8n-nodes-base.emailSend',
|
||||
position: [300, 300],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'API Request': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Transform Data', type: 'main', index: 0 },
|
||||
{ node: 'Store Data', type: 'main', index: 0 },
|
||||
{ node: 'Error Notification', type: 'main', index: 0 } // Error handler mixed with success nodes
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Error Notification') &&
|
||||
e.message.includes('appear to be error handlers but are in main[0]')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle nested error handling (error handlers with their own errors)', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Primary API',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Success Handler',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Error Logger',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [300, 200],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
},
|
||||
{
|
||||
id: '4',
|
||||
name: 'Fallback Error',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [500, 250],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Primary API': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Success Handler', type: 'main', index: 0 }
|
||||
],
|
||||
[
|
||||
{ node: 'Error Logger', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
},
|
||||
'Error Logger': {
|
||||
main: [
|
||||
[],
|
||||
[
|
||||
{ node: 'Fallback Error', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not have errors about incorrect configuration
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Edge Cases', () => {
|
||||
it('should handle workflows with no connections at all', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Isolated Node',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [100, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should have warning about orphaned node but not error about connections
|
||||
expect(result.warnings.some(w =>
|
||||
w.nodeName === 'Isolated Node' &&
|
||||
w.message.includes('not connected to any other nodes')
|
||||
)).toBe(true);
|
||||
|
||||
// Should not have error about error output configuration
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle nodes with empty main arrays', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source Node',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Target Node',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source Node': {
|
||||
main: [
|
||||
[], // Empty success array
|
||||
[] // Empty error array
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should detect that onError is set but no error connections exist
|
||||
expect(result.errors.some(e =>
|
||||
e.nodeName === 'Source Node' &&
|
||||
e.message.includes("has onError: 'continueErrorOutput' but no error output connections")
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle workflows with only error outputs (no success path)', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Risky Operation',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Error Handler Only',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 200],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Risky Operation': {
|
||||
main: [
|
||||
[], // No success connections
|
||||
[
|
||||
{ node: 'Error Handler Only', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not have errors about incorrect configuration - this is valid
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
|
||||
// Should not have errors about missing error connections
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes("has onError: 'continueErrorOutput' but no error output connections")
|
||||
)).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle undefined or null connection arrays gracefully', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source Node',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source Node': {
|
||||
main: [
|
||||
null, // Null array
|
||||
undefined // Undefined array
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not crash and should not have configuration errors
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
});
|
||||
|
||||
it('should detect all variations of error-related node names', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Handle Failure',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Catch Exception',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 200],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '4',
|
||||
name: 'Success Path',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [500, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Handle Failure', type: 'main', index: 0 },
|
||||
{ node: 'Catch Exception', type: 'main', index: 0 },
|
||||
{ node: 'Success Path', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should detect both 'Handle Failure' and 'Catch Exception' as error handlers
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Handle Failure') &&
|
||||
e.message.includes('Catch Exception') &&
|
||||
e.message.includes('appear to be error handlers but are in main[0]')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it('should not flag legitimate parallel processing nodes', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Data Source',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process A',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 50],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Process B',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 150],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '4',
|
||||
name: 'Transform Data',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 250],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Data Source': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Process A', type: 'main', index: 0 },
|
||||
{ node: 'Process B', type: 'main', index: 0 },
|
||||
{ node: 'Transform Data', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not flag these as error configuration issues
|
||||
expect(result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
)).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,488 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest';
|
||||
import { WorkflowValidator } from '../../../src/services/workflow-validator';
|
||||
import { NodeRepository } from '../../../src/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '../../../src/services/enhanced-config-validator';
|
||||
|
||||
// Mock the database
|
||||
vi.mock('../../../src/database/node-repository');
|
||||
|
||||
describe('WorkflowValidator - Expression Format Validation', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockNodeRepository: any;
|
||||
|
||||
beforeEach(() => {
|
||||
// Create mock repository
|
||||
mockNodeRepository = {
|
||||
findNodeByType: vi.fn().mockImplementation((type: string) => {
|
||||
// Return mock nodes for common types
|
||||
if (type === 'n8n-nodes-base.emailSend') {
|
||||
return {
|
||||
node_type: 'n8n-nodes-base.emailSend',
|
||||
display_name: 'Email Send',
|
||||
properties: {},
|
||||
version: 2.1
|
||||
};
|
||||
}
|
||||
if (type === 'n8n-nodes-base.github') {
|
||||
return {
|
||||
node_type: 'n8n-nodes-base.github',
|
||||
display_name: 'GitHub',
|
||||
properties: {},
|
||||
version: 1.1
|
||||
};
|
||||
}
|
||||
if (type === 'n8n-nodes-base.webhook') {
|
||||
return {
|
||||
node_type: 'n8n-nodes-base.webhook',
|
||||
display_name: 'Webhook',
|
||||
properties: {},
|
||||
version: 1
|
||||
};
|
||||
}
|
||||
if (type === 'n8n-nodes-base.httpRequest') {
|
||||
return {
|
||||
node_type: 'n8n-nodes-base.httpRequest',
|
||||
display_name: 'HTTP Request',
|
||||
properties: {},
|
||||
version: 4
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}),
|
||||
searchNodes: vi.fn().mockReturnValue([]),
|
||||
getAllNodes: vi.fn().mockReturnValue([]),
|
||||
close: vi.fn()
|
||||
};
|
||||
|
||||
validator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator);
|
||||
});
|
||||
|
||||
describe('Expression Format Detection', () => {
|
||||
it('should detect missing = prefix in simple expressions', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Send Email',
|
||||
type: 'n8n-nodes-base.emailSend',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
fromEmail: '{{ $env.SENDER_EMAIL }}',
|
||||
toEmail: 'user@example.com',
|
||||
subject: 'Test Email'
|
||||
},
|
||||
typeVersion: 2.1
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
|
||||
// Find expression format errors
|
||||
const formatErrors = result.errors.filter(e => e.message.includes('Expression format error'));
|
||||
expect(formatErrors).toHaveLength(1);
|
||||
|
||||
const error = formatErrors[0];
|
||||
expect(error.message).toContain('Expression format error');
|
||||
expect(error.message).toContain('fromEmail');
|
||||
expect(error.message).toContain('{{ $env.SENDER_EMAIL }}');
|
||||
expect(error.message).toContain('={{ $env.SENDER_EMAIL }}');
|
||||
});
|
||||
|
||||
it('should detect missing resource locator format for GitHub fields', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'GitHub',
|
||||
type: 'n8n-nodes-base.github',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
operation: 'createComment',
|
||||
owner: '{{ $vars.GITHUB_OWNER }}',
|
||||
repository: '{{ $vars.GITHUB_REPO }}',
|
||||
issueNumber: 123,
|
||||
body: 'Test comment'
|
||||
},
|
||||
typeVersion: 1.1
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
// Should have errors for both owner and repository
|
||||
const ownerError = result.errors.find(e => e.message.includes('owner'));
|
||||
const repoError = result.errors.find(e => e.message.includes('repository'));
|
||||
|
||||
expect(ownerError).toBeTruthy();
|
||||
expect(repoError).toBeTruthy();
|
||||
expect(ownerError?.message).toContain('resource locator format');
|
||||
expect(ownerError?.message).toContain('__rl');
|
||||
});
|
||||
|
||||
it('should detect mixed content without prefix', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
url: 'https://api.example.com/{{ $json.endpoint }}',
|
||||
headers: {
|
||||
Authorization: 'Bearer {{ $env.API_TOKEN }}'
|
||||
}
|
||||
},
|
||||
typeVersion: 4
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
expect(result.valid).toBe(false);
|
||||
const errors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
|
||||
// Check for URL error
|
||||
const urlError = errors.find(e => e.message.includes('url'));
|
||||
expect(urlError).toBeTruthy();
|
||||
expect(urlError?.message).toContain('=https://api.example.com/{{ $json.endpoint }}');
|
||||
});
|
||||
|
||||
it('should accept properly formatted expressions', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Send Email',
|
||||
type: 'n8n-nodes-base.emailSend',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
fromEmail: '={{ $env.SENDER_EMAIL }}',
|
||||
toEmail: 'user@example.com',
|
||||
subject: '=Test {{ $json.type }}'
|
||||
},
|
||||
typeVersion: 2.1
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have no expression format errors
|
||||
const formatErrors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
expect(formatErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should accept resource locator format', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'GitHub',
|
||||
type: 'n8n-nodes-base.github',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
operation: 'createComment',
|
||||
owner: {
|
||||
__rl: true,
|
||||
value: '={{ $vars.GITHUB_OWNER }}',
|
||||
mode: 'expression'
|
||||
},
|
||||
repository: {
|
||||
__rl: true,
|
||||
value: '={{ $vars.GITHUB_REPO }}',
|
||||
mode: 'expression'
|
||||
},
|
||||
issueNumber: 123,
|
||||
body: '=Test comment from {{ $json.author }}'
|
||||
},
|
||||
typeVersion: 1.1
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have no expression format errors
|
||||
const formatErrors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
expect(formatErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should validate nested expressions in complex parameters', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
method: 'POST',
|
||||
url: 'https://api.example.com',
|
||||
sendBody: true,
|
||||
bodyParameters: {
|
||||
parameters: [
|
||||
{
|
||||
name: 'userId',
|
||||
value: '{{ $json.id }}'
|
||||
},
|
||||
{
|
||||
name: 'timestamp',
|
||||
value: '={{ $now }}'
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
typeVersion: 4
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should detect the missing prefix in nested parameter
|
||||
const errors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
expect(errors.length).toBeGreaterThan(0);
|
||||
|
||||
const nestedError = errors.find(e => e.message.includes('bodyParameters'));
|
||||
expect(nestedError).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should warn about RL format even with prefix', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'GitHub',
|
||||
type: 'n8n-nodes-base.github',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
operation: 'createComment',
|
||||
owner: '={{ $vars.GITHUB_OWNER }}',
|
||||
repository: '={{ $vars.GITHUB_REPO }}',
|
||||
issueNumber: 123,
|
||||
body: 'Test'
|
||||
},
|
||||
typeVersion: 1.1
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have warnings about using RL format
|
||||
const warnings = result.warnings.filter(w => w.message.includes('resource locator format'));
|
||||
expect(warnings.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Real-world workflow examples', () => {
|
||||
it.skip('should validate Email workflow with expression issues', async () => {
|
||||
const workflow = {
|
||||
name: 'Error Notification Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: 'webhook-1',
|
||||
name: 'Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {
|
||||
path: 'error-handler',
|
||||
httpMethod: 'POST'
|
||||
},
|
||||
typeVersion: 1
|
||||
},
|
||||
{
|
||||
id: 'email-1',
|
||||
name: 'Error Handler',
|
||||
type: 'n8n-nodes-base.emailSend',
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {
|
||||
fromEmail: '{{ $env.ADMIN_EMAIL }}',
|
||||
toEmail: 'admin@company.com',
|
||||
subject: 'Error in {{ $json.workflow }}',
|
||||
message: 'An error occurred: {{ $json.error }}',
|
||||
options: {
|
||||
replyTo: '={{ $env.SUPPORT_EMAIL }}'
|
||||
}
|
||||
},
|
||||
typeVersion: 2.1
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Webhook': {
|
||||
main: [[{ node: 'Error Handler', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have multiple expression format errors
|
||||
const formatErrors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
expect(formatErrors.length).toBeGreaterThanOrEqual(3); // fromEmail, subject, message
|
||||
|
||||
// Check specific errors
|
||||
const fromEmailError = formatErrors.find(e => e.message.includes('fromEmail'));
|
||||
expect(fromEmailError).toBeTruthy();
|
||||
expect(fromEmailError?.message).toContain('={{ $env.ADMIN_EMAIL }}');
|
||||
});
|
||||
|
||||
it.skip('should validate GitHub workflow with resource locator issues', async () => {
|
||||
const workflow = {
|
||||
name: 'GitHub Issue Handler',
|
||||
nodes: [
|
||||
{
|
||||
id: 'webhook-1',
|
||||
name: 'Issue Webhook',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {
|
||||
path: 'github-issue',
|
||||
httpMethod: 'POST'
|
||||
},
|
||||
typeVersion: 1
|
||||
},
|
||||
{
|
||||
id: 'github-1',
|
||||
name: 'Create Comment',
|
||||
type: 'n8n-nodes-base.github',
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {
|
||||
operation: 'createComment',
|
||||
owner: '{{ $vars.GITHUB_OWNER }}',
|
||||
repository: '{{ $vars.GITHUB_REPO }}',
|
||||
issueNumber: '={{ $json.body.issue.number }}',
|
||||
body: 'Thanks for the issue @{{ $json.body.issue.user.login }}!'
|
||||
},
|
||||
typeVersion: 1.1
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Issue Webhook': {
|
||||
main: [[{ node: 'Create Comment', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have errors for owner, repository, and body
|
||||
const formatErrors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
expect(formatErrors.length).toBeGreaterThanOrEqual(3);
|
||||
|
||||
// Check for resource locator suggestions
|
||||
const ownerError = formatErrors.find(e => e.message.includes('owner'));
|
||||
expect(ownerError?.message).toContain('__rl');
|
||||
expect(ownerError?.message).toContain('resource locator format');
|
||||
});
|
||||
|
||||
it('should provide clear fix examples in error messages', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Process Data',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
url: 'https://api.example.com/users/{{ $json.userId }}'
|
||||
},
|
||||
typeVersion: 4
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
const error = result.errors.find(e => e.message.includes('Expression format'));
|
||||
expect(error).toBeTruthy();
|
||||
|
||||
// Error message should contain both incorrect and correct examples
|
||||
expect(error?.message).toContain('Current (incorrect):');
|
||||
expect(error?.message).toContain('"url": "https://api.example.com/users/{{ $json.userId }}"');
|
||||
expect(error?.message).toContain('Fixed (correct):');
|
||||
expect(error?.message).toContain('"url": "=https://api.example.com/users/{{ $json.userId }}"');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Integration with other validations', () => {
|
||||
it('should validate expression format alongside syntax', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Test Node',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
url: '{{ $json.url', // Syntax error: unclosed expression
|
||||
headers: {
|
||||
'X-Token': '{{ $env.TOKEN }}' // Format error: missing prefix
|
||||
}
|
||||
},
|
||||
typeVersion: 4
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have both syntax and format errors
|
||||
const syntaxErrors = result.errors.filter(e => e.message.includes('Unmatched expression brackets'));
|
||||
const formatErrors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
|
||||
expect(syntaxErrors.length).toBeGreaterThan(0);
|
||||
expect(formatErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should not interfere with node validation', async () => {
|
||||
// Test that expression format validation works alongside other validations
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0] as [number, number],
|
||||
parameters: {
|
||||
url: '{{ $json.endpoint }}', // Expression format error
|
||||
headers: {
|
||||
Authorization: '={{ $env.TOKEN }}' // Correct format
|
||||
}
|
||||
},
|
||||
typeVersion: 4
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have expression format error for url field
|
||||
const formatErrors = result.errors.filter(e => e.message.includes('Expression format'));
|
||||
expect(formatErrors).toHaveLength(1);
|
||||
expect(formatErrors[0].message).toContain('url');
|
||||
|
||||
// The workflow should still have structure validation (no trigger warning, etc)
|
||||
// This proves that expression validation doesn't interfere with other checks
|
||||
expect(result.warnings.some(w => w.message.includes('trigger'))).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,434 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/database/node-repository');
|
||||
vi.mock('@/services/enhanced-config-validator');
|
||||
|
||||
describe('WorkflowValidator - SplitInBatches Validation (Simplified)', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockNodeRepository: any;
|
||||
let mockNodeValidator: any;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
mockNodeRepository = {
|
||||
getNode: vi.fn()
|
||||
};
|
||||
|
||||
mockNodeValidator = {
|
||||
validateWithMode: vi.fn().mockReturnValue({
|
||||
errors: [],
|
||||
warnings: []
|
||||
})
|
||||
};
|
||||
|
||||
validator = new WorkflowValidator(mockNodeRepository, mockNodeValidator);
|
||||
});
|
||||
|
||||
describe('SplitInBatches node detection', () => {
|
||||
it('should identify SplitInBatches nodes in workflow', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.splitInBatches',
|
||||
properties: []
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'SplitInBatches Workflow',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Split In Batches',
|
||||
type: 'n8n-nodes-base.splitInBatches',
|
||||
position: [100, 100],
|
||||
parameters: { batchSize: 10 }
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Process Item',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': {
|
||||
main: [
|
||||
[], // Done output (0)
|
||||
[{ node: 'Process Item', type: 'main', index: 0 }] // Loop output (1)
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should complete validation without crashing
|
||||
expect(result).toBeDefined();
|
||||
expect(result.valid).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle SplitInBatches with processing node name patterns', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.splitInBatches',
|
||||
properties: []
|
||||
});
|
||||
|
||||
const processingNames = [
|
||||
'Process Item',
|
||||
'Transform Data',
|
||||
'Handle Each',
|
||||
'Function Node',
|
||||
'Code Block'
|
||||
];
|
||||
|
||||
for (const nodeName of processingNames) {
|
||||
const workflow = {
|
||||
name: 'Processing Pattern Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Split In Batches',
|
||||
type: 'n8n-nodes-base.splitInBatches',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: nodeName,
|
||||
type: 'n8n-nodes-base.function',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': {
|
||||
main: [
|
||||
[{ node: nodeName, type: 'main', index: 0 }], // Processing node on Done output
|
||||
[]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should identify potential processing nodes
|
||||
expect(result).toBeDefined();
|
||||
}
|
||||
});
|
||||
|
||||
it('should handle final processing node patterns', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.splitInBatches',
|
||||
properties: []
|
||||
});
|
||||
|
||||
const finalNames = [
|
||||
'Final Summary',
|
||||
'Send Email',
|
||||
'Complete Notification',
|
||||
'Final Report'
|
||||
];
|
||||
|
||||
for (const nodeName of finalNames) {
|
||||
const workflow = {
|
||||
name: 'Final Pattern Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Split In Batches',
|
||||
type: 'n8n-nodes-base.splitInBatches',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: nodeName,
|
||||
type: 'n8n-nodes-base.emailSend',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': {
|
||||
main: [
|
||||
[{ node: nodeName, type: 'main', index: 0 }], // Final node on Done output (correct)
|
||||
[]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not warn about final nodes on done output
|
||||
expect(result).toBeDefined();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('Connection validation', () => {
|
||||
it('should validate connection indices', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.splitInBatches',
|
||||
properties: []
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Connection Index Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Split In Batches',
|
||||
type: 'n8n-nodes-base.splitInBatches',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Target',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': {
|
||||
main: [
|
||||
[{ node: 'Target', type: 'main', index: -1 }] // Invalid negative index
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
const negativeIndexErrors = result.errors.filter(e =>
|
||||
e.message?.includes('Invalid connection index -1')
|
||||
);
|
||||
expect(negativeIndexErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should handle non-existent target nodes', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.splitInBatches',
|
||||
properties: []
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Missing Target Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Split In Batches',
|
||||
type: 'n8n-nodes-base.splitInBatches',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': {
|
||||
main: [
|
||||
[{ node: 'NonExistentNode', type: 'main', index: 0 }]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
const missingNodeErrors = result.errors.filter(e =>
|
||||
e.message?.includes('non-existent node')
|
||||
);
|
||||
expect(missingNodeErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Self-referencing connections', () => {
|
||||
it('should allow self-referencing for SplitInBatches nodes', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.splitInBatches',
|
||||
properties: []
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Self Reference Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Split In Batches',
|
||||
type: 'n8n-nodes-base.splitInBatches',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': {
|
||||
main: [
|
||||
[],
|
||||
[{ node: 'Split In Batches', type: 'main', index: 0 }] // Self-reference on loop output
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should not warn about self-reference for SplitInBatches
|
||||
const selfRefWarnings = result.warnings.filter(w =>
|
||||
w.message?.includes('self-referencing')
|
||||
);
|
||||
expect(selfRefWarnings).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should warn about self-referencing for non-loop nodes', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.set',
|
||||
properties: []
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Non-Loop Self Reference Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Set',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Set': {
|
||||
main: [
|
||||
[{ node: 'Set', type: 'main', index: 0 }] // Self-reference on regular node
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should warn about self-reference for non-loop nodes
|
||||
const selfRefWarnings = result.warnings.filter(w =>
|
||||
w.message?.includes('self-referencing')
|
||||
);
|
||||
expect(selfRefWarnings.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Output connection validation', () => {
|
||||
it('should validate output connections for nodes with outputs', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.if',
|
||||
outputs: [
|
||||
{ displayName: 'True', description: 'Items that match condition' },
|
||||
{ displayName: 'False', description: 'Items that do not match condition' }
|
||||
],
|
||||
outputNames: ['true', 'false'],
|
||||
properties: []
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'IF Node Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'IF',
|
||||
type: 'n8n-nodes-base.if',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'True Handler',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 50],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'False Handler',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [300, 150],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'IF': {
|
||||
main: [
|
||||
[{ node: 'True Handler', type: 'main', index: 0 }], // True output (0)
|
||||
[{ node: 'False Handler', type: 'main', index: 0 }] // False output (1)
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should validate without major errors
|
||||
expect(result).toBeDefined();
|
||||
expect(result.statistics.validConnections).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error handling', () => {
|
||||
it('should handle nodes without outputs gracefully', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.httpRequest',
|
||||
outputs: null,
|
||||
outputNames: null,
|
||||
properties: []
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'No Outputs Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should handle gracefully without crashing
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle unknown node types gracefully', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue(null);
|
||||
|
||||
const workflow = {
|
||||
name: 'Unknown Node Test',
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Unknown',
|
||||
type: 'n8n-nodes-base.unknown',
|
||||
position: [100, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should report unknown node error
|
||||
const unknownErrors = result.errors.filter(e =>
|
||||
e.message?.includes('Unknown node type')
|
||||
);
|
||||
expect(unknownErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -702,4 +702,244 @@ describe('WorkflowValidator - Loop Node Validation', () => {
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Loop Output Edge Cases (absorbed from loop-output-edge-cases) ──
|
||||
|
||||
describe('Nodes without outputs', () => {
|
||||
it('should handle nodes with null outputs gracefully', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.httpRequest', outputs: null, outputNames: null, properties: [],
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'No Outputs',
|
||||
nodes: [
|
||||
{ id: '1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: { url: 'https://example.com' } },
|
||||
{ id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: { 'HTTP Request': { main: [[{ node: 'Set', type: 'main', index: 0 }]] } },
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result).toBeDefined();
|
||||
const outputErrors = result.errors.filter(e => e.message?.includes('output') && !e.message?.includes('Connection'));
|
||||
expect(outputErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle nodes with empty outputs array', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.customNode', outputs: [], outputNames: [], properties: [],
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Empty Outputs',
|
||||
nodes: [{ id: '1', name: 'Custom Node', type: 'n8n-nodes-base.customNode', position: [100, 100], parameters: {} }],
|
||||
connections: { 'Custom Node': { main: [[{ node: 'Custom Node', type: 'main', index: 0 }]] } },
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const selfRefWarnings = result.warnings.filter(w => w.message?.includes('self-referencing'));
|
||||
expect(selfRefWarnings).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Invalid connection indices', () => {
|
||||
it('should handle very large connection indices', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.switch', outputs: [{ displayName: 'Output 1' }, { displayName: 'Output 2' }], properties: [],
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Large Index',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Switch', type: 'n8n-nodes-base.switch', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: { 'Switch': { main: [[{ node: 'Set', type: 'main', index: 999 }]] } },
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Malformed connection structures', () => {
|
||||
it('should handle null connection objects', async () => {
|
||||
const workflow = {
|
||||
name: 'Null Connections',
|
||||
nodes: [{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} }],
|
||||
connections: { 'Split In Batches': { main: [null, [{ node: 'NonExistent', type: 'main', index: 0 }]] as any } },
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle missing connection properties', async () => {
|
||||
const workflow = {
|
||||
name: 'Malformed Connections',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': { main: [[{ node: 'Set' } as any, { type: 'main', index: 0 } as any, {} as any]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result).toBeDefined();
|
||||
expect(result.errors.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Complex output structures', () => {
|
||||
it('should handle nodes with many outputs', async () => {
|
||||
const manyOutputs = Array.from({ length: 20 }, (_, i) => ({
|
||||
displayName: `Output ${i + 1}`, name: `output${i + 1}`,
|
||||
}));
|
||||
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.complexSwitch', outputs: manyOutputs, outputNames: manyOutputs.map(o => o.name), properties: [],
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Many Outputs',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Complex Switch', type: 'n8n-nodes-base.complexSwitch', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: { 'Complex Switch': { main: Array.from({ length: 20 }, () => [{ node: 'Set', type: 'main', index: 0 }]) } },
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle mixed output types (main, error, ai_tool)', async () => {
|
||||
mockNodeRepository.getNode.mockReturnValue({
|
||||
nodeType: 'nodes-base.complexNode', outputs: [{ displayName: 'Main', type: 'main' }, { displayName: 'Error', type: 'error' }], properties: [],
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
name: 'Mixed Output Types',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Complex Node', type: 'n8n-nodes-base.complexNode', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Main Handler', type: 'n8n-nodes-base.set', position: [300, 50], parameters: {} },
|
||||
{ id: '3', name: 'Error Handler', type: 'n8n-nodes-base.set', position: [300, 150], parameters: {} },
|
||||
{ id: '4', name: 'Tool', type: 'n8n-nodes-base.httpRequest', position: [500, 100], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Complex Node': {
|
||||
main: [[{ node: 'Main Handler', type: 'main', index: 0 }]],
|
||||
error: [[{ node: 'Error Handler', type: 'main', index: 0 }]],
|
||||
ai_tool: [[{ node: 'Tool', type: 'main', index: 0 }]],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result).toBeDefined();
|
||||
expect(result.statistics.validConnections).toBe(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe('SplitInBatches specific edge cases', () => {
|
||||
it('should handle SplitInBatches with no connections', async () => {
|
||||
const workflow = {
|
||||
name: 'Isolated SplitInBatches',
|
||||
nodes: [{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} }],
|
||||
connections: {},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const splitWarnings = result.warnings.filter(w => w.message?.includes('SplitInBatches') || w.message?.includes('loop') || w.message?.includes('done'));
|
||||
expect(splitWarnings).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle SplitInBatches with only done output connected', async () => {
|
||||
const workflow = {
|
||||
name: 'Single Output SplitInBatches',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Final Action', type: 'n8n-nodes-base.emailSend', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: { 'Split In Batches': { main: [[{ node: 'Final Action', type: 'main', index: 0 }], []] } },
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const loopWarnings = result.warnings.filter(w => w.message?.includes('loop') && w.message?.includes('connect back'));
|
||||
expect(loopWarnings).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should handle SplitInBatches with both outputs to same node', async () => {
|
||||
const workflow = {
|
||||
name: 'Same Target SplitInBatches',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Multi Purpose', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': { main: [[{ node: 'Multi Purpose', type: 'main', index: 0 }], [{ node: 'Multi Purpose', type: 'main', index: 0 }]] },
|
||||
'Multi Purpose': { main: [[{ node: 'Split In Batches', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const loopWarnings = result.warnings.filter(w => w.message?.includes('loop') && w.message?.includes('connect back'));
|
||||
expect(loopWarnings).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should detect reversed outputs with processing node on done output', async () => {
|
||||
const workflow = {
|
||||
name: 'Reversed SplitInBatches with Function Node',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'Process Function', type: 'n8n-nodes-base.function', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': { main: [[{ node: 'Process Function', type: 'main', index: 0 }], []] },
|
||||
'Process Function': { main: [[{ node: 'Split In Batches', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const reversedErrors = result.errors.filter(e => e.message?.includes('SplitInBatches outputs appear reversed'));
|
||||
expect(reversedErrors).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should handle self-referencing nodes in loop back detection', async () => {
|
||||
const workflow = {
|
||||
name: 'Self Reference in Loop Back',
|
||||
nodes: [
|
||||
{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
|
||||
{ id: '2', name: 'SelfRef', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
|
||||
],
|
||||
connections: {
|
||||
'Split In Batches': { main: [[], [{ node: 'SelfRef', type: 'main', index: 0 }]] },
|
||||
'SelfRef': { main: [[{ node: 'SelfRef', type: 'main', index: 0 }]] },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
expect(result.warnings.filter(w => w.message?.includes("doesn't connect back"))).toHaveLength(1);
|
||||
expect(result.warnings.filter(w => w.message?.includes('self-referencing'))).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should handle many SplitInBatches nodes', async () => {
|
||||
const nodes = Array.from({ length: 100 }, (_, i) => ({
|
||||
id: `split${i}`, name: `Split ${i}`, type: 'n8n-nodes-base.splitInBatches',
|
||||
position: [100 + (i % 10) * 100, 100 + Math.floor(i / 10) * 100], parameters: {},
|
||||
}));
|
||||
|
||||
const connections: any = {};
|
||||
for (let i = 0; i < 99; i++) {
|
||||
connections[`Split ${i}`] = { main: [[{ node: `Split ${i + 1}`, type: 'main', index: 0 }], []] };
|
||||
}
|
||||
|
||||
const result = await validator.validateWorkflow({ name: 'Many SplitInBatches', nodes, connections } as any);
|
||||
expect(result).toBeDefined();
|
||||
expect(result.statistics.totalNodes).toBe(100);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,721 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
vi.mock('@/utils/logger');
|
||||
|
||||
describe('WorkflowValidator - Mock-based Unit Tests', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockNodeRepository: any;
|
||||
let mockGetNode: Mock;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Create detailed mock repository with spy functions
|
||||
mockGetNode = vi.fn();
|
||||
mockNodeRepository = {
|
||||
getNode: mockGetNode
|
||||
};
|
||||
|
||||
validator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator);
|
||||
|
||||
// Default mock responses
|
||||
mockGetNode.mockImplementation((type: string) => {
|
||||
if (type.includes('httpRequest')) {
|
||||
return {
|
||||
node_type: type,
|
||||
display_name: 'HTTP Request',
|
||||
isVersioned: true,
|
||||
version: 4
|
||||
};
|
||||
} else if (type.includes('set')) {
|
||||
return {
|
||||
node_type: type,
|
||||
display_name: 'Set',
|
||||
isVersioned: true,
|
||||
version: 3
|
||||
};
|
||||
} else if (type.includes('respondToWebhook')) {
|
||||
return {
|
||||
node_type: type,
|
||||
display_name: 'Respond to Webhook',
|
||||
isVersioned: true,
|
||||
version: 1
|
||||
};
|
||||
}
|
||||
return null;
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handler Detection Logic', () => {
|
||||
it('should correctly identify error handlers by node name patterns', async () => {
|
||||
const errorNodeNames = [
|
||||
'Error Handler',
|
||||
'Handle Error',
|
||||
'Catch Exception',
|
||||
'Failure Response',
|
||||
'Error Notification',
|
||||
'Fail Safe',
|
||||
'Exception Handler',
|
||||
'Error Callback'
|
||||
];
|
||||
|
||||
const successNodeNames = [
|
||||
'Process Data',
|
||||
'Transform',
|
||||
'Success Handler',
|
||||
'Continue Process',
|
||||
'Normal Flow'
|
||||
];
|
||||
|
||||
for (const errorName of errorNodeNames) {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Success Path',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: errorName,
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Success Path', type: 'main', index: 0 },
|
||||
{ node: errorName, type: 'main', index: 0 } // Should be detected as error handler
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should detect this as an incorrect error configuration
|
||||
const hasError = result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration') &&
|
||||
e.message.includes(errorName)
|
||||
);
|
||||
expect(hasError).toBe(true);
|
||||
}
|
||||
|
||||
// Test that success node names are NOT flagged
|
||||
for (const successName of successNodeNames) {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'First Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: successName,
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'First Process', type: 'main', index: 0 },
|
||||
{ node: successName, type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should NOT detect this as an error configuration
|
||||
const hasError = result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
);
|
||||
expect(hasError).toBe(false);
|
||||
}
|
||||
});
|
||||
|
||||
it('should correctly identify error handlers by node type patterns', async () => {
|
||||
const errorNodeTypes = [
|
||||
'n8n-nodes-base.respondToWebhook',
|
||||
'n8n-nodes-base.emailSend'
|
||||
// Note: slack and webhook are not in the current detection logic
|
||||
];
|
||||
|
||||
// Update mock to return appropriate node info for these types
|
||||
mockGetNode.mockImplementation((type: string) => {
|
||||
return {
|
||||
node_type: type,
|
||||
display_name: type.split('.').pop() || 'Unknown',
|
||||
isVersioned: true,
|
||||
version: 1
|
||||
};
|
||||
});
|
||||
|
||||
for (const nodeType of errorNodeTypes) {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Success Path',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Response Node',
|
||||
type: nodeType,
|
||||
position: [200, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Success Path', type: 'main', index: 0 },
|
||||
{ node: 'Response Node', type: 'main', index: 0 } // Should be detected
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should detect this as an incorrect error configuration
|
||||
const hasError = result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration') &&
|
||||
e.message.includes('Response Node')
|
||||
);
|
||||
expect(hasError).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it('should handle cases where node repository returns null', async () => {
|
||||
// Mock repository to return null for unknown nodes
|
||||
mockGetNode.mockImplementation((type: string) => {
|
||||
if (type === 'n8n-nodes-base.unknownNode') {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
node_type: type,
|
||||
display_name: 'Known Node',
|
||||
isVersioned: true,
|
||||
version: 1
|
||||
};
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Unknown Node',
|
||||
type: 'n8n-nodes-base.unknownNode',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Error Handler',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Unknown Node', type: 'main', index: 0 },
|
||||
{ node: 'Error Handler', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should still detect the error configuration based on node name
|
||||
const hasError = result.errors.some(e =>
|
||||
e.message.includes('Incorrect error output configuration') &&
|
||||
e.message.includes('Error Handler')
|
||||
);
|
||||
expect(hasError).toBe(true);
|
||||
|
||||
// Should not crash due to null node info
|
||||
expect(result).toHaveProperty('valid');
|
||||
expect(Array.isArray(result.errors)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('onError Property Validation Logic', () => {
|
||||
it('should validate onError property combinations correctly', async () => {
|
||||
const testCases = [
|
||||
{
|
||||
name: 'onError set but no error connections',
|
||||
onError: 'continueErrorOutput',
|
||||
hasErrorConnections: false,
|
||||
expectedErrorType: 'error',
|
||||
expectedMessage: "has onError: 'continueErrorOutput' but no error output connections"
|
||||
},
|
||||
{
|
||||
name: 'error connections but no onError',
|
||||
onError: undefined,
|
||||
hasErrorConnections: true,
|
||||
expectedErrorType: 'warning',
|
||||
expectedMessage: 'error output connections in main[1] but missing onError'
|
||||
},
|
||||
{
|
||||
name: 'onError set with error connections',
|
||||
onError: 'continueErrorOutput',
|
||||
hasErrorConnections: true,
|
||||
expectedErrorType: null,
|
||||
expectedMessage: null
|
||||
},
|
||||
{
|
||||
name: 'no onError and no error connections',
|
||||
onError: undefined,
|
||||
hasErrorConnections: false,
|
||||
expectedErrorType: null,
|
||||
expectedMessage: null
|
||||
}
|
||||
];
|
||||
|
||||
for (const testCase of testCases) {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Test Node',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {},
|
||||
...(testCase.onError ? { onError: testCase.onError } : {})
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Success Handler',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Error Handler',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Test Node': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Success Handler', type: 'main', index: 0 }
|
||||
],
|
||||
...(testCase.hasErrorConnections ? [
|
||||
[
|
||||
{ node: 'Error Handler', type: 'main', index: 0 }
|
||||
]
|
||||
] : [])
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
if (testCase.expectedErrorType === 'error') {
|
||||
const hasExpectedError = result.errors.some(e =>
|
||||
e.nodeName === 'Test Node' &&
|
||||
e.message.includes(testCase.expectedMessage!)
|
||||
);
|
||||
expect(hasExpectedError).toBe(true);
|
||||
} else if (testCase.expectedErrorType === 'warning') {
|
||||
const hasExpectedWarning = result.warnings.some(w =>
|
||||
w.nodeName === 'Test Node' &&
|
||||
w.message.includes(testCase.expectedMessage!)
|
||||
);
|
||||
expect(hasExpectedWarning).toBe(true);
|
||||
} else {
|
||||
// Should not have related errors or warnings about onError/error output mismatches
|
||||
const hasRelatedError = result.errors.some(e =>
|
||||
e.nodeName === 'Test Node' &&
|
||||
(e.message.includes("has onError: 'continueErrorOutput' but no error output connections") ||
|
||||
e.message.includes('Incorrect error output configuration'))
|
||||
);
|
||||
const hasRelatedWarning = result.warnings.some(w =>
|
||||
w.nodeName === 'Test Node' &&
|
||||
w.message.includes('error output connections in main[1] but missing onError')
|
||||
);
|
||||
expect(hasRelatedError).toBe(false);
|
||||
expect(hasRelatedWarning).toBe(false);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it('should handle different onError values correctly', async () => {
|
||||
const onErrorValues = [
|
||||
'continueErrorOutput',
|
||||
'continueRegularOutput',
|
||||
'stopWorkflow'
|
||||
];
|
||||
|
||||
for (const onErrorValue of onErrorValues) {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Test Node',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {},
|
||||
onError: onErrorValue
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Next Node',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Test Node': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Next Node', type: 'main', index: 0 }
|
||||
]
|
||||
// No error connections
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
if (onErrorValue === 'continueErrorOutput') {
|
||||
// Should have error about missing error connections
|
||||
const hasError = result.errors.some(e =>
|
||||
e.nodeName === 'Test Node' &&
|
||||
e.message.includes("has onError: 'continueErrorOutput' but no error output connections")
|
||||
);
|
||||
expect(hasError).toBe(true);
|
||||
} else {
|
||||
// Should not have error about missing error connections
|
||||
const hasError = result.errors.some(e =>
|
||||
e.nodeName === 'Test Node' &&
|
||||
e.message.includes('but no error output connections')
|
||||
);
|
||||
expect(hasError).toBe(false);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('JSON Format Generation', () => {
|
||||
it('should generate valid JSON in error messages', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'API Call',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Success Process',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'Error Handler',
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
position: [200, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'API Call': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Success Process', type: 'main', index: 0 },
|
||||
{ node: 'Error Handler', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
const errorConfigError = result.errors.find(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
);
|
||||
|
||||
expect(errorConfigError).toBeDefined();
|
||||
|
||||
// Extract JSON sections from error message
|
||||
const incorrectMatch = errorConfigError!.message.match(/INCORRECT \(current\):\n([\s\S]*?)\n\nCORRECT/);
|
||||
const correctMatch = errorConfigError!.message.match(/CORRECT \(should be\):\n([\s\S]*?)\n\nAlso add/);
|
||||
|
||||
expect(incorrectMatch).toBeDefined();
|
||||
expect(correctMatch).toBeDefined();
|
||||
|
||||
// Extract just the JSON part (remove comments)
|
||||
const incorrectJsonStr = incorrectMatch![1];
|
||||
const correctJsonStr = correctMatch![1];
|
||||
|
||||
// Remove comments and clean up for JSON parsing
|
||||
const cleanIncorrectJson = incorrectJsonStr.replace(/\/\/.*$/gm, '').replace(/,\s*$/, '');
|
||||
const cleanCorrectJson = correctJsonStr.replace(/\/\/.*$/gm, '').replace(/,\s*$/, '');
|
||||
|
||||
const incorrectJson = `{${cleanIncorrectJson}}`;
|
||||
const correctJson = `{${cleanCorrectJson}}`;
|
||||
|
||||
expect(() => JSON.parse(incorrectJson)).not.toThrow();
|
||||
expect(() => JSON.parse(correctJson)).not.toThrow();
|
||||
|
||||
const parsedIncorrect = JSON.parse(incorrectJson);
|
||||
const parsedCorrect = JSON.parse(correctJson);
|
||||
|
||||
// Validate structure
|
||||
expect(parsedIncorrect).toHaveProperty('API Call');
|
||||
expect(parsedCorrect).toHaveProperty('API Call');
|
||||
expect(parsedIncorrect['API Call']).toHaveProperty('main');
|
||||
expect(parsedCorrect['API Call']).toHaveProperty('main');
|
||||
|
||||
// Incorrect should have both nodes in main[0]
|
||||
expect(Array.isArray(parsedIncorrect['API Call'].main)).toBe(true);
|
||||
expect(parsedIncorrect['API Call'].main).toHaveLength(1);
|
||||
expect(parsedIncorrect['API Call'].main[0]).toHaveLength(2);
|
||||
|
||||
// Correct should have separate arrays
|
||||
expect(Array.isArray(parsedCorrect['API Call'].main)).toBe(true);
|
||||
expect(parsedCorrect['API Call'].main).toHaveLength(2);
|
||||
expect(parsedCorrect['API Call'].main[0]).toHaveLength(1); // Success only
|
||||
expect(parsedCorrect['API Call'].main[1]).toHaveLength(1); // Error only
|
||||
});
|
||||
|
||||
it('should handle special characters in node names in JSON', async () => {
|
||||
// Test simpler special characters that are easier to handle in JSON
|
||||
const specialNodeNames = [
|
||||
'Node with spaces',
|
||||
'Node-with-dashes',
|
||||
'Node_with_underscores'
|
||||
];
|
||||
|
||||
for (const specialName of specialNodeNames) {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Source',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Success',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: specialName,
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
position: [200, 100],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Source': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Success', type: 'main', index: 0 },
|
||||
{ node: specialName, type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
const errorConfigError = result.errors.find(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
);
|
||||
|
||||
expect(errorConfigError).toBeDefined();
|
||||
|
||||
// Verify the error message contains the special node name
|
||||
expect(errorConfigError!.message).toContain(specialName);
|
||||
|
||||
// Verify JSON structure is present (but don't parse due to comments)
|
||||
expect(errorConfigError!.message).toContain('INCORRECT (current):');
|
||||
expect(errorConfigError!.message).toContain('CORRECT (should be):');
|
||||
expect(errorConfigError!.message).toContain('main[0]');
|
||||
expect(errorConfigError!.message).toContain('main[1]');
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('Repository Interaction Patterns', () => {
|
||||
it('should call repository getNode with correct parameters', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP Node',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'Set Node',
|
||||
type: 'n8n-nodes-base.set',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'HTTP Node': {
|
||||
main: [
|
||||
[
|
||||
{ node: 'Set Node', type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should have called getNode for each node type (normalized to short form)
|
||||
// Called during node validation + output/input index bounds checking
|
||||
expect(mockGetNode).toHaveBeenCalledWith('nodes-base.httpRequest');
|
||||
expect(mockGetNode).toHaveBeenCalledWith('nodes-base.set');
|
||||
expect(mockGetNode.mock.calls.length).toBeGreaterThanOrEqual(2);
|
||||
});
|
||||
|
||||
it('should handle repository errors gracefully', async () => {
|
||||
// Mock repository to throw error
|
||||
mockGetNode.mockImplementation(() => {
|
||||
throw new Error('Database connection failed');
|
||||
});
|
||||
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'Test Node',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
// Should not throw error
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should still return a valid result
|
||||
expect(result).toHaveProperty('valid');
|
||||
expect(Array.isArray(result.errors)).toBe(true);
|
||||
expect(Array.isArray(result.warnings)).toBe(true);
|
||||
});
|
||||
|
||||
it('should optimize repository calls for duplicate node types', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: '1',
|
||||
name: 'HTTP 1',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '2',
|
||||
name: 'HTTP 2',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [200, 0],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: '3',
|
||||
name: 'HTTP 3',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
position: [400, 0],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {}
|
||||
};
|
||||
|
||||
await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Should call getNode for the same type multiple times (current implementation)
|
||||
// Note: This test documents current behavior. Could be optimized in the future.
|
||||
const httpRequestCalls = mockGetNode.mock.calls.filter(
|
||||
call => call[0] === 'nodes-base.httpRequest'
|
||||
);
|
||||
expect(httpRequestCalls.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,528 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
vi.mock('@/utils/logger');
|
||||
|
||||
describe('WorkflowValidator - Performance Tests', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockNodeRepository: any;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Create mock repository with performance optimizations
|
||||
mockNodeRepository = {
|
||||
getNode: vi.fn((type: string) => {
|
||||
// Return mock node info for any node type to avoid database calls
|
||||
return {
|
||||
node_type: type,
|
||||
display_name: 'Mock Node',
|
||||
isVersioned: true,
|
||||
version: 1
|
||||
};
|
||||
})
|
||||
};
|
||||
|
||||
validator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator);
|
||||
});
|
||||
|
||||
describe('Large Workflow Performance', () => {
|
||||
it('should validate large workflows with many error paths efficiently', async () => {
|
||||
// Generate a large workflow with 500 nodes
|
||||
const nodeCount = 500;
|
||||
const nodes = [];
|
||||
const connections: any = {};
|
||||
|
||||
// Create nodes with various error handling patterns
|
||||
for (let i = 1; i <= nodeCount; i++) {
|
||||
nodes.push({
|
||||
id: i.toString(),
|
||||
name: `Node${i}`,
|
||||
type: i % 5 === 0 ? 'n8n-nodes-base.httpRequest' : 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [i * 10, (i % 10) * 100],
|
||||
parameters: {},
|
||||
...(i % 3 === 0 ? { onError: 'continueErrorOutput' } : {})
|
||||
});
|
||||
}
|
||||
|
||||
// Create connections with multiple error handling scenarios
|
||||
for (let i = 1; i < nodeCount; i++) {
|
||||
const hasErrorHandling = i % 3 === 0;
|
||||
const hasMultipleConnections = i % 7 === 0;
|
||||
|
||||
if (hasErrorHandling && hasMultipleConnections) {
|
||||
// Mix correct and incorrect error handling patterns
|
||||
const isIncorrect = i % 14 === 0;
|
||||
|
||||
if (isIncorrect) {
|
||||
// Incorrect: error handlers mixed with success nodes in main[0]
|
||||
connections[`Node${i}`] = {
|
||||
main: [
|
||||
[
|
||||
{ node: `Node${i + 1}`, type: 'main', index: 0 },
|
||||
{ node: `Error Handler ${i}`, type: 'main', index: 0 } // Wrong!
|
||||
]
|
||||
]
|
||||
};
|
||||
} else {
|
||||
// Correct: separate success and error outputs
|
||||
connections[`Node${i}`] = {
|
||||
main: [
|
||||
[
|
||||
{ node: `Node${i + 1}`, type: 'main', index: 0 }
|
||||
],
|
||||
[
|
||||
{ node: `Error Handler ${i}`, type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
};
|
||||
}
|
||||
|
||||
// Add error handler node
|
||||
nodes.push({
|
||||
id: `error-${i}`,
|
||||
name: `Error Handler ${i}`,
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [(i + nodeCount) * 10, 500],
|
||||
parameters: {}
|
||||
});
|
||||
} else {
|
||||
// Simple connection
|
||||
connections[`Node${i}`] = {
|
||||
main: [
|
||||
[
|
||||
{ node: `Node${i + 1}`, type: 'main', index: 0 }
|
||||
]
|
||||
]
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
const workflow = { nodes, connections };
|
||||
|
||||
const startTime = performance.now();
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const endTime = performance.now();
|
||||
|
||||
const executionTime = endTime - startTime;
|
||||
|
||||
// Validation should complete within reasonable time
|
||||
expect(executionTime).toBeLessThan(10000); // Less than 10 seconds
|
||||
|
||||
// Should still catch validation errors
|
||||
expect(Array.isArray(result.errors)).toBe(true);
|
||||
expect(Array.isArray(result.warnings)).toBe(true);
|
||||
|
||||
// Should detect incorrect error configurations
|
||||
const incorrectConfigErrors = result.errors.filter(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
);
|
||||
expect(incorrectConfigErrors.length).toBeGreaterThan(0);
|
||||
|
||||
console.log(`Validated ${nodes.length} nodes in ${executionTime.toFixed(2)}ms`);
|
||||
console.log(`Found ${result.errors.length} errors and ${result.warnings.length} warnings`);
|
||||
});
|
||||
|
||||
it('should handle deeply nested error handling chains efficiently', async () => {
|
||||
// Create a chain of error handlers, each with their own error handling
|
||||
const chainLength = 100;
|
||||
const nodes = [];
|
||||
const connections: any = {};
|
||||
|
||||
for (let i = 1; i <= chainLength; i++) {
|
||||
// Main processing node
|
||||
nodes.push({
|
||||
id: `main-${i}`,
|
||||
name: `Main ${i}`,
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 1,
|
||||
position: [i * 150, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
});
|
||||
|
||||
// Error handler node
|
||||
nodes.push({
|
||||
id: `error-${i}`,
|
||||
name: `Error Handler ${i}`,
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 1,
|
||||
position: [i * 150, 300],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
});
|
||||
|
||||
// Fallback error node
|
||||
nodes.push({
|
||||
id: `fallback-${i}`,
|
||||
name: `Fallback ${i}`,
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [i * 150, 500],
|
||||
parameters: {}
|
||||
});
|
||||
|
||||
// Connections
|
||||
connections[`Main ${i}`] = {
|
||||
main: [
|
||||
// Success path
|
||||
i < chainLength ? [{ node: `Main ${i + 1}`, type: 'main', index: 0 }] : [],
|
||||
// Error path
|
||||
[{ node: `Error Handler ${i}`, type: 'main', index: 0 }]
|
||||
]
|
||||
};
|
||||
|
||||
connections[`Error Handler ${i}`] = {
|
||||
main: [
|
||||
// Success path (continue to next error handler or end)
|
||||
[],
|
||||
// Error path (go to fallback)
|
||||
[{ node: `Fallback ${i}`, type: 'main', index: 0 }]
|
||||
]
|
||||
};
|
||||
}
|
||||
|
||||
const workflow = { nodes, connections };
|
||||
|
||||
const startTime = performance.now();
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const endTime = performance.now();
|
||||
|
||||
const executionTime = endTime - startTime;
|
||||
|
||||
// Should complete quickly even with complex nested error handling
|
||||
expect(executionTime).toBeLessThan(5000); // Less than 5 seconds
|
||||
|
||||
// Should not have errors about incorrect configuration (this is correct)
|
||||
const incorrectConfigErrors = result.errors.filter(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
);
|
||||
expect(incorrectConfigErrors.length).toBe(0);
|
||||
|
||||
console.log(`Validated ${nodes.length} nodes with nested error handling in ${executionTime.toFixed(2)}ms`);
|
||||
});
|
||||
|
||||
it('should efficiently validate workflows with many parallel error paths', async () => {
|
||||
// Create a workflow with one source node that fans out to many parallel paths,
|
||||
// each with their own error handling
|
||||
const parallelPathCount = 200;
|
||||
const nodes = [
|
||||
{
|
||||
id: 'source',
|
||||
name: 'Source',
|
||||
type: 'n8n-nodes-base.webhook',
|
||||
typeVersion: 1,
|
||||
position: [0, 0],
|
||||
parameters: {}
|
||||
}
|
||||
];
|
||||
const connections: any = {
|
||||
'Source': {
|
||||
main: [[]]
|
||||
}
|
||||
};
|
||||
|
||||
// Create parallel paths
|
||||
for (let i = 1; i <= parallelPathCount; i++) {
|
||||
// Processing node
|
||||
nodes.push({
|
||||
id: `process-${i}`,
|
||||
name: `Process ${i}`,
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 1,
|
||||
position: [200, i * 20],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
} as any);
|
||||
|
||||
// Success handler
|
||||
nodes.push({
|
||||
id: `success-${i}`,
|
||||
name: `Success ${i}`,
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [400, i * 20],
|
||||
parameters: {}
|
||||
});
|
||||
|
||||
// Error handler
|
||||
nodes.push({
|
||||
id: `error-${i}`,
|
||||
name: `Error Handler ${i}`,
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [400, i * 20 + 10],
|
||||
parameters: {}
|
||||
});
|
||||
|
||||
// Connect source to processing node
|
||||
connections['Source'].main[0].push({
|
||||
node: `Process ${i}`,
|
||||
type: 'main',
|
||||
index: 0
|
||||
});
|
||||
|
||||
// Connect processing node to success and error handlers
|
||||
connections[`Process ${i}`] = {
|
||||
main: [
|
||||
[{ node: `Success ${i}`, type: 'main', index: 0 }],
|
||||
[{ node: `Error Handler ${i}`, type: 'main', index: 0 }]
|
||||
]
|
||||
};
|
||||
}
|
||||
|
||||
const workflow = { nodes, connections };
|
||||
|
||||
const startTime = performance.now();
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const endTime = performance.now();
|
||||
|
||||
const executionTime = endTime - startTime;
|
||||
|
||||
// Should validate efficiently despite many parallel paths
|
||||
expect(executionTime).toBeLessThan(8000); // Less than 8 seconds
|
||||
|
||||
// Should not have errors about incorrect configuration
|
||||
const incorrectConfigErrors = result.errors.filter(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
);
|
||||
expect(incorrectConfigErrors.length).toBe(0);
|
||||
|
||||
console.log(`Validated ${nodes.length} nodes with ${parallelPathCount} parallel error paths in ${executionTime.toFixed(2)}ms`);
|
||||
});
|
||||
|
||||
it('should handle worst-case scenario with many incorrect configurations efficiently', async () => {
|
||||
// Create a workflow where many nodes have the incorrect error configuration
|
||||
// This tests the performance of the error detection algorithm
|
||||
const nodeCount = 300;
|
||||
const nodes = [];
|
||||
const connections: any = {};
|
||||
|
||||
for (let i = 1; i <= nodeCount; i++) {
|
||||
// Main node
|
||||
nodes.push({
|
||||
id: `main-${i}`,
|
||||
name: `Main ${i}`,
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 1,
|
||||
position: [i * 20, 100],
|
||||
parameters: {}
|
||||
});
|
||||
|
||||
// Success handler
|
||||
nodes.push({
|
||||
id: `success-${i}`,
|
||||
name: `Success ${i}`,
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [i * 20, 200],
|
||||
parameters: {}
|
||||
});
|
||||
|
||||
// Error handler (with error-indicating name)
|
||||
nodes.push({
|
||||
id: `error-${i}`,
|
||||
name: `Error Handler ${i}`,
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [i * 20, 300],
|
||||
parameters: {}
|
||||
});
|
||||
|
||||
// INCORRECT configuration: both success and error handlers in main[0]
|
||||
connections[`Main ${i}`] = {
|
||||
main: [
|
||||
[
|
||||
{ node: `Success ${i}`, type: 'main', index: 0 },
|
||||
{ node: `Error Handler ${i}`, type: 'main', index: 0 } // Wrong!
|
||||
]
|
||||
]
|
||||
};
|
||||
}
|
||||
|
||||
const workflow = { nodes, connections };
|
||||
|
||||
const startTime = performance.now();
|
||||
const result = await validator.validateWorkflow(workflow as any);
|
||||
const endTime = performance.now();
|
||||
|
||||
const executionTime = endTime - startTime;
|
||||
|
||||
// Should complete within reasonable time even when generating many errors
|
||||
expect(executionTime).toBeLessThan(15000); // Less than 15 seconds
|
||||
|
||||
// Should detect ALL incorrect configurations
|
||||
const incorrectConfigErrors = result.errors.filter(e =>
|
||||
e.message.includes('Incorrect error output configuration')
|
||||
);
|
||||
expect(incorrectConfigErrors.length).toBe(nodeCount); // One error per node
|
||||
|
||||
console.log(`Detected ${incorrectConfigErrors.length} incorrect configurations in ${nodes.length} nodes in ${executionTime.toFixed(2)}ms`);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Memory Usage and Optimization', () => {
|
||||
it('should not leak memory during large workflow validation', async () => {
|
||||
// Get initial memory usage
|
||||
const initialMemory = process.memoryUsage().heapUsed;
|
||||
|
||||
// Validate multiple large workflows
|
||||
for (let run = 0; run < 5; run++) {
|
||||
const nodeCount = 200;
|
||||
const nodes = [];
|
||||
const connections: any = {};
|
||||
|
||||
for (let i = 1; i <= nodeCount; i++) {
|
||||
nodes.push({
|
||||
id: i.toString(),
|
||||
name: `Node${i}`,
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 1,
|
||||
position: [i * 10, 100],
|
||||
parameters: {},
|
||||
onError: 'continueErrorOutput'
|
||||
});
|
||||
|
||||
if (i > 1) {
|
||||
connections[`Node${i - 1}`] = {
|
||||
main: [
|
||||
[{ node: `Node${i}`, type: 'main', index: 0 }],
|
||||
[{ node: `Error${i}`, type: 'main', index: 0 }]
|
||||
]
|
||||
};
|
||||
|
||||
nodes.push({
|
||||
id: `error-${i}`,
|
||||
name: `Error${i}`,
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [i * 10, 200],
|
||||
parameters: {}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const workflow = { nodes, connections };
|
||||
await validator.validateWorkflow(workflow as any);
|
||||
|
||||
// Force garbage collection if available
|
||||
if (global.gc) {
|
||||
global.gc();
|
||||
}
|
||||
}
|
||||
|
||||
const finalMemory = process.memoryUsage().heapUsed;
|
||||
const memoryIncrease = finalMemory - initialMemory;
|
||||
const memoryIncreaseMB = memoryIncrease / (1024 * 1024);
|
||||
|
||||
// Memory increase should be reasonable (less than 50MB)
|
||||
expect(memoryIncreaseMB).toBeLessThan(50);
|
||||
|
||||
console.log(`Memory increase after 5 large workflow validations: ${memoryIncreaseMB.toFixed(2)}MB`);
|
||||
});
|
||||
|
||||
it('should handle concurrent validation requests efficiently', async () => {
|
||||
// Create multiple validation requests that run concurrently
|
||||
const concurrentRequests = 10;
|
||||
const workflows = [];
|
||||
|
||||
// Prepare workflows
|
||||
for (let r = 0; r < concurrentRequests; r++) {
|
||||
const nodeCount = 50;
|
||||
const nodes = [];
|
||||
const connections: any = {};
|
||||
|
||||
for (let i = 1; i <= nodeCount; i++) {
|
||||
nodes.push({
|
||||
id: `${r}-${i}`,
|
||||
name: `R${r}Node${i}`,
|
||||
type: i % 2 === 0 ? 'n8n-nodes-base.httpRequest' : 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [i * 20, r * 100],
|
||||
parameters: {},
|
||||
...(i % 3 === 0 ? { onError: 'continueErrorOutput' } : {})
|
||||
});
|
||||
|
||||
if (i > 1) {
|
||||
const hasError = i % 3 === 0;
|
||||
const isIncorrect = i % 6 === 0;
|
||||
|
||||
if (hasError && isIncorrect) {
|
||||
// Incorrect configuration
|
||||
connections[`R${r}Node${i - 1}`] = {
|
||||
main: [
|
||||
[
|
||||
{ node: `R${r}Node${i}`, type: 'main', index: 0 },
|
||||
{ node: `R${r}Error${i}`, type: 'main', index: 0 } // Wrong!
|
||||
]
|
||||
]
|
||||
};
|
||||
|
||||
nodes.push({
|
||||
id: `${r}-error-${i}`,
|
||||
name: `R${r}Error${i}`,
|
||||
type: 'n8n-nodes-base.respondToWebhook',
|
||||
typeVersion: 1,
|
||||
position: [i * 20, r * 100 + 50],
|
||||
parameters: {}
|
||||
});
|
||||
} else if (hasError) {
|
||||
// Correct configuration
|
||||
connections[`R${r}Node${i - 1}`] = {
|
||||
main: [
|
||||
[{ node: `R${r}Node${i}`, type: 'main', index: 0 }],
|
||||
[{ node: `R${r}Error${i}`, type: 'main', index: 0 }]
|
||||
]
|
||||
};
|
||||
|
||||
nodes.push({
|
||||
id: `${r}-error-${i}`,
|
||||
name: `R${r}Error${i}`,
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [i * 20, r * 100 + 50],
|
||||
parameters: {}
|
||||
});
|
||||
} else {
|
||||
// Normal connection
|
||||
connections[`R${r}Node${i - 1}`] = {
|
||||
main: [
|
||||
[{ node: `R${r}Node${i}`, type: 'main', index: 0 }]
|
||||
]
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
workflows.push({ nodes, connections });
|
||||
}
|
||||
|
||||
// Run concurrent validations
|
||||
const startTime = performance.now();
|
||||
const results = await Promise.all(
|
||||
workflows.map(workflow => validator.validateWorkflow(workflow as any))
|
||||
);
|
||||
const endTime = performance.now();
|
||||
|
||||
const totalTime = endTime - startTime;
|
||||
|
||||
// All validations should complete
|
||||
expect(results).toHaveLength(concurrentRequests);
|
||||
|
||||
// Each result should be valid
|
||||
results.forEach(result => {
|
||||
expect(Array.isArray(result.errors)).toBe(true);
|
||||
expect(Array.isArray(result.warnings)).toBe(true);
|
||||
});
|
||||
|
||||
// Concurrent execution should be efficient
|
||||
expect(totalTime).toBeLessThan(20000); // Less than 20 seconds total
|
||||
|
||||
console.log(`Completed ${concurrentRequests} concurrent validations in ${totalTime.toFixed(2)}ms`);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,892 +0,0 @@
|
||||
/**
|
||||
* Tests for WorkflowValidator - Tool Variant Validation
|
||||
*
|
||||
* Tests the validateAIToolSource() method which ensures that base nodes
|
||||
* with ai_tool connections use the correct Tool variant node type.
|
||||
*
|
||||
* Coverage:
|
||||
* - Langchain tool nodes pass validation
|
||||
* - Tool variant nodes pass validation
|
||||
* - Base nodes with Tool variants fail with WRONG_NODE_TYPE_FOR_AI_TOOL
|
||||
* - Error includes fix suggestion with tool-variant-correction type
|
||||
* - Unknown nodes don't cause errors
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest';
|
||||
import { WorkflowValidator } from '@/services/workflow-validator';
|
||||
import { NodeRepository } from '@/database/node-repository';
|
||||
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/database/node-repository');
|
||||
vi.mock('@/services/enhanced-config-validator');
|
||||
vi.mock('@/utils/logger');
|
||||
|
||||
describe('WorkflowValidator - Tool Variant Validation', () => {
|
||||
let validator: WorkflowValidator;
|
||||
let mockRepository: NodeRepository;
|
||||
let mockValidator: typeof EnhancedConfigValidator;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Create mock repository
|
||||
mockRepository = {
|
||||
getNode: vi.fn((nodeType: string) => {
|
||||
// Mock base node with Tool variant available
|
||||
if (nodeType === 'nodes-base.supabase') {
|
||||
return {
|
||||
nodeType: 'nodes-base.supabase',
|
||||
displayName: 'Supabase',
|
||||
isAITool: true,
|
||||
hasToolVariant: true,
|
||||
isToolVariant: false,
|
||||
isTrigger: false,
|
||||
properties: []
|
||||
};
|
||||
}
|
||||
|
||||
// Mock Tool variant node
|
||||
if (nodeType === 'nodes-base.supabaseTool') {
|
||||
return {
|
||||
nodeType: 'nodes-base.supabaseTool',
|
||||
displayName: 'Supabase Tool',
|
||||
isAITool: true,
|
||||
hasToolVariant: false,
|
||||
isToolVariant: true,
|
||||
toolVariantOf: 'nodes-base.supabase',
|
||||
isTrigger: false,
|
||||
properties: []
|
||||
};
|
||||
}
|
||||
|
||||
// Mock langchain node (Calculator tool)
|
||||
if (nodeType === 'nodes-langchain.toolCalculator') {
|
||||
return {
|
||||
nodeType: 'nodes-langchain.toolCalculator',
|
||||
displayName: 'Calculator',
|
||||
isAITool: true,
|
||||
hasToolVariant: false,
|
||||
isToolVariant: false,
|
||||
isTrigger: false,
|
||||
properties: []
|
||||
};
|
||||
}
|
||||
|
||||
// Mock HTTP Request Tool node
|
||||
if (nodeType === 'nodes-langchain.toolHttpRequest') {
|
||||
return {
|
||||
nodeType: 'nodes-langchain.toolHttpRequest',
|
||||
displayName: 'HTTP Request Tool',
|
||||
isAITool: true,
|
||||
hasToolVariant: false,
|
||||
isToolVariant: false,
|
||||
isTrigger: false,
|
||||
properties: []
|
||||
};
|
||||
}
|
||||
|
||||
// Mock base node without Tool variant
|
||||
if (nodeType === 'nodes-base.httpRequest') {
|
||||
return {
|
||||
nodeType: 'nodes-base.httpRequest',
|
||||
displayName: 'HTTP Request',
|
||||
isAITool: false,
|
||||
hasToolVariant: false,
|
||||
isToolVariant: false,
|
||||
isTrigger: false,
|
||||
properties: []
|
||||
};
|
||||
}
|
||||
|
||||
return null; // Unknown node
|
||||
})
|
||||
} as any;
|
||||
|
||||
mockValidator = EnhancedConfigValidator;
|
||||
|
||||
validator = new WorkflowValidator(mockRepository, mockValidator);
|
||||
});
|
||||
|
||||
describe('validateAIToolSource - Langchain tool nodes', () => {
|
||||
it('should pass validation for Calculator tool node', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'calculator-1',
|
||||
name: 'Calculator',
|
||||
type: 'n8n-nodes-langchain.toolCalculator',
|
||||
typeVersion: 1.2,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
Calculator: {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should not have errors about wrong node type for AI tool
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should pass validation for HTTP Request Tool node', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'http-tool-1',
|
||||
name: 'HTTP Request Tool',
|
||||
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
|
||||
typeVersion: 1.2,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {
|
||||
url: 'https://api.example.com',
|
||||
toolDescription: 'Fetch data from API'
|
||||
}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request Tool': {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAIToolSource - Tool variant nodes', () => {
|
||||
it('should pass validation for Tool variant node (supabaseTool)', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'supabase-tool-1',
|
||||
name: 'Supabase Tool',
|
||||
type: 'n8n-nodes-base.supabaseTool',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {
|
||||
toolDescription: 'Query Supabase database'
|
||||
}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Supabase Tool': {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should verify Tool variant is marked correctly in database', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'supabase-tool-1',
|
||||
name: 'Supabase Tool',
|
||||
type: 'n8n-nodes-base.supabaseTool',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Supabase Tool': {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
await validator.validateWorkflow(workflow);
|
||||
|
||||
// Verify repository was called to check if it's a Tool variant
|
||||
expect(mockRepository.getNode).toHaveBeenCalledWith('nodes-base.supabaseTool');
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAIToolSource - Base nodes with Tool variants', () => {
|
||||
it('should fail when base node is used instead of Tool variant', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'supabase-1',
|
||||
name: 'Supabase',
|
||||
type: 'n8n-nodes-base.supabase',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
Supabase: {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Should have error with WRONG_NODE_TYPE_FOR_AI_TOOL code
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should include fix suggestion in error', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'supabase-1',
|
||||
name: 'Supabase',
|
||||
type: 'n8n-nodes-base.supabase',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
Supabase: {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
const toolVariantError = result.errors.find(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
) as any;
|
||||
|
||||
expect(toolVariantError).toBeDefined();
|
||||
expect(toolVariantError.fix).toBeDefined();
|
||||
expect(toolVariantError.fix.type).toBe('tool-variant-correction');
|
||||
expect(toolVariantError.fix.currentType).toBe('n8n-nodes-base.supabase');
|
||||
expect(toolVariantError.fix.suggestedType).toBe('n8n-nodes-base.supabaseTool');
|
||||
expect(toolVariantError.fix.description).toContain('n8n-nodes-base.supabase');
|
||||
expect(toolVariantError.fix.description).toContain('n8n-nodes-base.supabaseTool');
|
||||
});
|
||||
|
||||
it('should provide clear error message', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'supabase-1',
|
||||
name: 'Supabase',
|
||||
type: 'n8n-nodes-base.supabase',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
Supabase: {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
const toolVariantError = result.errors.find(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
|
||||
expect(toolVariantError).toBeDefined();
|
||||
expect(toolVariantError!.message).toContain('cannot output ai_tool connections');
|
||||
expect(toolVariantError!.message).toContain('Tool variant');
|
||||
expect(toolVariantError!.message).toContain('n8n-nodes-base.supabaseTool');
|
||||
});
|
||||
|
||||
it('should handle multiple base nodes incorrectly used as tools', async () => {
|
||||
mockRepository.getNode = vi.fn((nodeType: string) => {
|
||||
if (nodeType === 'nodes-base.postgres') {
|
||||
return {
|
||||
nodeType: 'nodes-base.postgres',
|
||||
displayName: 'Postgres',
|
||||
isAITool: true,
|
||||
hasToolVariant: true,
|
||||
isToolVariant: false,
|
||||
isTrigger: false,
|
||||
properties: []
|
||||
};
|
||||
}
|
||||
if (nodeType === 'nodes-base.supabase') {
|
||||
return {
|
||||
nodeType: 'nodes-base.supabase',
|
||||
displayName: 'Supabase',
|
||||
isAITool: true,
|
||||
hasToolVariant: true,
|
||||
isToolVariant: false,
|
||||
isTrigger: false,
|
||||
properties: []
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}) as any;
|
||||
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'postgres-1',
|
||||
name: 'Postgres',
|
||||
type: 'n8n-nodes-base.postgres',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'supabase-1',
|
||||
name: 'Supabase',
|
||||
type: 'n8n-nodes-base.supabase',
|
||||
typeVersion: 1,
|
||||
position: [250, 400] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
Postgres: {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
},
|
||||
Supabase: {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAIToolSource - Unknown nodes', () => {
|
||||
it('should not error for unknown node types', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'unknown-1',
|
||||
name: 'Unknown Tool',
|
||||
type: 'custom-package.unknownTool',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Unknown Tool': {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Unknown nodes should not cause tool variant errors
|
||||
// Let other validation handle unknown node types
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(0);
|
||||
|
||||
// But there might be an "Unknown node type" error from different validation
|
||||
const unknownNodeErrors = result.errors.filter(e =>
|
||||
e.message && e.message.includes('Unknown node type')
|
||||
);
|
||||
expect(unknownNodeErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should not error for community nodes', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'community-1',
|
||||
name: 'Community Tool',
|
||||
type: 'community-package.customTool',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'Community Tool': {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// Community nodes should not cause tool variant errors
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAIToolSource - Edge cases', () => {
|
||||
it('should not error for base nodes without ai_tool connections', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'supabase-1',
|
||||
name: 'Supabase',
|
||||
type: 'n8n-nodes-base.supabase',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'set-1',
|
||||
name: 'Set',
|
||||
type: 'n8n-nodes-base.set',
|
||||
typeVersion: 1,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
Supabase: {
|
||||
main: [[{ node: 'Set', type: 'main', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// No ai_tool connections, so no tool variant validation errors
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should not error when base node without Tool variant uses ai_tool', async () => {
|
||||
const workflow = {
|
||||
nodes: [
|
||||
{
|
||||
id: 'http-1',
|
||||
name: 'HTTP Request',
|
||||
type: 'n8n-nodes-base.httpRequest',
|
||||
typeVersion: 1,
|
||||
position: [250, 300] as [number, number],
|
||||
parameters: {}
|
||||
},
|
||||
{
|
||||
id: 'agent-1',
|
||||
name: 'AI Agent',
|
||||
type: '@n8n/n8n-nodes-langchain.agent',
|
||||
typeVersion: 1.7,
|
||||
position: [450, 300] as [number, number],
|
||||
parameters: {}
|
||||
}
|
||||
],
|
||||
connections: {
|
||||
'HTTP Request': {
|
||||
ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const result = await validator.validateWorkflow(workflow);
|
||||
|
||||
// httpRequest has no Tool variant, so this should produce a different error
|
||||
const toolVariantErrors = result.errors.filter(e =>
|
||||
e.code === 'WRONG_NODE_TYPE_FOR_AI_TOOL'
|
||||
);
|
||||
expect(toolVariantErrors).toHaveLength(0);
|
||||
|
||||
// Should have INVALID_AI_TOOL_SOURCE error instead
|
||||
const invalidToolErrors = result.errors.filter(e =>
|
||||
e.code === 'INVALID_AI_TOOL_SOURCE'
|
||||
);
|
||||
expect(invalidToolErrors.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAllNodes - Inferred Tool Variants (Issue #522)', () => {
  /**
   * Tests for dynamic AI Tool nodes that are created at runtime by n8n
   * when ANY node is used in an AI Agent's tool slot.
   *
   * These nodes (e.g., googleDriveTool, googleSheetsTool) don't exist in npm packages
   * but are valid when the base node exists.
   *
   * NOTE(review): relies on `mockRepository` and `validator` declared in an
   * enclosing scope (outside this describe) — confirm they are set up there.
   */

  beforeEach(() => {
    // Update mock repository to include Google nodes.
    // The stub mirrors repository lookup semantics: known node types return a
    // metadata record, anything else returns null ("unknown node").
    mockRepository.getNode = vi.fn((nodeType: string) => {
      // Base node with Tool variant
      if (nodeType === 'nodes-base.supabase') {
        return {
          nodeType: 'nodes-base.supabase',
          displayName: 'Supabase',
          isAITool: true,
          hasToolVariant: true,
          isToolVariant: false,
          isTrigger: false,
          properties: []
        };
      }

      // Tool variant in database (explicit record — inference should NOT fire for it)
      if (nodeType === 'nodes-base.supabaseTool') {
        return {
          nodeType: 'nodes-base.supabaseTool',
          displayName: 'Supabase Tool',
          isAITool: true,
          hasToolVariant: false,
          isToolVariant: true,
          toolVariantOf: 'nodes-base.supabase',
          isTrigger: false,
          properties: []
        };
      }

      // Google Drive base node (exists, but no Tool variant in DB)
      if (nodeType === 'nodes-base.googleDrive') {
        return {
          nodeType: 'nodes-base.googleDrive',
          displayName: 'Google Drive',
          isAITool: false, // Not marked as AI tool in npm package
          hasToolVariant: false, // No Tool variant in database
          isToolVariant: false,
          isTrigger: false,
          properties: [],
          category: 'files'
        };
      }

      // Google Sheets base node (exists, but no Tool variant in DB)
      if (nodeType === 'nodes-base.googleSheets') {
        return {
          nodeType: 'nodes-base.googleSheets',
          displayName: 'Google Sheets',
          isAITool: false,
          hasToolVariant: false,
          isToolVariant: false,
          isTrigger: false,
          properties: [],
          category: 'productivity'
        };
      }

      // AI Agent node
      if (nodeType === 'nodes-langchain.agent') {
        return {
          nodeType: 'nodes-langchain.agent',
          displayName: 'AI Agent',
          isAITool: false,
          hasToolVariant: false,
          isToolVariant: false,
          isTrigger: false,
          properties: []
        };
      }

      return null; // Unknown node
    }) as any;
  });

  // Happy path: *Tool suffix + existing base node => accepted with a warning.
  it('should pass validation for googleDriveTool when googleDrive exists', async () => {
    const workflow = {
      nodes: [
        {
          id: 'drive-tool-1',
          name: 'Google Drive Tool',
          type: 'n8n-nodes-base.googleDriveTool',
          typeVersion: 3,
          position: [250, 300] as [number, number],
          parameters: {}
        }
      ],
      connections: {}
    };

    const result = await validator.validateWorkflow(workflow);

    // Should NOT have "Unknown node type" error
    const unknownErrors = result.errors.filter(e =>
      e.message && e.message.includes('Unknown node type')
    );
    expect(unknownErrors).toHaveLength(0);

    // Should have INFERRED_TOOL_VARIANT warning
    const inferredWarnings = result.warnings.filter(e =>
      (e as any).code === 'INFERRED_TOOL_VARIANT'
    );
    expect(inferredWarnings).toHaveLength(1);
    expect(inferredWarnings[0].message).toContain('googleDriveTool');
    expect(inferredWarnings[0].message).toContain('Google Drive');
  });

  // Same inference path for a second base node with a different category.
  it('should pass validation for googleSheetsTool when googleSheets exists', async () => {
    const workflow = {
      nodes: [
        {
          id: 'sheets-tool-1',
          name: 'Google Sheets Tool',
          type: 'n8n-nodes-base.googleSheetsTool',
          typeVersion: 4,
          position: [250, 300] as [number, number],
          parameters: {}
        }
      ],
      connections: {}
    };

    const result = await validator.validateWorkflow(workflow);

    // Should NOT have "Unknown node type" error
    const unknownErrors = result.errors.filter(e =>
      e.message && e.message.includes('Unknown node type')
    );
    expect(unknownErrors).toHaveLength(0);

    // Should have INFERRED_TOOL_VARIANT warning
    const inferredWarnings = result.warnings.filter(e =>
      (e as any).code === 'INFERRED_TOOL_VARIANT'
    );
    expect(inferredWarnings).toHaveLength(1);
    expect(inferredWarnings[0].message).toContain('googleSheetsTool');
    expect(inferredWarnings[0].message).toContain('Google Sheets');
  });

  // Negative case: inference must not rescue a *Tool name whose base node is unknown.
  it('should report error for unknownNodeTool when base node does not exist', async () => {
    const workflow = {
      nodes: [
        {
          id: 'unknown-tool-1',
          name: 'Unknown Tool',
          type: 'n8n-nodes-base.nonExistentNodeTool',
          typeVersion: 1,
          position: [250, 300] as [number, number],
          parameters: {}
        }
      ],
      connections: {}
    };

    const result = await validator.validateWorkflow(workflow);

    // Should have "Unknown node type" error
    const unknownErrors = result.errors.filter(e =>
      e.message && e.message.includes('Unknown node type')
    );
    expect(unknownErrors).toHaveLength(1);

    // Should NOT have INFERRED_TOOL_VARIANT warning
    const inferredWarnings = result.warnings.filter(e =>
      (e as any).code === 'INFERRED_TOOL_VARIANT'
    );
    expect(inferredWarnings).toHaveLength(0);
  });

  // One warning per inferred node — two inferred tools yield two warnings.
  it('should handle multiple inferred tool variants in same workflow', async () => {
    const workflow = {
      nodes: [
        {
          id: 'drive-tool-1',
          name: 'Google Drive Tool',
          type: 'n8n-nodes-base.googleDriveTool',
          typeVersion: 3,
          position: [250, 300] as [number, number],
          parameters: {}
        },
        {
          id: 'sheets-tool-1',
          name: 'Google Sheets Tool',
          type: 'n8n-nodes-base.googleSheetsTool',
          typeVersion: 4,
          position: [250, 400] as [number, number],
          parameters: {}
        },
        {
          id: 'agent-1',
          name: 'AI Agent',
          type: '@n8n/n8n-nodes-langchain.agent',
          typeVersion: 1.7,
          position: [450, 300] as [number, number],
          parameters: {}
        }
      ],
      connections: {
        'Google Drive Tool': {
          ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
        },
        'Google Sheets Tool': {
          ai_tool: [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
        }
      }
    };

    const result = await validator.validateWorkflow(workflow);

    // Should NOT have "Unknown node type" errors
    const unknownErrors = result.errors.filter(e =>
      e.message && e.message.includes('Unknown node type')
    );
    expect(unknownErrors).toHaveLength(0);

    // Should have 2 INFERRED_TOOL_VARIANT warnings
    const inferredWarnings = result.warnings.filter(e =>
      (e as any).code === 'INFERRED_TOOL_VARIANT'
    );
    expect(inferredWarnings).toHaveLength(2);
  });

  // supabaseTool exists as a real DB record, so no inference warning is emitted.
  it('should prefer database record over inference for supabaseTool', async () => {
    const workflow = {
      nodes: [
        {
          id: 'supabase-tool-1',
          name: 'Supabase Tool',
          type: 'n8n-nodes-base.supabaseTool',
          typeVersion: 1,
          position: [250, 300] as [number, number],
          parameters: {}
        }
      ],
      connections: {}
    };

    const result = await validator.validateWorkflow(workflow);

    // Should NOT have "Unknown node type" error
    const unknownErrors = result.errors.filter(e =>
      e.message && e.message.includes('Unknown node type')
    );
    expect(unknownErrors).toHaveLength(0);

    // Should NOT have INFERRED_TOOL_VARIANT warning (it's in database)
    const inferredWarnings = result.warnings.filter(e =>
      (e as any).code === 'INFERRED_TOOL_VARIANT'
    );
    expect(inferredWarnings).toHaveLength(0);
  });

  // Pins the user-facing warning text so the message stays actionable.
  it('should include helpful message in warning', async () => {
    const workflow = {
      nodes: [
        {
          id: 'drive-tool-1',
          name: 'Google Drive Tool',
          type: 'n8n-nodes-base.googleDriveTool',
          typeVersion: 3,
          position: [250, 300] as [number, number],
          parameters: {}
        }
      ],
      connections: {}
    };

    const result = await validator.validateWorkflow(workflow);

    const inferredWarning = result.warnings.find(e =>
      (e as any).code === 'INFERRED_TOOL_VARIANT'
    );

    expect(inferredWarning).toBeDefined();
    expect(inferredWarning!.message).toContain('inferred as a dynamic AI Tool variant');
    expect(inferredWarning!.message).toContain('nodes-base.googleDrive');
    expect(inferredWarning!.message).toContain('Google Drive');
    expect(inferredWarning!.message).toContain('AI Agent');
  });
});
|
||||
});
|
||||
@@ -1,513 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { WorkflowValidator } from '@/services/workflow-validator';
// NOTE(review): EnhancedConfigValidator is imported but not referenced in the
// visible tests — confirm it is used elsewhere in this file before removing.
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';

// Mock logger to prevent console output.
// vi.mock is hoisted by Vitest above the imports, so every module that
// constructs a Logger receives this silent stub.
vi.mock('@/utils/logger', () => ({
  Logger: vi.fn().mockImplementation(() => ({
    error: vi.fn(),
    warn: vi.fn(),
    info: vi.fn()
  }))
}));
|
||||
|
||||
describe('WorkflowValidator - Simple Unit Tests', () => {
  let validator: WorkflowValidator;

  // Create a simple mock repository.
  // Lookup is a plain dictionary: known types return their record, unknown
  // types return null (same contract the validator expects from the real repo).
  const createMockRepository = (nodeData: Record<string, any>) => ({
    getNode: vi.fn((type: string) => nodeData[type] || null),
    findSimilarNodes: vi.fn().mockReturnValue([])
  });

  // Create a simple mock validator class whose validateWithMode always
  // returns the supplied canned result (keeps node-level validation inert).
  const createMockValidatorClass = (validationResult: any) => ({
    validateWithMode: vi.fn().mockReturnValue(validationResult)
  });

  beforeEach(() => {
    // Reset call counts on all vi.fn() mocks between tests.
    vi.clearAllMocks();
  });

  describe('Basic validation scenarios', () => {
    it('should pass validation for a webhook workflow with single node', async () => {
      // Arrange
      // Node data is registered under BOTH the full npm-style key and the
      // short database key, so lookup succeeds regardless of normalization.
      const nodeData = {
        'n8n-nodes-base.webhook': {
          type: 'nodes-base.webhook',
          displayName: 'Webhook',
          name: 'webhook',
          version: 1,
          isVersioned: true,
          properties: []
        },
        'nodes-base.webhook': {
          type: 'nodes-base.webhook',
          displayName: 'Webhook',
          name: 'webhook',
          version: 1,
          isVersioned: true,
          properties: []
        }
      };

      const mockRepository = createMockRepository(nodeData);
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'Webhook Workflow',
        nodes: [
          {
            id: '1',
            name: 'Webhook',
            type: 'n8n-nodes-base.webhook',
            typeVersion: 1,
            position: [250, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {}
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert
      expect(result.valid).toBe(true);
      expect(result.errors).toHaveLength(0);
      // Single webhook node should just have a warning about no connections
      expect(result.warnings.some(w => w.message.includes('no connections'))).toBe(true);
    });

    it('should fail validation for unknown node types', async () => {
      // Arrange
      const mockRepository = createMockRepository({}); // Empty node data
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'Test Workflow',
        nodes: [
          {
            id: '1',
            name: 'Unknown',
            type: 'n8n-nodes-base.unknownNode',
            position: [250, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {}
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert
      expect(result.valid).toBe(false);
      // Check for either the error message or valid being false
      const hasUnknownNodeError = result.errors.some(e =>
        e.message && (e.message.includes('Unknown node type') || e.message.includes('unknown-node-type'))
      );
      expect(result.errors.length > 0 || hasUnknownNodeError).toBe(true);
    });

    it('should detect duplicate node names', async () => {
      // Arrange
      const mockRepository = createMockRepository({});
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'Duplicate Names',
        nodes: [
          {
            id: '1',
            name: 'HTTP Request',
            type: 'n8n-nodes-base.httpRequest',
            position: [250, 300] as [number, number],
            parameters: {}
          },
          {
            id: '2',
            name: 'HTTP Request', // Duplicate name
            type: 'n8n-nodes-base.httpRequest',
            position: [450, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {}
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert
      expect(result.valid).toBe(false);
      expect(result.errors.some(e => e.message.includes('Duplicate node name'))).toBe(true);
    });

    it('should validate connections properly', async () => {
      // Arrange
      const nodeData = {
        'n8n-nodes-base.manualTrigger': {
          type: 'nodes-base.manualTrigger',
          displayName: 'Manual Trigger',
          isVersioned: false,
          properties: []
        },
        'nodes-base.manualTrigger': {
          type: 'nodes-base.manualTrigger',
          displayName: 'Manual Trigger',
          isVersioned: false,
          properties: []
        },
        'n8n-nodes-base.set': {
          type: 'nodes-base.set',
          displayName: 'Set',
          version: 2,
          isVersioned: true,
          properties: []
        },
        'nodes-base.set': {
          type: 'nodes-base.set',
          displayName: 'Set',
          version: 2,
          isVersioned: true,
          properties: []
        }
      };

      const mockRepository = createMockRepository(nodeData);
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'Connected Workflow',
        nodes: [
          {
            id: '1',
            name: 'Manual Trigger',
            type: 'n8n-nodes-base.manualTrigger',
            position: [250, 300] as [number, number],
            parameters: {}
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            typeVersion: 2,
            position: [450, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {
          'Manual Trigger': {
            main: [[{ node: 'Set', type: 'main', index: 0 }]]
          }
        }
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert — one valid trigger→set connection, nothing invalid.
      expect(result.valid).toBe(true);
      expect(result.statistics.validConnections).toBe(1);
      expect(result.statistics.invalidConnections).toBe(0);
    });

    it('should detect workflow cycles', async () => {
      // Arrange
      const nodeData = {
        'n8n-nodes-base.set': {
          type: 'nodes-base.set',
          displayName: 'Set',
          isVersioned: true,
          version: 2,
          properties: []
        },
        'nodes-base.set': {
          type: 'nodes-base.set',
          displayName: 'Set',
          isVersioned: true,
          version: 2,
          properties: []
        }
      };

      const mockRepository = createMockRepository(nodeData);
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'Cyclic Workflow',
        nodes: [
          {
            id: '1',
            name: 'Node A',
            type: 'n8n-nodes-base.set',
            typeVersion: 2,
            position: [250, 300] as [number, number],
            parameters: {}
          },
          {
            id: '2',
            name: 'Node B',
            type: 'n8n-nodes-base.set',
            typeVersion: 2,
            position: [450, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {
          'Node A': {
            main: [[{ node: 'Node B', type: 'main', index: 0 }]]
          },
          'Node B': {
            main: [[{ node: 'Node A', type: 'main', index: 0 }]] // Creates a cycle
          }
        }
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert
      expect(result.valid).toBe(false);
      expect(result.errors.some(e => e.message.includes('cycle'))).toBe(true);
    });

    it('should handle null workflow gracefully', async () => {
      // Arrange
      const mockRepository = createMockRepository({});
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      // Act — null input must produce a structured error, not a throw.
      const result = await validator.validateWorkflow(null as any);

      // Assert
      expect(result.valid).toBe(false);
      expect(result.errors[0].message).toContain('workflow is null or undefined');
    });

    it('should require connections for multi-node workflows', async () => {
      // Arrange
      const nodeData = {
        'n8n-nodes-base.manualTrigger': {
          type: 'nodes-base.manualTrigger',
          displayName: 'Manual Trigger',
          properties: []
        },
        'nodes-base.manualTrigger': {
          type: 'nodes-base.manualTrigger',
          displayName: 'Manual Trigger',
          properties: []
        },
        'n8n-nodes-base.set': {
          type: 'nodes-base.set',
          displayName: 'Set',
          version: 2,
          isVersioned: true,
          properties: []
        },
        'nodes-base.set': {
          type: 'nodes-base.set',
          displayName: 'Set',
          version: 2,
          isVersioned: true,
          properties: []
        }
      };

      const mockRepository = createMockRepository(nodeData);
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'No Connections',
        nodes: [
          {
            id: '1',
            name: 'Manual Trigger',
            type: 'n8n-nodes-base.manualTrigger',
            position: [250, 300] as [number, number],
            parameters: {}
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            typeVersion: 2,
            position: [450, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {} // No connections between nodes
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert
      expect(result.valid).toBe(false);
      expect(result.errors.some(e => e.message.includes('Multi-node workflow has no connections'))).toBe(true);
    });

    it('should validate typeVersion for versioned nodes', async () => {
      // Arrange
      const nodeData = {
        'n8n-nodes-base.httpRequest': {
          type: 'nodes-base.httpRequest',
          displayName: 'HTTP Request',
          isVersioned: true,
          version: 3, // Latest version is 3
          properties: []
        },
        'nodes-base.httpRequest': {
          type: 'nodes-base.httpRequest',
          displayName: 'HTTP Request',
          isVersioned: true,
          version: 3,
          properties: []
        }
      };

      const mockRepository = createMockRepository(nodeData);
      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'Version Test',
        nodes: [
          {
            id: '1',
            name: 'HTTP Request',
            type: 'n8n-nodes-base.httpRequest',
            typeVersion: 2, // Outdated version
            position: [250, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {}
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert — outdated typeVersion is a warning, not an error.
      expect(result.warnings.some(w => w.message.includes('Outdated typeVersion'))).toBe(true);
    });

    it('should normalize and validate nodes-base prefix to find the node', async () => {
      // Arrange - Test that full-form types are normalized to short form to find the node
      // The repository only has the node under the SHORT normalized key (database format)
      const nodeData = {
        'nodes-base.webhook': { // Repository has it under SHORT form (database format)
          type: 'nodes-base.webhook',
          displayName: 'Webhook',
          isVersioned: true,
          version: 2,
          properties: []
        }
      };

      // Mock repository that simulates the normalization behavior
      // After our changes, getNode is called with the already-normalized type (short form)
      const mockRepository = {
        getNode: vi.fn((type: string) => {
          // The validator now normalizes to short form before calling getNode
          // So getNode receives 'nodes-base.webhook'
          if (type === 'nodes-base.webhook') {
            return nodeData['nodes-base.webhook'];
          }
          return null;
        }),
        findSimilarNodes: vi.fn().mockReturnValue([])
      };

      const mockValidatorClass = createMockValidatorClass({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: []
      });

      validator = new WorkflowValidator(mockRepository as any, mockValidatorClass as any);

      const workflow = {
        name: 'Valid Alternative Prefix',
        nodes: [
          {
            id: '1',
            name: 'Webhook',
            type: 'n8n-nodes-base.webhook', // Using the full-form prefix (will be normalized to short)
            position: [250, 300] as [number, number],
            parameters: {},
            typeVersion: 2
          }
        ],
        connections: {}
      };

      // Act
      const result = await validator.validateWorkflow(workflow as any);

      // Assert - The node should be found through normalization
      expect(result.valid).toBe(true);
      expect(result.errors).toHaveLength(0);

      // Verify the repository was called (once with original, once with normalized)
      expect(mockRepository.getNode).toHaveBeenCalled();
    });
  });
});
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user