refactor: streamline test suite — 33 fewer files, 11.9x faster (#670)

* refactor: streamline test suite - cut 33 files, enable parallel execution (11.9x speedup)

Remove duplicate, low-value, and fragmented test files while preserving
all meaningful coverage. Enable parallel test execution and remove
the entire benchmark infrastructure.

Key changes:
- Consolidate workflow-validator tests (13 files -> 3)
- Consolidate config-validator tests (9 files -> 3)
- Consolidate telemetry tests (11 files -> 6)
- Merge AI validator tests (2 files -> 1)
- Remove example/demo test files, mock-testing files, and already-skipped tests
- Remove benchmark infrastructure (10 files, CI workflow, 4 npm scripts)
- Enable parallel test execution (remove singleThread: true; see the config sketch below)
- Remove the retry:2 setting that was masking flaky tests
- Slim CI publish-results job

Results: 224 -> 191 test files, 4690 -> 4303 tests, 121K -> 106K lines
Local runtime: 319s -> 27s (11.9x speedup)
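
For reference, a minimal sketch of the vitest config change (the option names
follow vitest's documented API; the exact shape of this repo's vitest.config.ts
may differ):

  // vitest.config.ts (sketch)
  import { defineConfig } from 'vitest/config';

  export default defineConfig({
    test: {
      // previously: retry: 2 hid intermittent failures behind automatic re-runs
      retry: 0,
      poolOptions: {
        // previously: threads: { singleThread: true } forced serial execution
        threads: { singleThread: false },
      },
    },
  });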

Conceived by Romuald Członkowski - www.aiadvisors.pl/en

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* refactor: absorb config-validator satellite tests into consolidated file

The previous commit deleted 4 config-validator satellite files. This commit
merges their unique tests into the consolidated config-validator.test.ts,
recovering the 89 tests that were dropped during the bulk deletion.

Deduplicates 5 tests that existed in both the satellite files and the
security test file.

Conceived by Romuald Członkowski - www.aiadvisors.pl/en

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* fix: delete missed benchmark-pr.yml workflow, fix flaky session test

- Remove benchmark-pr.yml, which referenced the deleted benchmark:ci script
- Fix the session-persistence round-trip test by using timestamps closer to
  now, avoiding edge cases exposed by removing retry:2

Conceived by Romuald Członkowski - www.aiadvisors.pl/en

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* fix: rebuild FTS5 index after database rebuild to prevent stale rowid refs

The FTS5 content-synced index could retain phantom rowid references from
previous rebuild cycles, causing 'missing row N from content table'
errors on MATCH queries.

- Add explicit FTS5 rebuild command in rebuild script after all nodes saved (sketch below)
- Add FTS5 rebuild in test beforeAll as defense-in-depth
- Rebuild nodes.db with consistent FTS5 index
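
A minimal sketch of the rebuild step (the 'rebuild' command is standard SQLite
FTS5 behavior for external-content tables; the nodes_fts table name, database
path, and better-sqlite3 usage are assumptions about this codebase):

  // at the end of the rebuild script, after all nodes have been saved (sketch)
  import Database from 'better-sqlite3';

  const db = new Database('data/nodes.db');
  // For an external-content FTS5 table, this special insert regenerates the
  // whole index from the content table, discarding stale rowid references.
  db.exec("INSERT INTO nodes_fts(nodes_fts) VALUES('rebuild')");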

Conceived by Romuald Członkowski - www.aiadvisors.pl/en

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* fix: use recent timestamps in all session persistence tests

Session round-trip tests used timestamps 5-10 minutes in the past, which
could cause failures under CI load when combined with session timeout
validation. Use timestamps 30 seconds in the past for all valid-session
test data.
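
A minimal sketch of the fixture change (field names such as lastAccess are
illustrative, not the actual session shape):

  // sketch: a "valid session" fixture stamped ~30 seconds in the past,
  // far enough from "now" to be stable yet well inside any timeout window
  const THIRTY_SECONDS_MS = 30_000;
  const validSession = {
    id: 'test-session',                                    // illustrative fields
    lastAccess: new Date(Date.now() - THIRTY_SECONDS_MS),
  };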

Conceived by Romuald Członkowski - www.aiadvisors.pl/en

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Romuald Członkowski authored on 2026-03-27 14:22:22 +01:00, committed by GitHub
parent 07bd1d4cc2
commit de2abaf89d
75 changed files with 3718 additions and 21917 deletions

@@ -702,4 +702,244 @@ describe('WorkflowValidator - Loop Node Validation', () => {
      expect(result).toBeDefined();
    });
  });

  // ─── Loop Output Edge Cases (absorbed from loop-output-edge-cases) ──
  describe('Nodes without outputs', () => {
    it('should handle nodes with null outputs gracefully', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.httpRequest', outputs: null, outputNames: null, properties: [],
      });
      const workflow = {
        name: 'No Outputs',
        nodes: [
          { id: '1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', position: [100, 100], parameters: { url: 'https://example.com' } },
          { id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
        ],
        connections: { 'HTTP Request': { main: [[{ node: 'Set', type: 'main', index: 0 }]] } },
      };
      const result = await validator.validateWorkflow(workflow as any);
      expect(result).toBeDefined();
      const outputErrors = result.errors.filter(e => e.message?.includes('output') && !e.message?.includes('Connection'));
      expect(outputErrors).toHaveLength(0);
    });

    it('should handle nodes with empty outputs array', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.customNode', outputs: [], outputNames: [], properties: [],
      });
      const workflow = {
        name: 'Empty Outputs',
        nodes: [{ id: '1', name: 'Custom Node', type: 'n8n-nodes-base.customNode', position: [100, 100], parameters: {} }],
        connections: { 'Custom Node': { main: [[{ node: 'Custom Node', type: 'main', index: 0 }]] } },
      };
      const result = await validator.validateWorkflow(workflow as any);
      const selfRefWarnings = result.warnings.filter(w => w.message?.includes('self-referencing'));
      expect(selfRefWarnings).toHaveLength(1);
    });
  });

  describe('Invalid connection indices', () => {
    it('should handle very large connection indices', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.switch', outputs: [{ displayName: 'Output 1' }, { displayName: 'Output 2' }], properties: [],
      });
      const workflow = {
        name: 'Large Index',
        nodes: [
          { id: '1', name: 'Switch', type: 'n8n-nodes-base.switch', position: [100, 100], parameters: {} },
          { id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
        ],
        connections: { 'Switch': { main: [[{ node: 'Set', type: 'main', index: 999 }]] } },
      };
      const result = await validator.validateWorkflow(workflow as any);
      expect(result).toBeDefined();
    });
  });

  describe('Malformed connection structures', () => {
    it('should handle null connection objects', async () => {
      const workflow = {
        name: 'Null Connections',
        nodes: [{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} }],
        connections: { 'Split In Batches': { main: [null, [{ node: 'NonExistent', type: 'main', index: 0 }]] as any } },
      };
      const result = await validator.validateWorkflow(workflow as any);
      expect(result).toBeDefined();
    });

    it('should handle missing connection properties', async () => {
      const workflow = {
        name: 'Malformed Connections',
        nodes: [
          { id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
          { id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
        ],
        connections: {
          'Split In Batches': { main: [[{ node: 'Set' } as any, { type: 'main', index: 0 } as any, {} as any]] },
        },
      };
      const result = await validator.validateWorkflow(workflow as any);
      expect(result).toBeDefined();
      expect(result.errors.length).toBeGreaterThan(0);
    });
  });

  describe('Complex output structures', () => {
    it('should handle nodes with many outputs', async () => {
      const manyOutputs = Array.from({ length: 20 }, (_, i) => ({
        displayName: `Output ${i + 1}`, name: `output${i + 1}`,
      }));
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.complexSwitch', outputs: manyOutputs, outputNames: manyOutputs.map(o => o.name), properties: [],
      });
      const workflow = {
        name: 'Many Outputs',
        nodes: [
          { id: '1', name: 'Complex Switch', type: 'n8n-nodes-base.complexSwitch', position: [100, 100], parameters: {} },
          { id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
        ],
        connections: { 'Complex Switch': { main: Array.from({ length: 20 }, () => [{ node: 'Set', type: 'main', index: 0 }]) } },
      };
      const result = await validator.validateWorkflow(workflow as any);
      expect(result).toBeDefined();
    });

    it('should handle mixed output types (main, error, ai_tool)', async () => {
      mockNodeRepository.getNode.mockReturnValue({
        nodeType: 'nodes-base.complexNode', outputs: [{ displayName: 'Main', type: 'main' }, { displayName: 'Error', type: 'error' }], properties: [],
      });
      const workflow = {
        name: 'Mixed Output Types',
        nodes: [
          { id: '1', name: 'Complex Node', type: 'n8n-nodes-base.complexNode', position: [100, 100], parameters: {} },
          { id: '2', name: 'Main Handler', type: 'n8n-nodes-base.set', position: [300, 50], parameters: {} },
          { id: '3', name: 'Error Handler', type: 'n8n-nodes-base.set', position: [300, 150], parameters: {} },
          { id: '4', name: 'Tool', type: 'n8n-nodes-base.httpRequest', position: [500, 100], parameters: {} },
        ],
        connections: {
          'Complex Node': {
            main: [[{ node: 'Main Handler', type: 'main', index: 0 }]],
            error: [[{ node: 'Error Handler', type: 'main', index: 0 }]],
            ai_tool: [[{ node: 'Tool', type: 'main', index: 0 }]],
          },
        },
      };
      const result = await validator.validateWorkflow(workflow as any);
      expect(result).toBeDefined();
      expect(result.statistics.validConnections).toBe(3);
    });
  });

  describe('SplitInBatches specific edge cases', () => {
    it('should handle SplitInBatches with no connections', async () => {
      const workflow = {
        name: 'Isolated SplitInBatches',
        nodes: [{ id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} }],
        connections: {},
      };
      const result = await validator.validateWorkflow(workflow as any);
      const splitWarnings = result.warnings.filter(w => w.message?.includes('SplitInBatches') || w.message?.includes('loop') || w.message?.includes('done'));
      expect(splitWarnings).toHaveLength(0);
    });

    it('should handle SplitInBatches with only done output connected', async () => {
      const workflow = {
        name: 'Single Output SplitInBatches',
        nodes: [
          { id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
          { id: '2', name: 'Final Action', type: 'n8n-nodes-base.emailSend', position: [300, 100], parameters: {} },
        ],
        connections: { 'Split In Batches': { main: [[{ node: 'Final Action', type: 'main', index: 0 }], []] } },
      };
      const result = await validator.validateWorkflow(workflow as any);
      const loopWarnings = result.warnings.filter(w => w.message?.includes('loop') && w.message?.includes('connect back'));
      expect(loopWarnings).toHaveLength(0);
    });

    it('should handle SplitInBatches with both outputs to same node', async () => {
      const workflow = {
        name: 'Same Target SplitInBatches',
        nodes: [
          { id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
          { id: '2', name: 'Multi Purpose', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
        ],
        connections: {
          'Split In Batches': { main: [[{ node: 'Multi Purpose', type: 'main', index: 0 }], [{ node: 'Multi Purpose', type: 'main', index: 0 }]] },
          'Multi Purpose': { main: [[{ node: 'Split In Batches', type: 'main', index: 0 }]] },
        },
      };
      const result = await validator.validateWorkflow(workflow as any);
      const loopWarnings = result.warnings.filter(w => w.message?.includes('loop') && w.message?.includes('connect back'));
      expect(loopWarnings).toHaveLength(0);
    });
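
    // In n8n, SplitInBatches exposes "done" on output 0 and "loop" on output 1,
    // so a processing node wired to output 0 that feeds back into the node
    // suggests the two outputs were swapped.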
    it('should detect reversed outputs with processing node on done output', async () => {
      const workflow = {
        name: 'Reversed SplitInBatches with Function Node',
        nodes: [
          { id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
          { id: '2', name: 'Process Function', type: 'n8n-nodes-base.function', position: [300, 100], parameters: {} },
        ],
        connections: {
          'Split In Batches': { main: [[{ node: 'Process Function', type: 'main', index: 0 }], []] },
          'Process Function': { main: [[{ node: 'Split In Batches', type: 'main', index: 0 }]] },
        },
      };
      const result = await validator.validateWorkflow(workflow as any);
      const reversedErrors = result.errors.filter(e => e.message?.includes('SplitInBatches outputs appear reversed'));
      expect(reversedErrors).toHaveLength(1);
    });

    it('should handle self-referencing nodes in loop back detection', async () => {
      const workflow = {
        name: 'Self Reference in Loop Back',
        nodes: [
          { id: '1', name: 'Split In Batches', type: 'n8n-nodes-base.splitInBatches', position: [100, 100], parameters: {} },
          { id: '2', name: 'SelfRef', type: 'n8n-nodes-base.set', position: [300, 100], parameters: {} },
        ],
        connections: {
          'Split In Batches': { main: [[], [{ node: 'SelfRef', type: 'main', index: 0 }]] },
          'SelfRef': { main: [[{ node: 'SelfRef', type: 'main', index: 0 }]] },
        },
      };
      const result = await validator.validateWorkflow(workflow as any);
      expect(result.warnings.filter(w => w.message?.includes("doesn't connect back"))).toHaveLength(1);
      expect(result.warnings.filter(w => w.message?.includes('self-referencing'))).toHaveLength(1);
    });

    it('should handle many SplitInBatches nodes', async () => {
      const nodes = Array.from({ length: 100 }, (_, i) => ({
        id: `split${i}`, name: `Split ${i}`, type: 'n8n-nodes-base.splitInBatches',
        position: [100 + (i % 10) * 100, 100 + Math.floor(i / 10) * 100], parameters: {},
      }));
      const connections: any = {};
      for (let i = 0; i < 99; i++) {
        connections[`Split ${i}`] = { main: [[{ node: `Split ${i + 1}`, type: 'main', index: 0 }], []] };
      }
      const result = await validator.validateWorkflow({ name: 'Many SplitInBatches', nodes, connections } as any);
      expect(result).toBeDefined();
      expect(result.statistics.totalNodes).toBe(100);
    });
  });
});