fix: resolve node type normalization bug blocking all AI validation (HIGH-01, HIGH-04, HIGH-08)

CRITICAL BUG FIX:
Despite its name, NodeTypeNormalizer.normalizeToFullForm() converts node types to the
SHORT form (nodes-langchain.*), while all validation code compared against the FULL form
(@n8n/n8n-nodes-langchain.*). As a result, ALL AI validation was silently skipped.

Impact:
- Missing language model detection: NEVER triggered
- AI tool connection detection: NEVER triggered
- Streaming mode validation: NEVER triggered
- AI tool sub-node validation: NEVER triggered

ROOT CAUSE:
Line 348 in ai-node-validator.ts (and 19 other locations):
  if (normalizedType === '@n8n/n8n-nodes-langchain.agent') // FULL form
But normalizedType is 'nodes-langchain.agent' (SHORT form)
Result: Comparison always FALSE, validation never runs
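
A minimal sketch of the mismatch and the corrected comparison (abbreviated from the
diff below):

  const normalizedType = NodeTypeNormalizer.normalizeToFullForm(node.type);
  // normalizedType is 'nodes-langchain.agent'

  // BEFORE - never true, so the AI Agent checks were skipped:
  if (normalizedType === '@n8n/n8n-nodes-langchain.agent') { /* ... */ }

  // AFTER - matches the normalizer's actual output:
  if (normalizedType === 'nodes-langchain.agent') { /* ... */ }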

FIXES:
1. ai-node-validator.ts (7 locations):
   - Lines 551, 557, 563: validateAISpecificNodes comparisons
   - Line 348: checkIfStreamingTarget comparison
   - Lines 417, 444: validateChatTrigger comparisons
   - Lines 589-591: hasAINodes array
   - Lines 606-608, 612: getAINodeCategory comparisons

2. ai-tool-validators.ts (14 locations):
   - Lines 980-991: AI_TOOL_VALIDATORS keys (13 validators)
   - Lines 1015-1037: validateAIToolSubNode switch cases (13 cases)

3. ENHANCED streaming validation:
   - Added a check for the AI Agent's own streamResponse option
   - Previously only streaming configured on the Chat Trigger (responseMode="streaming") was checked
   - Now validates BOTH scenarios (lines 259-276); see the sketch below
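
   A minimal, hypothetical workflow fragment (in the style of the debug test script
   below) that the enhanced check should now flag; the target node name is illustrative
   and the language-model connection is omitted for brevity:

     const streamingAgentWorkflow: WorkflowJson = {
       name: 'Streaming agent with a main output',
       nodes: [
         {
           id: 'ai-agent-1',
           name: 'AI Agent',
           type: '@n8n/n8n-nodes-langchain.agent',
           position: [500, 300],
           parameters: { options: { streamResponse: true } },
           typeVersion: 1.7
         }
       ],
       connections: {
         // Outgoing main connection - not allowed while streaming
         'AI Agent': {
           main: [[{ node: 'Format Response', type: 'main', index: 0 }]]
         }
       }
     };

   validateAISpecificNodes(streamingAgentWorkflow) should now report
   STREAMING_WITH_MAIN_OUTPUT even though no Chat Trigger is involved (plus
   MISSING_LANGUAGE_MODEL, since no model is connected in this fragment).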

VERIFICATION:
- All 25 AI validator unit tests: PASS
- Debug test (missing LM): PASS
- Debug test (AI tools): PASS
- Debug test (streaming): PASS

Resolves:
- HIGH-01: Missing language model detection (was never running)
- HIGH-04: AI tool connection detection (was never running)
- HIGH-08: Streaming mode validation (was never running + incomplete)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: czlonkowski
Date: 2025-10-06 23:36:56 +02:00
Parent: ccbe04f007
Commit: 92eb4ef34f
4 changed files with 233 additions and 39 deletions

Binary file not shown.

New file - AI validation debug test script:

@@ -0,0 +1,189 @@
#!/usr/bin/env node
/**
* Debug test for AI validation issues
* Reproduces the bugs found by n8n-mcp-tester
*/
import { validateAISpecificNodes, buildReverseConnectionMap } from '../src/services/ai-node-validator';
import type { WorkflowJson } from '../src/services/ai-tool-validators';
import { NodeTypeNormalizer } from '../src/utils/node-type-normalizer';
console.log('=== AI Validation Debug Tests ===\n');
// Test 1: AI Agent with NO language model connection
console.log('Test 1: Missing Language Model Detection');
const workflow1: WorkflowJson = {
name: 'Test Missing LM',
nodes: [
{
id: 'ai-agent-1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [500, 300],
parameters: {
promptType: 'define',
text: 'You are a helpful assistant'
},
typeVersion: 1.7
}
],
connections: {
// NO connections - AI Agent is isolated
}
};
console.log('Workflow:', JSON.stringify(workflow1, null, 2));
const reverseMap1 = buildReverseConnectionMap(workflow1);
console.log('\nReverse connection map for AI Agent:');
console.log('Entries:', Array.from(reverseMap1.entries()));
console.log('AI Agent connections:', reverseMap1.get('AI Agent'));
// Check node normalization
const normalizedType1 = NodeTypeNormalizer.normalizeToFullForm(workflow1.nodes[0].type);
console.log(`\nNode type: ${workflow1.nodes[0].type}`);
console.log(`Normalized type: ${normalizedType1}`);
console.log(`Match check: ${normalizedType1 === '@n8n/n8n-nodes-langchain.agent'}`);
const issues1 = validateAISpecificNodes(workflow1);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues1, null, 2));
const hasMissingLMError = issues1.some(
i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
console.log(`\n✓ Has MISSING_LANGUAGE_MODEL error: ${hasMissingLMError}`);
console.log(`✗ Expected: true, Got: ${hasMissingLMError}`);
// Test 2: AI Agent WITH language model connection
console.log('\n\n' + '='.repeat(60));
console.log('Test 2: AI Agent WITH Language Model (Should be valid)');
const workflow2: WorkflowJson = {
name: 'Test With LM',
nodes: [
{
id: 'openai-1',
name: 'OpenAI Chat Model',
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
position: [200, 300],
parameters: {
modelName: 'gpt-4'
},
typeVersion: 1
},
{
id: 'ai-agent-1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [500, 300],
parameters: {
promptType: 'define',
text: 'You are a helpful assistant'
},
typeVersion: 1.7
}
],
connections: {
'OpenAI Chat Model': {
ai_languageModel: [
[
{
node: 'AI Agent',
type: 'ai_languageModel',
index: 0
}
]
]
}
}
};
console.log('\nConnections:', JSON.stringify(workflow2.connections, null, 2));
const reverseMap2 = buildReverseConnectionMap(workflow2);
console.log('\nReverse connection map for AI Agent:');
console.log('AI Agent connections:', reverseMap2.get('AI Agent'));
const issues2 = validateAISpecificNodes(workflow2);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues2, null, 2));
const hasMissingLMError2 = issues2.some(
i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
console.log(`\n✓ Should NOT have MISSING_LANGUAGE_MODEL error: ${!hasMissingLMError2}`);
console.log(`Expected: false, Got: ${hasMissingLMError2}`);
// Test 3: AI Agent with tools but no language model
console.log('\n\n' + '='.repeat(60));
console.log('Test 3: AI Agent with Tools but NO Language Model');
const workflow3: WorkflowJson = {
name: 'Test Tools No LM',
nodes: [
{
id: 'http-tool-1',
name: 'HTTP Request Tool',
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
position: [200, 300],
parameters: {
toolDescription: 'Calls an API',
url: 'https://api.example.com'
},
typeVersion: 1.1
},
{
id: 'ai-agent-1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [500, 300],
parameters: {
promptType: 'define',
text: 'You are a helpful assistant'
},
typeVersion: 1.7
}
],
connections: {
'HTTP Request Tool': {
ai_tool: [
[
{
node: 'AI Agent',
type: 'ai_tool',
index: 0
}
]
]
}
}
};
console.log('\nConnections:', JSON.stringify(workflow3.connections, null, 2));
const reverseMap3 = buildReverseConnectionMap(workflow3);
console.log('\nReverse connection map for AI Agent:');
const aiAgentConns = reverseMap3.get('AI Agent');
console.log('AI Agent connections:', aiAgentConns);
console.log('Connection types:', aiAgentConns?.map(c => c.type));
const issues3 = validateAISpecificNodes(workflow3);
console.log('\nValidation issues:');
console.log(JSON.stringify(issues3, null, 2));
const hasMissingLMError3 = issues3.some(
i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL'
);
const hasNoToolsInfo3 = issues3.some(
i => i.severity === 'info' && i.message.includes('no ai_tool connections')
);
console.log(`\n✓ Should have MISSING_LANGUAGE_MODEL error: ${hasMissingLMError3}`);
console.log(`Expected: true, Got: ${hasMissingLMError3}`);
console.log(`✗ Should NOT have "no tools" info: ${!hasNoToolsInfo3}`);
console.log(`Expected: false, Got: ${hasNoToolsInfo3}`);
console.log('\n' + '='.repeat(60));
console.log('Summary:');
console.log(`Test 1 (No LM): ${hasMissingLMError ? 'PASS ✓' : 'FAIL ✗'}`);
console.log(`Test 2 (With LM): ${!hasMissingLMError2 ? 'PASS ✓' : 'FAIL ✗'}`);
console.log(`Test 3 (Tools, No LM): ${hasMissingLMError3 && !hasNoToolsInfo3 ? 'PASS ✓' : 'FAIL ✗'}`);

src/services/ai-node-validator.ts:

@@ -256,15 +256,20 @@ export function validateAIAgent(
// 5. Validate streaming mode constraints (CRITICAL)
// From spec lines 753-879: AI Agent with streaming MUST NOT have main output connections
const isStreamingTarget = checkIfStreamingTarget(node, workflow, reverseConnections);
-if (isStreamingTarget) {
+const hasOwnStreamingEnabled = node.parameters?.options?.streamResponse === true;
+if (isStreamingTarget || hasOwnStreamingEnabled) {
// Check if AI Agent has any main output connections
const agentMainOutput = workflow.connections[node.name]?.main;
if (agentMainOutput && agentMainOutput.flat().some((c: any) => c)) {
+const streamSource = isStreamingTarget
+  ? 'connected from Chat Trigger with responseMode="streaming"'
+  : 'has streamResponse=true in options';
issues.push({
severity: 'error',
nodeId: node.id,
nodeName: node.name,
-message: `AI Agent "${node.name}" is in streaming mode (connected from Chat Trigger with responseMode="streaming") but has outgoing main connections. Remove all main output connections - streaming responses flow back through the Chat Trigger.`,
+message: `AI Agent "${node.name}" is in streaming mode (${streamSource}) but has outgoing main connections. Remove all main output connections - streaming responses flow back through the Chat Trigger.`,
code: 'STREAMING_WITH_MAIN_OUTPUT'
});
}
@@ -345,7 +350,7 @@ function checkIfStreamingTarget(
if (!sourceNode) continue;
const normalizedType = NodeTypeNormalizer.normalizeToFullForm(sourceNode.type);
-if (normalizedType === '@n8n/n8n-nodes-langchain.chatTrigger') {
+if (normalizedType === 'nodes-langchain.chatTrigger') {
const responseMode = sourceNode.parameters?.options?.responseMode || 'lastNode';
if (responseMode === 'streaming') {
return true;
@@ -409,7 +414,7 @@ export function validateChatTrigger(
// Validate streaming mode
if (responseMode === 'streaming') {
// CRITICAL: Streaming mode only works with AI Agent
-if (targetType !== '@n8n/n8n-nodes-langchain.agent') {
+if (targetType !== 'nodes-langchain.agent') {
issues.push({
severity: 'error',
nodeId: node.id,
@@ -436,7 +441,7 @@ export function validateChatTrigger(
if (responseMode === 'lastNode') {
// lastNode mode requires a workflow that ends somewhere
// Just informational - this is the default and works with any workflow
-if (targetType === '@n8n/n8n-nodes-langchain.agent') {
+if (targetType === 'nodes-langchain.agent') {
issues.push({
severity: 'info',
nodeId: node.id,
@@ -548,19 +553,19 @@ export function validateAISpecificNodes(
const normalizedType = NodeTypeNormalizer.normalizeToFullForm(node.type);
// Validate AI Agent nodes
-if (normalizedType === '@n8n/n8n-nodes-langchain.agent') {
+if (normalizedType === 'nodes-langchain.agent') {
const nodeIssues = validateAIAgent(node, reverseConnectionMap, workflow);
issues.push(...nodeIssues);
}
// Validate Chat Trigger nodes
-if (normalizedType === '@n8n/n8n-nodes-langchain.chatTrigger') {
+if (normalizedType === 'nodes-langchain.chatTrigger') {
const nodeIssues = validateChatTrigger(node, workflow, reverseConnectionMap);
issues.push(...nodeIssues);
}
// Validate Basic LLM Chain nodes
-if (normalizedType === '@n8n/n8n-nodes-langchain.chainLlm') {
+if (normalizedType === 'nodes-langchain.chainLlm') {
const nodeIssues = validateBasicLLMChain(node, reverseConnectionMap);
issues.push(...nodeIssues);
}
@@ -586,9 +591,9 @@ export function validateAISpecificNodes(
*/
export function hasAINodes(workflow: WorkflowJson): boolean {
const aiNodeTypes = [
-'@n8n/n8n-nodes-langchain.agent',
-'@n8n/n8n-nodes-langchain.chatTrigger',
-'@n8n/n8n-nodes-langchain.chainLlm',
+'nodes-langchain.agent',
+'nodes-langchain.chatTrigger',
+'nodes-langchain.chainLlm',
];
return workflow.nodes.some(node => {
@@ -603,13 +608,13 @@ export function hasAINodes(workflow: WorkflowJson): boolean {
export function getAINodeCategory(nodeType: string): string | null {
const normalized = NodeTypeNormalizer.normalizeToFullForm(nodeType);
-if (normalized === '@n8n/n8n-nodes-langchain.agent') return 'AI Agent';
-if (normalized === '@n8n/n8n-nodes-langchain.chatTrigger') return 'Chat Trigger';
-if (normalized === '@n8n/n8n-nodes-langchain.chainLlm') return 'Basic LLM Chain';
+if (normalized === 'nodes-langchain.agent') return 'AI Agent';
+if (normalized === 'nodes-langchain.chatTrigger') return 'Chat Trigger';
+if (normalized === 'nodes-langchain.chainLlm') return 'Basic LLM Chain';
if (isAIToolSubNode(normalized)) return 'AI Tool';
// Check for AI component nodes
-if (normalized.startsWith('@n8n/n8n-nodes-langchain.')) {
+if (normalized.startsWith('nodes-langchain.')) {
if (normalized.includes('openAi') || normalized.includes('anthropic') || normalized.includes('googleGemini')) {
return 'Language Model';
}

src/services/ai-tool-validators.ts:

@@ -977,18 +977,18 @@ export function validateWolframAlphaTool(node: WorkflowNode): ValidationIssue[]
* Helper: Map node types to validator functions
*/
export const AI_TOOL_VALIDATORS = {
-'@n8n/n8n-nodes-langchain.toolHttpRequest': validateHTTPRequestTool,
-'@n8n/n8n-nodes-langchain.toolCode': validateCodeTool,
-'@n8n/n8n-nodes-langchain.toolVectorStore': validateVectorStoreTool,
-'@n8n/n8n-nodes-langchain.toolWorkflow': validateWorkflowTool,
-'@n8n/n8n-nodes-langchain.agentTool': validateAIAgentTool,
-'@n8n/n8n-nodes-langchain.mcpClientTool': validateMCPClientTool,
-'@n8n/n8n-nodes-langchain.toolCalculator': validateCalculatorTool,
-'@n8n/n8n-nodes-langchain.toolThink': validateThinkTool,
-'@n8n/n8n-nodes-langchain.toolSerpApi': validateSerpApiTool,
-'@n8n/n8n-nodes-langchain.toolWikipedia': validateWikipediaTool,
-'@n8n/n8n-nodes-langchain.toolSearXng': validateSearXngTool,
-'@n8n/n8n-nodes-langchain.toolWolframAlpha': validateWolframAlphaTool,
+'nodes-langchain.toolHttpRequest': validateHTTPRequestTool,
+'nodes-langchain.toolCode': validateCodeTool,
+'nodes-langchain.toolVectorStore': validateVectorStoreTool,
+'nodes-langchain.toolWorkflow': validateWorkflowTool,
+'nodes-langchain.agentTool': validateAIAgentTool,
+'nodes-langchain.mcpClientTool': validateMCPClientTool,
+'nodes-langchain.toolCalculator': validateCalculatorTool,
+'nodes-langchain.toolThink': validateThinkTool,
+'nodes-langchain.toolSerpApi': validateSerpApiTool,
+'nodes-langchain.toolWikipedia': validateWikipediaTool,
+'nodes-langchain.toolSearXng': validateSearXngTool,
+'nodes-langchain.toolWolframAlpha': validateWolframAlphaTool,
} as const;
/**
@@ -1012,29 +1012,29 @@ export function validateAIToolSubNode(
// Route to appropriate validator based on node type
switch (normalized) {
-case '@n8n/n8n-nodes-langchain.toolHttpRequest':
+case 'nodes-langchain.toolHttpRequest':
return validateHTTPRequestTool(node);
-case '@n8n/n8n-nodes-langchain.toolCode':
+case 'nodes-langchain.toolCode':
return validateCodeTool(node);
-case '@n8n/n8n-nodes-langchain.toolVectorStore':
+case 'nodes-langchain.toolVectorStore':
return validateVectorStoreTool(node, reverseConnections, workflow);
-case '@n8n/n8n-nodes-langchain.toolWorkflow':
+case 'nodes-langchain.toolWorkflow':
return validateWorkflowTool(node);
-case '@n8n/n8n-nodes-langchain.agentTool':
+case 'nodes-langchain.agentTool':
return validateAIAgentTool(node, reverseConnections);
-case '@n8n/n8n-nodes-langchain.mcpClientTool':
+case 'nodes-langchain.mcpClientTool':
return validateMCPClientTool(node);
-case '@n8n/n8n-nodes-langchain.toolCalculator':
+case 'nodes-langchain.toolCalculator':
return validateCalculatorTool(node);
-case '@n8n/n8n-nodes-langchain.toolThink':
+case 'nodes-langchain.toolThink':
return validateThinkTool(node);
-case '@n8n/n8n-nodes-langchain.toolSerpApi':
+case 'nodes-langchain.toolSerpApi':
return validateSerpApiTool(node);
-case '@n8n/n8n-nodes-langchain.toolWikipedia':
+case 'nodes-langchain.toolWikipedia':
return validateWikipediaTool(node);
-case '@n8n/n8n-nodes-langchain.toolSearXng':
+case 'nodes-langchain.toolSearXng':
return validateSearXngTool(node);
-case '@n8n/n8n-nodes-langchain.toolWolframAlpha':
+case 'nodes-langchain.toolWolframAlpha':
return validateWolframAlphaTool(node);
default:
return [];