chore: update n8n to 2.13.3 (#666)

* chore: update n8n to 2.13.3 and bump version to 2.41.0

- Updated n8n from 2.12.3 to 2.13.3
- Updated n8n-core from 2.12.0 to 2.13.1
- Updated n8n-workflow from 2.12.0 to 2.13.1
- Updated @n8n/n8n-nodes-langchain from 2.12.0 to 2.13.1
- Rebuilt node database with 1,396 nodes (812 core + 584 community: 516 verified + 68 npm)
- Refreshed community nodes with 581 AI-generated documentation summaries
- Improved documentation generator: strip <think> tags, raw fetch for vLLM chat_template_kwargs
- Incremental community updates: saveNode uses ON CONFLICT DO UPDATE preserving READMEs/AI summaries
- fetch:community now upserts by default (use --rebuild for clean slate)
- Updated README badge and node counts
- Updated CHANGELOG and MEMORY_N8N_UPDATE.md

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

* chore: update MCP SDK from 1.27.1 to 1.28.0

- Pinned @modelcontextprotocol/sdk to 1.28.0 (was ^1.27.1)
- Updated CI dependency check to expect 1.28.0
- SDK 1.28.0 includes: loopback port relaxation, inputSchema fix,
  timeout cleanup fix, OAuth scope improvements
- All 15 MCP tool tests pass with no regressions

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: update test assertions for ON CONFLICT saveNode SQL

Tests expected old INSERT OR REPLACE SQL, updated to match new
INSERT INTO ... ON CONFLICT(node_type) DO UPDATE SET pattern.

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

* chore: remove documentation generator tests

These tests mocked the OpenAI SDK which was replaced with raw fetch.
Documentation generation is a local LLM utility, not core functionality.

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: relax SQL assertion in outputs test to match ON CONFLICT pattern

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: use INSERT OR REPLACE with docs preservation instead of ON CONFLICT

ON CONFLICT DO UPDATE caused FTS5 trigger conflicts ("database disk
image is malformed") in CI. Reverted to INSERT OR REPLACE but now
reads existing npm_readme/ai_documentation_summary/ai_summary_generated_at
before saving and carries them through the replace.

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: update saveNode test mocks for docs preservation pattern

Tests now account for the SELECT query that reads existing docs
before INSERT OR REPLACE, and the 3 extra params (npm_readme,
ai_documentation_summary, ai_summary_generated_at).

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: update community integration test mock for INSERT OR REPLACE

The mock SQL matching used 'INSERT INTO nodes' which doesn't match
'INSERT OR REPLACE INTO nodes'. Also added handler for the new
SELECT npm_readme query in saveNode.

Conceived by Romuald Członkowski - https://www.aiadvisors.pl/en

Co-Authored-By: Claude <noreply@anthropic.com>

---------

Co-authored-by: Claude <noreply@anthropic.com>
This commit is contained in:
Romuald Członkowski
2026-03-26 22:21:56 +01:00
committed by GitHub
parent 1f0738e637
commit 07bd1d4cc2
28 changed files with 7026 additions and 3669 deletions

View File

@@ -77,6 +77,8 @@ const DEFAULT_CONFIG: Required<Omit<DocumentationGeneratorConfig, 'baseUrl' | 't
*/
export class DocumentationGenerator {
private client: OpenAI;
private baseUrl: string;
private apiKey: string;
private model: string;
private maxTokens: number;
private timeout: number;
@@ -85,6 +87,8 @@ export class DocumentationGenerator {
constructor(config: DocumentationGeneratorConfig) {
const fullConfig = { ...DEFAULT_CONFIG, ...config };
this.baseUrl = config.baseUrl;
this.apiKey = fullConfig.apiKey;
this.client = new OpenAI({
baseURL: config.baseUrl,
apiKey: fullConfig.apiKey,
@@ -103,21 +107,10 @@ export class DocumentationGenerator {
try {
const prompt = this.buildPrompt(input);
const completion = await this.client.chat.completions.create({
model: this.model,
max_completion_tokens: this.maxTokens,
...(this.temperature !== undefined ? { temperature: this.temperature } : {}),
messages: [
{
role: 'system',
content: this.getSystemPrompt(),
},
{
role: 'user',
content: prompt,
},
],
});
const completion = await this.chatCompletion([
{ role: 'system', content: this.getSystemPrompt() },
{ role: 'user', content: prompt },
], this.maxTokens);
const content = completion.choices[0]?.message?.content;
if (!content) {
@@ -246,20 +239,23 @@ Guidelines:
* Extract JSON from LLM response (handles markdown code blocks)
*/
private extractJson(content: string): string {
// Strip <think>...</think> blocks from thinking models (e.g., Qwen3-Thinking)
const stripped = content.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
// Try to extract from markdown code block
const jsonBlockMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
const jsonBlockMatch = stripped.match(/```(?:json)?\s*([\s\S]*?)```/);
if (jsonBlockMatch) {
return jsonBlockMatch[1].trim();
}
// Try to find JSON object directly
const jsonMatch = content.match(/\{[\s\S]*\}/);
const jsonMatch = stripped.match(/\{[\s\S]*\}/);
if (jsonMatch) {
return jsonMatch[0];
}
// Return as-is if no extraction needed
return content.trim();
return stripped;
}
/**
@@ -323,16 +319,9 @@ Guidelines:
*/
async testConnection(): Promise<{ success: boolean; message: string }> {
try {
const completion = await this.client.chat.completions.create({
model: this.model,
max_completion_tokens: 200,
messages: [
{
role: 'user',
content: 'Hello',
},
],
});
const completion = await this.chatCompletion([
{ role: 'user', content: 'Hello' },
], 200);
if (completion.choices[0]?.message?.content) {
return { success: true, message: `Connected to ${this.model}` };
@@ -345,6 +334,44 @@ Guidelines:
}
}
/**
* Make a chat completion request with chat_template_kwargs support for vLLM thinking models
*/
private async chatCompletion(
messages: Array<{ role: string; content: string }>,
maxTokens: number
): Promise<{ choices: Array<{ message: { content: string | null } }> }> {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
try {
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
...(this.apiKey !== 'not-needed' ? { Authorization: `Bearer ${this.apiKey}` } : {}),
},
body: JSON.stringify({
model: this.model,
messages,
max_completion_tokens: maxTokens,
...(this.temperature !== undefined ? { temperature: this.temperature } : {}),
chat_template_kwargs: { enable_thinking: false },
}),
signal: controller.signal,
});
if (!response.ok) {
const text = await response.text();
throw new Error(`${response.status} ${text}`);
}
return (await response.json()) as { choices: Array<{ message: { content: string | null } }> };
} finally {
clearTimeout(timeoutId);
}
}
private sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms));
}

View File

@@ -34,6 +34,11 @@ export class NodeRepository {
* Supports both core and community nodes via optional community fields
*/
saveNode(node: ParsedNode & Partial<CommunityNodeFields>): void {
// Preserve existing npm_readme and ai_documentation_summary on upsert
const existing = this.db.prepare(
'SELECT npm_readme, ai_documentation_summary, ai_summary_generated_at FROM nodes WHERE node_type = ?'
).get(node.nodeType) as { npm_readme?: string; ai_documentation_summary?: string; ai_summary_generated_at?: string } | undefined;
const stmt = this.db.prepare(`
INSERT OR REPLACE INTO nodes (
node_type, package_name, display_name, description,
@@ -43,8 +48,9 @@ export class NodeRepository {
properties_schema, operations, credentials_required,
outputs, output_names,
is_community, is_verified, author_name, author_github_url,
npm_package_name, npm_version, npm_downloads, community_fetched_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
npm_package_name, npm_version, npm_downloads, community_fetched_at,
npm_readme, ai_documentation_summary, ai_summary_generated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
`);
stmt.run(
@@ -76,7 +82,11 @@ export class NodeRepository {
node.npmPackageName || null,
node.npmVersion || null,
node.npmDownloads || 0,
node.communityFetchedAt || null
node.communityFetchedAt || null,
// Preserve existing docs data on upsert
existing?.npm_readme || null,
existing?.ai_documentation_summary || null,
existing?.ai_summary_generated_at || null
);
}

View File

@@ -3,13 +3,14 @@
* Fetch community nodes from n8n Strapi API and npm registry.
*
* Usage:
* npm run fetch:community # Full rebuild (verified + top 100 npm)
* npm run fetch:community # Upsert all (preserves READMEs and AI summaries)
* npm run fetch:community:verified # Verified nodes only (fast)
* npm run fetch:community:update # Incremental update (skip existing)
*
* Options:
* --verified-only Only fetch verified nodes from Strapi API
* --update Skip nodes that already exist in database
* --rebuild Delete all community nodes first (wipes READMEs/AI summaries!)
* --npm-limit=N Maximum number of npm packages to fetch (default: 100)
* --staging Use staging Strapi API instead of production
*/
@@ -22,6 +23,7 @@ import { createDatabaseAdapter } from '../database/database-adapter';
interface CliOptions {
verifiedOnly: boolean;
update: boolean;
rebuild: boolean;
npmLimit: number;
staging: boolean;
}
@@ -32,6 +34,7 @@ function parseArgs(): CliOptions {
const options: CliOptions = {
verifiedOnly: false,
update: false,
rebuild: false,
npmLimit: 100,
staging: false,
};
@@ -41,6 +44,8 @@ function parseArgs(): CliOptions {
options.verifiedOnly = true;
} else if (arg === '--update') {
options.update = true;
} else if (arg === '--rebuild') {
options.rebuild = true;
} else if (arg === '--staging') {
options.staging = true;
} else if (arg.startsWith('--npm-limit=')) {
@@ -73,7 +78,7 @@ async function main(): Promise<void> {
// Print options
console.log('Options:');
console.log(` - Mode: ${cliOptions.update ? 'Update (incremental)' : 'Rebuild'}`);
console.log(` - Mode: ${cliOptions.rebuild ? 'Rebuild (clean slate)' : cliOptions.update ? 'Update (skip existing)' : 'Upsert (preserves docs)'}`);
console.log(` - Verified only: ${cliOptions.verifiedOnly ? 'Yes' : 'No'}`);
if (!cliOptions.verifiedOnly) {
console.log(` - npm package limit: ${cliOptions.npmLimit}`);
@@ -92,9 +97,10 @@ async function main(): Promise<void> {
const environment = cliOptions.staging ? 'staging' : 'production';
const service = new CommunityNodeService(repository, environment);
// If not updating, delete existing community nodes
if (!cliOptions.update) {
console.log('\nClearing existing community nodes...');
// Only delete existing community nodes when --rebuild is explicitly requested
if (cliOptions.rebuild) {
console.log('\nClearing existing community nodes (--rebuild)...');
console.log(' WARNING: This wipes READMEs and AI summaries!');
const deleted = service.deleteCommunityNodes();
console.log(` Deleted ${deleted} existing community nodes`);
}