refactor: streamline test suite - cut 33 files, enable parallel execution (11.9x speedup)

Remove duplicate, low-value, and fragmented test files while preserving
all meaningful coverage. Enable parallel test execution and remove
the entire benchmark infrastructure.

Key changes:
- Consolidate workflow-validator tests (13 files -> 3)
- Consolidate config-validator tests (9 files -> 3)
- Consolidate telemetry tests (11 files -> 6)
- Merge AI validator tests (2 files -> 1)
- Remove example/demo test files, mock-testing files, and already-skipped tests
- Remove benchmark infrastructure (10 files, CI workflow, 4 npm scripts)
- Enable parallel test execution (remove singleThread: true)
- Remove retry: 2, which was masking flaky tests (see the config sketch below)
- Slim down the CI publish-results job
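
The parallel-execution and retry items amount to a small vitest config
edit. A minimal sketch of the intent, assuming a typical vitest.config.ts
(the repo's actual option layout may differ):

    // vitest.config.ts -- illustrative sketch, not the exact repo config
    import { defineConfig } from 'vitest/config';

    export default defineConfig({
      test: {
        // Removed: poolOptions: { threads: { singleThread: true } }
        // (or the older top-level singleThread: true). Without it, Vitest's
        // default thread pool runs test files in parallel across CPU cores.

        // Removed: retry: 2
        // Flaky tests now fail on their first run instead of being silently
        // retried, so they surface and get fixed.
      },
    });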

Results: 224 -> 191 test files, 4690 -> 4303 tests, 121K -> 106K lines
Local runtime: 319s -> 27s (11.9x speedup)

Conceived by Romuald Członkowski - www.aiadvisors.pl/en

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Author: czlonkowski
Date:   2026-03-26 23:41:06 +01:00
Parent: 07bd1d4cc2
Commit: fdc37efde7

62 changed files with 2998 additions and 20806 deletions


@@ -1,214 +0,0 @@
name: Performance Benchmarks

on:
  push:
    branches: [main, feat/comprehensive-testing-suite]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  pull_request:
    branches: [main]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

permissions:
  # For PR comments
  pull-requests: write
  # For pushing to gh-pages branch
  contents: write
  # For deployment to GitHub Pages
  pages: write
  id-token: write

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # Fetch all history for proper benchmark comparison
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Build project
        run: npm run build

      - name: Run benchmarks
        run: npm run benchmark:ci

      - name: Format benchmark results
        run: node scripts/format-benchmark-results.js

      - name: Upload benchmark artifacts
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: |
            benchmark-results.json
            benchmark-results-formatted.json
            benchmark-summary.json

      # Ensure gh-pages branch exists
      - name: Check and create gh-pages branch
        run: |
          git fetch origin gh-pages:gh-pages 2>/dev/null || {
            echo "gh-pages branch doesn't exist. Creating it..."
            git checkout --orphan gh-pages
            git rm -rf .
            echo "# Benchmark Results" > README.md
            git add README.md
            git config user.name "github-actions[bot]"
            git config user.email "github-actions[bot]@users.noreply.github.com"
            git commit -m "Initial gh-pages commit"
            git push origin gh-pages
            git checkout ${{ github.ref_name }}
          }

      # Clean up workspace before benchmark action
      - name: Clean workspace
        run: |
          git add -A
          git stash || true

      # Store benchmark results and compare
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        continue-on-error: true
        id: benchmark
        with:
          name: n8n-mcp Benchmarks
          tool: 'customSmallerIsBetter'
          output-file-path: benchmark-results-formatted.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
          # Where to store benchmark data
          benchmark-data-dir-path: 'benchmarks'
          # Alert when performance regresses by 10%
          alert-threshold: '110%'
          # Comment on PR when regression is detected
          comment-on-alert: true
          alert-comment-cc-users: '@czlonkowski'
          # Summary always
          summary-always: true
          # Max number of data points to retain
          max-items-in-chart: 50
          fail-on-alert: false

      # Comment on PR with benchmark results
      - name: Comment PR with results
        uses: actions/github-script@v7
        if: github.event_name == 'pull_request'
        continue-on-error: true
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            try {
              const fs = require('fs');
              const summary = JSON.parse(fs.readFileSync('benchmark-summary.json', 'utf8'));

              // Format results for PR comment
              let comment = '## 📊 Performance Benchmark Results\n\n';
              comment += `🕐 Run at: ${new Date(summary.timestamp).toLocaleString()}\n\n`;
              comment += '| Benchmark | Time | Ops/sec | Range |\n';
              comment += '|-----------|------|---------|-------|\n';

              // Group benchmarks by category
              const categories = {};
              for (const benchmark of summary.benchmarks) {
                const [category, ...nameParts] = benchmark.name.split(' - ');
                if (!categories[category]) categories[category] = [];
                categories[category].push({
                  ...benchmark,
                  shortName: nameParts.join(' - ')
                });
              }

              // Display by category
              for (const [category, benchmarks] of Object.entries(categories)) {
                comment += `\n### ${category}\n`;
                for (const benchmark of benchmarks) {
                  comment += `| ${benchmark.shortName} | ${benchmark.time} | ${benchmark.opsPerSec} | ${benchmark.range} |\n`;
                }
              }

              // Add comparison link
              comment += '\n\n📈 [View historical benchmark trends](https://czlonkowski.github.io/n8n-mcp/benchmarks/)\n';
              comment += '\n⚡ Performance regressions >10% will be flagged automatically.\n';

              await github.rest.issues.createComment({
                issue_number: context.issue.number,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: comment
              });
            } catch (error) {
              console.error('Failed to create PR comment:', error.message);
              console.log('This is likely due to insufficient permissions for external PRs.');
              console.log('Benchmark results have been saved to artifacts instead.');
            }

  # Deploy benchmark results to GitHub Pages
  deploy:
    needs: benchmark
    if: github.ref == 'refs/heads/main'
    runs-on: ubuntu-latest
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: gh-pages
        continue-on-error: true

      # If gh-pages checkout failed, create a minimal structure
      - name: Ensure gh-pages content exists
        run: |
          if [ ! -f "index.html" ]; then
            echo "Creating minimal gh-pages structure..."
            mkdir -p benchmarks
            echo '<!DOCTYPE html><html><head><title>n8n-mcp Benchmarks</title></head><body><h1>n8n-mcp Benchmarks</h1><p>Benchmark data will appear here after the first run.</p></body></html>' > index.html
          fi

      - name: Setup Pages
        uses: actions/configure-pages@v4

      - name: Upload Pages artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: '.'

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4


@@ -133,23 +133,6 @@ jobs:
      - name: Run type checking
        run: npm run typecheck

      # Run benchmarks
      - name: Run benchmarks
        id: benchmarks
        run: npm run benchmark:ci
        continue-on-error: true

      # Upload benchmark results
      - name: Upload benchmark results
        if: always() && steps.benchmarks.outcome != 'skipped'
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}
          path: |
            benchmark-results.json
          retention-days: 30
          if-no-files-found: warn

      # Create test report comment for PRs
      - name: Create test report comment
        if: github.event_name == 'pull_request' && always()
@@ -222,7 +205,6 @@ jobs:
echo "" >> $GITHUB_STEP_SUMMARY
echo "- [Test Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
echo "- [Coverage Report](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
echo "- [Benchmark Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
# Store test metadata
- name: Store test metadata
@@ -252,24 +234,21 @@ jobs:
          path: test-metadata.json
          retention-days: 30

  # Separate job to process and publish test results
  # Publish test results as checks
  publish-results:
    needs: test
    runs-on: ubuntu-latest
    if: always()
    permissions:
      checks: write
      pull-requests: write
    steps:
      - uses: actions/checkout@v4

      # Download all artifacts
      - name: Download all artifacts
      - name: Download test results
        uses: actions/download-artifact@v4
        with:
          path: artifacts

      # Publish test results as checks
      - name: Publish test results
        uses: dorny/test-reporter@v1
        if: always()
@@ -279,75 +258,4 @@ jobs:
        with:
          path: 'artifacts/test-results-*/test-results/junit.xml'
          reporter: java-junit
          fail-on-error: false
          fail-on-empty: false

      # Create a combined artifact with all results
      - name: Create combined results artifact
        if: always()
        run: |
          mkdir -p combined-results
          cp -r artifacts/* combined-results/ 2>/dev/null || true

          # Create index file
          cat > combined-results/index.html << 'EOF'
          <!DOCTYPE html>
          <html>
          <head>
          <title>n8n-mcp Test Results</title>
          <style>
            body { font-family: Arial, sans-serif; margin: 40px; }
            h1 { color: #333; }
            .section { margin: 20px 0; padding: 20px; border: 1px solid #ddd; border-radius: 5px; }
            a { color: #0066cc; text-decoration: none; }
            a:hover { text-decoration: underline; }
          </style>
          </head>
          <body>
          <h1>n8n-mcp Test Results</h1>
          <div class="section">
            <h2>Test Reports</h2>
            <ul>
              <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.html">📊 Detailed HTML Report</a></li>
              <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/html/index.html">📈 Vitest HTML Report</a></li>
              <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.md">📄 Markdown Report</a></li>
              <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-summary.md">📝 PR Summary</a></li>
              <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/junit.xml">🔧 JUnit XML</a></li>
              <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/results.json">🔢 JSON Results</a></li>
              <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.json">📊 Full JSON Report</a></li>
            </ul>
          </div>
          <div class="section">
            <h2>Coverage Reports</h2>
            <ul>
              <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/html/index.html">HTML Coverage Report</a></li>
              <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/lcov.info">LCOV Report</a></li>
              <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/coverage-summary.json">Coverage Summary JSON</a></li>
            </ul>
          </div>
          <div class="section">
            <h2>Benchmark Results</h2>
            <ul>
              <li><a href="benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}/benchmark-results.json">Benchmark Results JSON</a></li>
            </ul>
          </div>
          <div class="section">
            <h2>Metadata</h2>
            <ul>
              <li><a href="test-metadata-${{ github.run_number }}-${{ github.run_attempt }}/test-metadata.json">Test Run Metadata</a></li>
            </ul>
          </div>
          <div class="section">
            <p><em>Generated at $(date -u +%Y-%m-%dT%H:%M:%SZ)</em></p>
            <p><em>Run: #${{ github.run_number }} | SHA: ${{ github.sha }}</em></p>
          </div>
          </body>
          </html>
          EOF

      - name: Upload combined results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: all-test-results-${{ github.run_number }}
          path: combined-results/
          retention-days: 90