feat: complete Phase 3.8 - test infrastructure and CI/CD enhancements

- Add test result artifacts storage with multiple formats (JUnit, JSON, HTML)
- Configure GitHub Actions to upload and preserve test outputs
- Add PR comment integration with test summaries
- Create benchmark comparison workflow for PR performance tracking
- Add detailed test report generation scripts
- Configure artifact retention policies (30 days for individual test results, 90 days for the combined artifact)
- Set up test metadata collection for better debugging

This completes all remaining test infrastructure tasks and provides
comprehensive visibility into test results across the CI/CD pipeline.
Author: czlonkowski
Date: 2025-07-28 22:56:15 +02:00
Parent: b5210e5963
Commit: 61de107c4b
9 changed files with 1680 additions and 12 deletions
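
Several of the nine changed files are helper scripts and reporter configuration that the workflows below invoke but that are not reproduced in this excerpt. The multiple-formats bullet above corresponds to Vitest's multi-reporter support; the following is a minimal sketch of what that configuration might look like, assuming Vitest's reporters/outputFile and coverage options together with the test-results/ and coverage/ paths referenced further down (hypothetical, not the actual file from this commit):

// Vitest config sketch (hypothetical) -- wires up the JUnit, JSON, and HTML
// reporters whose outputs the workflows below upload as artifacts.
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    coverage: {
      // lcov.info, coverage-summary.json, and html/ are all linked from the
      // combined results index further down.
      reporter: ['text', 'lcov', 'json-summary', 'html'],
      reportsDirectory: 'coverage',
    },
    reporters: ['default', 'junit', 'json', 'html'],
    outputFile: {
      junit: 'test-results/junit.xml',
      json: 'test-results/results.json',
      html: 'test-results/html/index.html',
    },
  },
});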

.github/workflows/benchmark-pr.yml (new file, 154 lines)

@@ -0,0 +1,154 @@
name: Benchmark PR Comparison
on:
pull_request:
branches: [main]
paths:
- 'src/**'
- 'tests/benchmarks/**'
- 'package.json'
- 'vitest.config.benchmark.ts'
permissions:
pull-requests: write
contents: read
jobs:
benchmark-comparison:
runs-on: ubuntu-latest
steps:
- name: Checkout PR branch
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- name: Install dependencies
run: npm ci
# Run benchmarks on current branch
- name: Run current benchmarks
run: npm run benchmark:ci
- name: Save current results
run: cp benchmark-results.json benchmark-current.json
# Checkout and run benchmarks on base branch
- name: Checkout base branch
run: |
git checkout ${{ github.event.pull_request.base.sha }}
git status
- name: Install base dependencies
run: npm ci
- name: Run baseline benchmarks
run: npm run benchmark:ci
continue-on-error: true
- name: Save baseline results
run: |
if [ -f benchmark-results.json ]; then
cp benchmark-results.json benchmark-baseline.json
else
echo '{"files":[]}' > benchmark-baseline.json
fi
# Compare results
- name: Checkout PR branch again
run: git checkout ${{ github.event.pull_request.head.sha }}
- name: Compare benchmarks
id: compare
run: |
node scripts/compare-benchmarks.js benchmark-current.json benchmark-baseline.json || echo "REGRESSION=true" >> $GITHUB_OUTPUT
# Upload comparison artifacts
- name: Upload benchmark comparison
if: always()
uses: actions/upload-artifact@v4
with:
name: benchmark-comparison-${{ github.run_number }}
path: |
benchmark-current.json
benchmark-baseline.json
benchmark-comparison.json
benchmark-comparison.md
retention-days: 30
# Post comparison to PR
- name: Post benchmark comparison to PR
if: always()
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
let comment = '## ⚡ Benchmark Comparison\n\n';
try {
if (fs.existsSync('benchmark-comparison.md')) {
const comparison = fs.readFileSync('benchmark-comparison.md', 'utf8');
comment += comparison;
} else {
comment += 'Benchmark comparison could not be generated.';
}
} catch (error) {
comment += `Error reading benchmark comparison: ${error.message}`;
}
comment += '\n\n---\n';
comment += `*[View full benchmark results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})*`;
// Find existing comment
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const botComment = comments.find(c =>
c.user.type === 'Bot' &&
c.body.includes('## ⚡ Benchmark Comparison')
);
if (botComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: comment
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: comment
});
}
# Add status check
- name: Set benchmark status
if: always()
uses: actions/github-script@v7
with:
script: |
const hasRegression = '${{ steps.compare.outputs.REGRESSION }}' === 'true';
const state = hasRegression ? 'failure' : 'success';
const description = hasRegression
? 'Performance regressions detected'
: 'No performance regressions';
await github.rest.repos.createCommitStatus({
owner: context.repo.owner,
repo: context.repo.repo,
sha: context.payload.pull_request.head.sha, // status must target the PR head, not the ephemeral merge commit
state: state,
target_url: `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`,
description: description,
context: 'benchmarks/regression-check'
});
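
The Compare benchmarks step above calls scripts/compare-benchmarks.js, which this commit adds but which is not reproduced in this excerpt. Below is a rough sketch of what such a script might do, assuming a Vitest benchmark JSON shape of files/groups/benchmarks (only the empty {"files":[]} fallback above is confirmed) and a hypothetical 10% slowdown threshold; the non-zero exit is what flips REGRESSION=true in the step above:

// scripts/compare-benchmarks.js sketch (hypothetical -- the real script is in
// this commit but not shown). Compares two Vitest benchmark JSON files and
// writes the comparison artifacts the workflow uploads.
const fs = require('fs');

const [currentPath, baselinePath] = process.argv.slice(2);
const THRESHOLD = 1.10; // assumed: flag mean times more than 10% slower

// Flatten one results file into a name -> mean-time map. The
// files/groups/benchmarks nesting is an assumption about the JSON shape.
function collect(path) {
  const data = JSON.parse(fs.readFileSync(path, 'utf8'));
  const means = new Map();
  for (const file of data.files ?? []) {
    for (const group of file.groups ?? []) {
      for (const bench of group.benchmarks ?? []) {
        means.set(bench.name, bench.mean);
      }
    }
  }
  return means;
}

const current = collect(currentPath);
const baseline = collect(baselinePath);

const rows = [];
let regression = false;
for (const [name, mean] of current) {
  const base = baseline.get(name);
  if (base === undefined) continue; // new benchmark, nothing to compare against
  const ratio = mean / base;
  if (ratio > THRESHOLD) regression = true;
  rows.push({ name, base, mean, ratio });
}

// Emit both artifacts referenced in the upload step.
fs.writeFileSync('benchmark-comparison.json', JSON.stringify({ regression, rows }, null, 2));
fs.writeFileSync('benchmark-comparison.md', [
  '| Benchmark | Baseline (ms) | Current (ms) | Ratio |',
  '| --- | --- | --- | --- |',
  ...rows.map(r => `| ${r.name} | ${r.base.toFixed(3)} | ${r.mean.toFixed(3)} | ${r.ratio.toFixed(2)}x |`),
].join('\n') + '\n');

// A non-zero exit is what the workflow turns into REGRESSION=true.
process.exit(regression ? 1 : 0);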


@@ -10,25 +10,281 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- name: Install dependencies
run: npm ci
# Run tests with coverage and multiple reporters
- name: Run tests with coverage
run: npm run test:coverage
env:
CI: true
# Generate test summary
- name: Generate test summary
if: always()
run: node scripts/generate-test-summary.js
# Generate detailed reports
- name: Generate detailed reports
if: always()
run: node scripts/generate-detailed-reports.js
# Upload test results artifacts
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
name: test-results-${{ github.run_number }}-${{ github.run_attempt }}
path: |
test-results/
test-summary.md
test-reports/
retention-days: 30
if-no-files-found: warn
# Upload coverage artifacts
- name: Upload coverage reports
if: always()
uses: actions/upload-artifact@v4
with:
name: coverage-${{ github.run_number }}-${{ github.run_attempt }}
path: |
coverage/
retention-days: 30
if-no-files-found: warn
# Upload coverage to Codecov
- name: Upload coverage to Codecov
if: always()
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./coverage/lcov.info
flags: unittests
name: codecov-umbrella
fail_ci_if_error: false
verbose: true
# Run linting
- name: Run linting
run: npm run lint
# Run type checking
- name: Run type checking
run: npm run typecheck || true # Allow to fail initially
# Run benchmarks
- name: Run benchmarks
id: benchmarks
run: npm run benchmark:ci
continue-on-error: true
# Upload benchmark results
- name: Upload benchmark results
if: always() && steps.benchmarks.outcome != 'skipped'
uses: actions/upload-artifact@v4
with:
name: benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}
path: |
benchmark-results.json
retention-days: 30
if-no-files-found: warn
# Create test report comment for PRs
- name: Create test report comment
if: github.event_name == 'pull_request' && always()
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
let summary = '## Test Results\n\nTest summary generation failed.';
try {
if (fs.existsSync('test-summary.md')) {
summary = fs.readFileSync('test-summary.md', 'utf8');
}
} catch (error) {
console.error('Error reading test summary:', error);
}
// Find existing comment
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const botComment = comments.find(comment =>
comment.user.type === 'Bot' &&
comment.body.includes('## Test Results')
);
if (botComment) {
// Update existing comment
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: summary
});
} else {
// Create new comment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: summary
});
}
# Generate job summary
- name: Generate job summary
if: always()
run: |
echo "# Test Run Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [ -f test-summary.md ]; then
cat test-summary.md >> $GITHUB_STEP_SUMMARY
else
echo "Test summary generation failed." >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "## 📥 Download Artifacts" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- [Test Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
echo "- [Coverage Report](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
echo "- [Benchmark Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
# Store test metadata
- name: Store test metadata
if: always()
run: |
cat > test-metadata.json << EOF
{
"run_id": "${{ github.run_id }}",
"run_number": "${{ github.run_number }}",
"run_attempt": "${{ github.run_attempt }}",
"sha": "${{ github.sha }}",
"ref": "${{ github.ref }}",
"event_name": "${{ github.event_name }}",
"repository": "${{ github.repository }}",
"actor": "${{ github.actor }}",
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"node_version": "$(node --version)",
"npm_version": "$(npm --version)"
}
EOF
- name: Upload test metadata
if: always()
uses: actions/upload-artifact@v4
with:
name: test-metadata-${{ github.run_number }}-${{ github.run_attempt }}
path: test-metadata.json
retention-days: 30
# Separate job to process and publish test results
publish-results:
needs: test
runs-on: ubuntu-latest
if: always()
permissions:
checks: write
pull-requests: write
steps:
- uses: actions/checkout@v4
# Download all artifacts
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: artifacts
# Publish test results as checks
- name: Publish test results
uses: dorny/test-reporter@v1
if: always()
with:
name: Test Results
path: 'artifacts/test-results-*/test-results/junit.xml'
reporter: java-junit
fail-on-error: false
# Create a combined artifact with all results
- name: Create combined results artifact
if: always()
run: |
mkdir -p combined-results
cp -r artifacts/* combined-results/ 2>/dev/null || true
# Create index file
# Unquoted delimiter so the $(date ...) placeholder below is expanded
cat > combined-results/index.html << EOF
<!DOCTYPE html>
<html>
<head>
<title>n8n-mcp Test Results</title>
<style>
body { font-family: Arial, sans-serif; margin: 40px; }
h1 { color: #333; }
.section { margin: 20px 0; padding: 20px; border: 1px solid #ddd; border-radius: 5px; }
a { color: #0066cc; text-decoration: none; }
a:hover { text-decoration: underline; }
</style>
</head>
<body>
<h1>n8n-mcp Test Results</h1>
<div class="section">
<h2>Test Reports</h2>
<ul>
<li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.html">📊 Detailed HTML Report</a></li>
<li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/html/index.html">📈 Vitest HTML Report</a></li>
<li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.md">📄 Markdown Report</a></li>
<li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-summary.md">📝 PR Summary</a></li>
<li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/junit.xml">🔧 JUnit XML</a></li>
<li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/results.json">🔢 JSON Results</a></li>
<li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.json">📊 Full JSON Report</a></li>
</ul>
</div>
<div class="section">
<h2>Coverage Reports</h2>
<ul>
<li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/html/index.html">HTML Coverage Report</a></li>
<li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/lcov.info">LCOV Report</a></li>
<li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/coverage-summary.json">Coverage Summary JSON</a></li>
</ul>
</div>
<div class="section">
<h2>Benchmark Results</h2>
<ul>
<li><a href="benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}/benchmark-results.json">Benchmark Results JSON</a></li>
</ul>
</div>
<div class="section">
<h2>Metadata</h2>
<ul>
<li><a href="test-metadata-${{ github.run_number }}-${{ github.run_attempt }}/test-metadata.json">Test Run Metadata</a></li>
</ul>
</div>
<div class="section">
<p><em>Generated at $(date -u +%Y-%m-%dT%H:%M:%SZ)</em></p>
<p><em>Run: #${{ github.run_number }} | SHA: ${{ github.sha }}</em></p>
</div>
</body>
</html>
EOF
- name: Upload combined results
if: always()
uses: actions/upload-artifact@v4
with:
name: all-test-results-${{ github.run_number }}
path: combined-results/
retention-days: 90
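
The test job relies on scripts/generate-test-summary.js for the PR comment and job summary, and on scripts/generate-detailed-reports.js for the report.html, report.md, and report.json files linked from the index above; neither script is reproduced in this excerpt. A minimal sketch of the summary script, assuming Vitest's Jest-compatible JSON fields (numTotalTests and friends are assumptions about the output shape):

// scripts/generate-test-summary.js sketch (hypothetical -- the real script is
// in this commit but not shown). Turns the Vitest JSON results into the
// test-summary.md that the workflow posts to the PR and the job summary.
const fs = require('fs');

const RESULTS = 'test-results/results.json';

let summary = '## Test Results\n\n';
try {
  // Field names assume Vitest's Jest-compatible JSON reporter output.
  const data = JSON.parse(fs.readFileSync(RESULTS, 'utf8'));
  const passed = data.numPassedTests ?? 0;
  const failed = data.numFailedTests ?? 0;
  const skipped = data.numPendingTests ?? 0;
  const total = data.numTotalTests ?? (passed + failed + skipped);
  summary += `${failed > 0 ? '❌' : '✅'} **${passed}/${total} tests passed**`;
  if (failed > 0) summary += `, ${failed} failed`;
  if (skipped > 0) summary += `, ${skipped} skipped`;
  summary += '\n';
} catch (error) {
  // Keep a readable summary even when the results file is missing.
  summary += `Could not read ${RESULTS}: ${error.message}\n`;
}

fs.writeFileSync('test-summary.md', summary);

generate-detailed-reports.js would plausibly follow the same read-then-write pattern, fanning the same JSON out into the HTML, Markdown, and JSON report formats.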