diff --git a/.github/workflows/benchmark-pr.yml b/.github/workflows/benchmark-pr.yml
new file mode 100644
index 0000000..34f9132
--- /dev/null
+++ b/.github/workflows/benchmark-pr.yml
@@ -0,0 +1,154 @@
+name: Benchmark PR Comparison
+on:
+ pull_request:
+ branches: [main]
+ paths:
+ - 'src/**'
+ - 'tests/benchmarks/**'
+ - 'package.json'
+ - 'vitest.config.benchmark.ts'
+
+permissions:
+ pull-requests: write
+ contents: read
+ statuses: write
+
+jobs:
+ benchmark-comparison:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout PR branch
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20
+ cache: 'npm'
+
+ - name: Install dependencies
+ run: npm ci
+
+ # Run benchmarks on current branch
+ - name: Run current benchmarks
+ run: npm run benchmark:ci
+
+ - name: Save current results
+ run: cp benchmark-results.json benchmark-current.json
+
+ # Checkout and run benchmarks on base branch
+ - name: Checkout base branch
+ run: |
+ git checkout ${{ github.event.pull_request.base.sha }}
+ git status
+
+ - name: Install base dependencies
+ run: npm ci
+
+ - name: Run baseline benchmarks
+ run: |
+ # Remove the PR-branch results so a failed baseline run can't be mistaken for the baseline
+ rm -f benchmark-results.json
+ npm run benchmark:ci
+ continue-on-error: true
+
+ - name: Save baseline results
+ run: |
+ if [ -f benchmark-results.json ]; then
+ cp benchmark-results.json benchmark-baseline.json
+ else
+ echo '{"files":[]}' > benchmark-baseline.json
+ fi
+
+ # Compare results
+ - name: Checkout PR branch again
+ run: git checkout ${{ github.event.pull_request.head.sha }}
+
+ - name: Compare benchmarks
+ id: compare
+ run: |
+ node scripts/compare-benchmarks.js benchmark-current.json benchmark-baseline.json || echo "REGRESSION=true" >> $GITHUB_OUTPUT
+
+ # Upload comparison artifacts
+ - name: Upload benchmark comparison
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-comparison-${{ github.run_number }}
+ path: |
+ benchmark-current.json
+ benchmark-baseline.json
+ benchmark-comparison.json
+ benchmark-comparison.md
+ retention-days: 30
+
+ # Post comparison to PR
+ - name: Post benchmark comparison to PR
+ if: always()
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+ let comment = '## ⚡ Benchmark Comparison\n\n';
+
+ try {
+ if (fs.existsSync('benchmark-comparison.md')) {
+ const comparison = fs.readFileSync('benchmark-comparison.md', 'utf8');
+ comment += comparison;
+ } else {
+ comment += 'Benchmark comparison could not be generated.';
+ }
+ } catch (error) {
+ comment += `Error reading benchmark comparison: ${error.message}`;
+ }
+
+ comment += '\n\n---\n';
+ comment += `*[View full benchmark results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})*`;
+
+ // Find existing comment
+ const { data: comments } = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const botComment = comments.find(comment =>
+ comment.user.type === 'Bot' &&
+ comment.body.includes('## ⚡ Benchmark Comparison')
+ );
+
+ if (botComment) {
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: botComment.id,
+ body: comment
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: comment
+ });
+ }
+
+ # Add status check
+ - name: Set benchmark status
+ if: always()
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const hasRegression = '${{ steps.compare.outputs.REGRESSION }}' === 'true';
+ const state = hasRegression ? 'failure' : 'success';
+ const description = hasRegression
+ ? 'Performance regressions detected'
+ : 'No performance regressions';
+
+ await github.rest.repos.createCommitStatus({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ sha: context.payload.pull_request.head.sha,
+ state: state,
+ target_url: `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`,
+ description: description,
+ context: 'benchmarks/regression-check'
+ });
\ No newline at end of file
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index bc07688..ba287a7 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -10,25 +10,281 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
+
- uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- - run: npm ci
- - run: npm run test:coverage # Run tests with coverage
+
+ - name: Install dependencies
+ run: npm ci
+
+ # Run tests with coverage and multiple reporters
+ - name: Run tests with coverage
+ run: npm run test:coverage
+ env:
+ CI: true
+
+ # Generate test summary
+ - name: Generate test summary
+ if: always()
+ run: node scripts/generate-test-summary.js
+
+ # Generate detailed reports
+ - name: Generate detailed reports
+ if: always()
+ run: node scripts/generate-detailed-reports.js
+
+ # Upload test results artifacts
+ - name: Upload test results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: test-results-${{ github.run_number }}-${{ github.run_attempt }}
+ path: |
+ test-results/
+ test-summary.md
+ test-reports/
+ retention-days: 30
+ if-no-files-found: warn
+
+ # Upload coverage artifacts
+ - name: Upload coverage reports
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: coverage-${{ github.run_number }}-${{ github.run_attempt }}
+ path: |
+ coverage/
+ retention-days: 30
+ if-no-files-found: warn
+
+ # Upload coverage to Codecov
- name: Upload coverage to Codecov
+ if: always()
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./coverage/lcov.info
flags: unittests
name: codecov-umbrella
- fail_ci_if_error: true
+ fail_ci_if_error: false
verbose: true
- - run: npm run lint
- - run: npm run typecheck || true # Allow to fail initially
- # Run benchmarks as part of CI (without performance regression checks)
- - name: Run benchmarks (smoke test)
- run: npm run benchmark -- --run tests/benchmarks/sample.bench.ts
- continue-on-error: true
\ No newline at end of file
+ # Run linting
+ - name: Run linting
+ run: npm run lint
+
+ # Run type checking
+ - name: Run type checking
+ run: npm run typecheck || true # Allow to fail initially
+
+ # Run benchmarks
+ - name: Run benchmarks
+ id: benchmarks
+ run: npm run benchmark:ci
+ continue-on-error: true
+
+ # Upload benchmark results
+ - name: Upload benchmark results
+ if: always() && steps.benchmarks.outcome != 'skipped'
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}
+ path: |
+ benchmark-results.json
+ retention-days: 30
+ if-no-files-found: warn
+
+ # Create test report comment for PRs
+ - name: Create test report comment
+ if: github.event_name == 'pull_request' && always()
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+ let summary = '## Test Results\n\nTest summary generation failed.';
+
+ try {
+ if (fs.existsSync('test-summary.md')) {
+ summary = fs.readFileSync('test-summary.md', 'utf8');
+ }
+ } catch (error) {
+ console.error('Error reading test summary:', error);
+ }
+
+ // Find existing comment
+ const { data: comments } = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const botComment = comments.find(comment =>
+ comment.user.type === 'Bot' &&
+ comment.body.includes('## Test Results')
+ );
+
+ if (botComment) {
+ // Update existing comment
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: botComment.id,
+ body: summary
+ });
+ } else {
+ // Create new comment
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: summary
+ });
+ }
+
+ # Generate job summary
+ - name: Generate job summary
+ if: always()
+ run: |
+ echo "# Test Run Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ if [ -f test-summary.md ]; then
+ cat test-summary.md >> $GITHUB_STEP_SUMMARY
+ else
+ echo "Test summary generation failed." >> $GITHUB_STEP_SUMMARY
+ fi
+
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "## š„ Download Artifacts" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "- [Test Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
+ echo "- [Coverage Report](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
+ echo "- [Benchmark Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
+
+ # Store test metadata
+ - name: Store test metadata
+ if: always()
+ run: |
+ cat > test-metadata.json << EOF
+ {
+ "run_id": "${{ github.run_id }}",
+ "run_number": "${{ github.run_number }}",
+ "run_attempt": "${{ github.run_attempt }}",
+ "sha": "${{ github.sha }}",
+ "ref": "${{ github.ref }}",
+ "event_name": "${{ github.event_name }}",
+ "repository": "${{ github.repository }}",
+ "actor": "${{ github.actor }}",
+ "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
+ "node_version": "$(node --version)",
+ "npm_version": "$(npm --version)"
+ }
+ EOF
+
+ - name: Upload test metadata
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: test-metadata-${{ github.run_number }}-${{ github.run_attempt }}
+ path: test-metadata.json
+ retention-days: 30
+
+ # Separate job to process and publish test results
+ publish-results:
+ needs: test
+ runs-on: ubuntu-latest
+ if: always()
+ permissions:
+ checks: write
+ pull-requests: write
+ steps:
+ - uses: actions/checkout@v4
+
+ # Download all artifacts
+ - name: Download all artifacts
+ uses: actions/download-artifact@v4
+ with:
+ path: artifacts
+
+ # Publish test results as checks
+ - name: Publish test results
+ uses: dorny/test-reporter@v1
+ if: always()
+ with:
+ name: Test Results
+ path: 'artifacts/test-results-*/test-results/junit.xml'
+ reporter: java-junit
+ fail-on-error: false
+
+ # Create a combined artifact with all results
+ - name: Create combined results artifact
+ if: always()
+ run: |
+ mkdir -p combined-results
+ cp -r artifacts/* combined-results/ 2>/dev/null || true
+
+ # Create index file
+ cat > combined-results/index.html << EOF
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <title>n8n-mcp Test Results</title>
+ </head>
+ <body>
+ <h1>n8n-mcp Test Results</h1>
+ <p>Generated at $(date -u +%Y-%m-%dT%H:%M:%SZ)</p>
+ <p>Run: #${{ github.run_number }} | SHA: ${{ github.sha }}</p>
+ </body>
+ </html>
+ EOF
+
+ - name: Upload combined results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: all-test-results-${{ github.run_number }}
+ path: combined-results/
+ retention-days: 90
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 402d05a..3301300 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,14 @@ logs/
coverage/
.nyc_output/
test-results/
+test-reports/
+test-summary.md
+test-metadata.json
+benchmark-results.json
+benchmark-comparison.md
+benchmark-comparison.json
+benchmark-current.json
+benchmark-baseline.json
tests/data/*.db
tests/fixtures/*.tmp
.vitest/
diff --git a/docs/test-artifacts.md b/docs/test-artifacts.md
new file mode 100644
index 0000000..763ba9b
--- /dev/null
+++ b/docs/test-artifacts.md
@@ -0,0 +1,146 @@
+# Test Artifacts Documentation
+
+This document describes the comprehensive test result artifact storage system implemented in the n8n-mcp project.
+
+## Overview
+
+The test artifact system captures, stores, and presents test results in multiple formats to facilitate debugging, analysis, and historical tracking of test performance.
+
+## Artifact Types
+
+### 1. Test Results
+- **JUnit XML** (`test-results/junit.xml`): Standard format for CI integration
+- **JSON Results** (`test-results/results.json`): Detailed test data for analysis
+- **HTML Report** (`test-results/html/index.html`): Interactive test report
+- **Test Summary** (`test-summary.md`): Markdown summary for PR comments
+
+### 2. Coverage Reports
+- **LCOV** (`coverage/lcov.info`): Standard coverage format
+- **HTML Coverage** (`coverage/html/index.html`): Interactive coverage browser
+- **Coverage Summary** (`coverage/coverage-summary.json`): JSON coverage data
+
+### 3. Benchmark Results
+- **Benchmark JSON** (`benchmark-results.json`): Raw benchmark data
+- **Comparison Reports** (`benchmark-comparison.md`): PR benchmark comparisons
+
+### 4. Detailed Reports
+- **HTML Report** (`test-reports/report.html`): Comprehensive styled report
+- **Markdown Report** (`test-reports/report.md`): Full markdown report
+- **JSON Report** (`test-reports/report.json`): Complete test data
+
+## GitHub Actions Integration
+
+### Test Workflow (`test.yml`)
+
+The main test workflow:
+1. Runs tests with coverage using multiple reporters
+2. Generates test summaries and detailed reports
+3. Uploads artifacts with metadata (upload step shape shown below)
+4. Posts summaries to PRs
+5. Creates a combined artifact index
+
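+Each artifact name is scoped with the run number and attempt so repeated runs and re-runs don't overwrite each other. For reference, the upload steps in `test.yml` follow this shape (values taken from the workflow; paths and retention vary per artifact):
+
+```yaml
+- name: Upload test results
+  if: always()
+  uses: actions/upload-artifact@v4
+  with:
+    name: test-results-${{ github.run_number }}-${{ github.run_attempt }}
+    path: |
+      test-results/
+      test-summary.md
+      test-reports/
+    retention-days: 30
+    if-no-files-found: warn
+```
+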
+### Benchmark PR Workflow (`benchmark-pr.yml`)
+
+For pull requests:
+1. Runs benchmarks on PR branch
+2. Runs benchmarks on base branch
+3. Compares results (input format shown below)
+4. Posts comparison to PR
+5. Sets status checks for regressions
+
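+The comparison reads the JSON written by the benchmark reporter (`benchmark-results.json`). The comparator walks a `files → groups → benchmarks` tree and uses `result.hz` (ops/sec) and `result.mean` for each entry; a trimmed, illustrative payload (group and benchmark names are placeholders) looks like:
+
+```json
+{
+  "files": [
+    {
+      "filepath": "tests/benchmarks/sample.bench.ts",
+      "groups": [
+        {
+          "name": "node-loading",
+          "benchmarks": [
+            { "name": "loadAllNodes", "result": { "hz": 1250.4, "mean": 0.0008, "min": 0.0006, "max": 0.0031, "samples": 625 } }
+          ]
+        }
+      ]
+    }
+  ]
+}
+```
+
+A benchmark whose ops/sec drops by 10% or more against the baseline is reported as a regression and surfaces as a failing `benchmarks/regression-check` status.
+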
+## Artifact Retention
+
+- **Test Results**: 30 days
+- **Coverage Reports**: 30 days
+- **Benchmark Results**: 30 days
+- **Combined Results**: 90 days
+- **Test Metadata**: 30 days
+
+## PR Comment Integration
+
+The system automatically:
+- Posts test summaries to PR comments
+- Updates existing comments instead of creating duplicates (see the sketch below)
+- Includes links to full artifacts
+- Shows coverage and benchmark changes
+
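+Deduplication works by scanning the PR's existing comments for the report heading and updating that comment when found. A condensed sketch of the `actions/github-script` logic both workflows use (the heading marker differs per workflow):
+
+```javascript
+// Look for a previous bot comment carrying the report marker
+const { data: comments } = await github.rest.issues.listComments({
+  owner: context.repo.owner,
+  repo: context.repo.repo,
+  issue_number: context.issue.number,
+});
+const existing = comments.find(c =>
+  c.user.type === 'Bot' && c.body.includes('## Test Results')
+);
+
+// Update it in place, or create a new comment on the first run
+if (existing) {
+  await github.rest.issues.updateComment({
+    owner: context.repo.owner, repo: context.repo.repo,
+    comment_id: existing.id, body: summary,
+  });
+} else {
+  await github.rest.issues.createComment({
+    owner: context.repo.owner, repo: context.repo.repo,
+    issue_number: context.issue.number, body: summary,
+  });
+}
+```
+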
+## Job Summary
+
+Each workflow run includes a job summary with:
+- Test results overview
+- Coverage summary
+- Benchmark results
+- Direct links to download artifacts
+
+## Local Development
+
+### Running Tests with Reports
+
+```bash
+# Run tests with all reporters
+CI=true npm run test:coverage
+
+# Generate detailed reports
+node scripts/generate-detailed-reports.js
+
+# Generate test summary
+node scripts/generate-test-summary.js
+
+# Compare benchmarks
+node scripts/compare-benchmarks.js benchmark-results.json benchmark-baseline.json
+```
+
+### Report Locations
+
+When running locally, reports are generated in:
+- `test-results/` - Vitest outputs
+- `test-reports/` - Detailed reports
+- `coverage/` - Coverage reports
+- Root directory - Summary files
+
+## Report Formats
+
+### HTML Report Features
+- Responsive design
+- Test suite breakdown
+- Failed test details with error messages
+- Coverage visualization with progress bars
+- Benchmark performance metrics
+- Sortable tables
+
+### Markdown Report Features
+- GitHub-compatible formatting
+- Summary statistics
+- Failed test listings
+- Coverage breakdown
+- Benchmark comparisons
+
+### JSON Report Features
+- Complete test data
+- Programmatic access
+- Historical comparison
+- CI/CD integration
+
+## Best Practices
+
+1. **Always Check Artifacts**: When tests fail in CI, download and review the HTML report (see the CLI example below)
+2. **Monitor Coverage**: Use the coverage reports to identify untested code
+3. **Track Benchmarks**: Review benchmark comparisons on performance-critical PRs
+4. **Archive Important Runs**: Download artifacts from significant releases
+
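+Artifacts can also be fetched from the command line with the GitHub CLI; the run ID and artifact name below are placeholders for a real run:
+
+```bash
+# Find the most recent run of the test workflow
+gh run list --workflow=test.yml --limit 1
+
+# Download one artifact from that run into ./artifacts
+gh run download <run-id> --name test-results-<run-number>-<run-attempt> --dir artifacts
+```
+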
+## Troubleshooting
+
+### Missing Artifacts
+- Check if tests ran to completion
+- Verify artifact upload steps executed
+- Check retention period hasn't expired
+
+### Report Generation Failures
+- Ensure all dependencies are installed
+- Check for valid test/coverage output files
+- Review workflow logs for errors
+
+### PR Comment Issues
+- Verify GitHub Actions permissions
+- Check bot authentication
+- Review comment posting logs
\ No newline at end of file
diff --git a/scripts/compare-benchmarks.js b/scripts/compare-benchmarks.js
new file mode 100644
index 0000000..7486fea
--- /dev/null
+++ b/scripts/compare-benchmarks.js
@@ -0,0 +1,260 @@
+#!/usr/bin/env node
+import { readFileSync, existsSync, writeFileSync } from 'fs';
+import { resolve } from 'path';
+
+/**
+ * Compare benchmark results between runs
+ */
+class BenchmarkComparator {
+ constructor() {
+ this.threshold = 0.1; // 10% threshold for significant changes
+ }
+
+ loadBenchmarkResults(path) {
+ if (!existsSync(path)) {
+ return null;
+ }
+
+ try {
+ return JSON.parse(readFileSync(path, 'utf-8'));
+ } catch (error) {
+ console.error(`Error loading benchmark results from ${path}:`, error);
+ return null;
+ }
+ }
+
+ compareBenchmarks(current, baseline) {
+ const comparison = {
+ timestamp: new Date().toISOString(),
+ summary: {
+ improved: 0,
+ regressed: 0,
+ unchanged: 0,
+ added: 0,
+ removed: 0
+ },
+ benchmarks: []
+ };
+
+ // Create maps for easy lookup
+ const currentMap = new Map();
+ const baselineMap = new Map();
+
+ // Process current benchmarks
+ if (current && current.files) {
+ for (const file of current.files) {
+ for (const group of file.groups || []) {
+ for (const bench of group.benchmarks || []) {
+ const key = `${group.name}::${bench.name}`;
+ currentMap.set(key, {
+ ops: bench.result.hz,
+ mean: bench.result.mean,
+ file: file.filepath
+ });
+ }
+ }
+ }
+ }
+
+ // Process baseline benchmarks
+ if (baseline && baseline.files) {
+ for (const file of baseline.files) {
+ for (const group of file.groups || []) {
+ for (const bench of group.benchmarks || []) {
+ const key = `${group.name}::${bench.name}`;
+ baselineMap.set(key, {
+ ops: bench.result.hz,
+ mean: bench.result.mean,
+ file: file.filepath
+ });
+ }
+ }
+ }
+ }
+
+ // Compare benchmarks
+ for (const [key, current] of currentMap) {
+ const baseline = baselineMap.get(key);
+
+ if (!baseline) {
+ // New benchmark
+ comparison.summary.added++;
+ comparison.benchmarks.push({
+ name: key,
+ status: 'added',
+ current: current.ops,
+ baseline: null,
+ change: null,
+ file: current.file
+ });
+ } else {
+ // Compare performance
+ const change = ((current.ops - baseline.ops) / baseline.ops) * 100;
+ let status = 'unchanged';
+
+ if (Math.abs(change) >= this.threshold * 100) {
+ if (change > 0) {
+ status = 'improved';
+ comparison.summary.improved++;
+ } else {
+ status = 'regressed';
+ comparison.summary.regressed++;
+ }
+ } else {
+ comparison.summary.unchanged++;
+ }
+
+ comparison.benchmarks.push({
+ name: key,
+ status,
+ current: current.ops,
+ baseline: baseline.ops,
+ change,
+ meanCurrent: current.mean,
+ meanBaseline: baseline.mean,
+ file: current.file
+ });
+ }
+ }
+
+ // Check for removed benchmarks
+ for (const [key, baseline] of baselineMap) {
+ if (!currentMap.has(key)) {
+ comparison.summary.removed++;
+ comparison.benchmarks.push({
+ name: key,
+ status: 'removed',
+ current: null,
+ baseline: baseline.ops,
+ change: null,
+ file: baseline.file
+ });
+ }
+ }
+
+ // Sort by change percentage (regressions first)
+ comparison.benchmarks.sort((a, b) => {
+ if (a.status === 'regressed' && b.status !== 'regressed') return -1;
+ if (b.status === 'regressed' && a.status !== 'regressed') return 1;
+ if (a.change !== null && b.change !== null) {
+ return a.change - b.change;
+ }
+ return 0;
+ });
+
+ return comparison;
+ }
+
+ generateMarkdownReport(comparison) {
+ let report = '## Benchmark Comparison Report\n\n';
+
+ const { summary } = comparison;
+ report += '### Summary\n\n';
+ report += `- **Improved**: ${summary.improved} benchmarks\n`;
+ report += `- **Regressed**: ${summary.regressed} benchmarks\n`;
+ report += `- **Unchanged**: ${summary.unchanged} benchmarks\n`;
+ report += `- **Added**: ${summary.added} benchmarks\n`;
+ report += `- **Removed**: ${summary.removed} benchmarks\n\n`;
+
+ // Regressions
+ const regressions = comparison.benchmarks.filter(b => b.status === 'regressed');
+ if (regressions.length > 0) {
+ report += '### ⚠️ Performance Regressions\n\n';
+ report += '| Benchmark | Current | Baseline | Change |\n';
+ report += '|-----------|---------|----------|--------|\n';
+
+ for (const bench of regressions) {
+ const currentOps = bench.current.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ const baselineOps = bench.baseline.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ const changeStr = bench.change.toFixed(2);
+ report += `| ${bench.name} | ${currentOps} ops/s | ${baselineOps} ops/s | **${changeStr}%** |\n`;
+ }
+ report += '\n';
+ }
+
+ // Improvements
+ const improvements = comparison.benchmarks.filter(b => b.status === 'improved');
+ if (improvements.length > 0) {
+ report += '### ✅ Performance Improvements\n\n';
+ report += '| Benchmark | Current | Baseline | Change |\n';
+ report += '|-----------|---------|----------|--------|\n';
+
+ for (const bench of improvements) {
+ const currentOps = bench.current.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ const baselineOps = bench.baseline.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ const changeStr = bench.change.toFixed(2);
+ report += `| ${bench.name} | ${currentOps} ops/s | ${baselineOps} ops/s | **+${changeStr}%** |\n`;
+ }
+ report += '\n';
+ }
+
+ // New benchmarks
+ const added = comparison.benchmarks.filter(b => b.status === 'added');
+ if (added.length > 0) {
+ report += '### 🆕 New Benchmarks\n\n';
+ report += '| Benchmark | Performance |\n';
+ report += '|-----------|-------------|\n';
+
+ for (const bench of added) {
+ const ops = bench.current.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ report += `| ${bench.name} | ${ops} ops/s |\n`;
+ }
+ report += '\n';
+ }
+
+ return report;
+ }
+
+ generateJsonReport(comparison) {
+ return JSON.stringify(comparison, null, 2);
+ }
+
+ async compare(currentPath, baselinePath) {
+ // Load results
+ const current = this.loadBenchmarkResults(currentPath);
+ const baseline = this.loadBenchmarkResults(baselinePath);
+
+ if (!current && !baseline) {
+ console.error('No benchmark results found');
+ return;
+ }
+
+ // Generate comparison
+ const comparison = this.compareBenchmarks(current, baseline);
+
+ // Generate reports
+ const markdownReport = this.generateMarkdownReport(comparison);
+ const jsonReport = this.generateJsonReport(comparison);
+
+ // Write reports
+ writeFileSync('benchmark-comparison.md', markdownReport);
+ writeFileSync('benchmark-comparison.json', jsonReport);
+
+ // Output summary to console
+ console.log(markdownReport);
+
+ // Return exit code based on regressions
+ if (comparison.summary.regressed > 0) {
+ console.error(`\n❌ Found ${comparison.summary.regressed} performance regressions`);
+ process.exit(1);
+ } else {
+ console.log(`\n✅ No performance regressions found`);
+ process.exit(0);
+ }
+ }
+}
+
+// Parse command line arguments
+const args = process.argv.slice(2);
+if (args.length < 1) {
+ console.error('Usage: node compare-benchmarks.js <current-results> [baseline-results]');
+ console.error('If baseline-results is not provided, it will look for benchmark-baseline.json');
+ process.exit(1);
+}
+
+const currentPath = args[0];
+const baselinePath = args[1] || 'benchmark-baseline.json';
+
+// Run comparison
+const comparator = new BenchmarkComparator();
+comparator.compare(currentPath, baselinePath).catch(console.error);
\ No newline at end of file
diff --git a/scripts/generate-detailed-reports.js b/scripts/generate-detailed-reports.js
new file mode 100644
index 0000000..307e6a1
--- /dev/null
+++ b/scripts/generate-detailed-reports.js
@@ -0,0 +1,675 @@
+#!/usr/bin/env node
+import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
+import { resolve, dirname } from 'path';
+
+/**
+ * Generate detailed test reports in multiple formats
+ */
+class TestReportGenerator {
+ constructor() {
+ this.results = {
+ tests: null,
+ coverage: null,
+ benchmarks: null,
+ metadata: {
+ timestamp: new Date().toISOString(),
+ repository: process.env.GITHUB_REPOSITORY || 'n8n-mcp',
+ sha: process.env.GITHUB_SHA || 'unknown',
+ branch: process.env.GITHUB_REF || 'unknown',
+ runId: process.env.GITHUB_RUN_ID || 'local',
+ runNumber: process.env.GITHUB_RUN_NUMBER || '0',
+ }
+ };
+ }
+
+ loadTestResults() {
+ const testResultPath = resolve(process.cwd(), 'test-results/results.json');
+ if (existsSync(testResultPath)) {
+ try {
+ const data = JSON.parse(readFileSync(testResultPath, 'utf-8'));
+ this.results.tests = this.processTestResults(data);
+ } catch (error) {
+ console.error('Error loading test results:', error);
+ }
+ }
+ }
+
+ processTestResults(data) {
+ const processedResults = {
+ summary: {
+ total: data.numTotalTests || 0,
+ passed: data.numPassedTests || 0,
+ failed: data.numFailedTests || 0,
+ skipped: data.numSkippedTests || 0,
+ duration: data.duration || 0,
+ success: (data.numFailedTests || 0) === 0
+ },
+ testSuites: [],
+ failedTests: []
+ };
+
+ // Process test suites
+ if (data.testResults) {
+ for (const suite of data.testResults) {
+ const suiteInfo = {
+ name: suite.name,
+ duration: suite.duration || 0,
+ tests: {
+ total: (suite.numPassingTests || 0) + (suite.numFailingTests || 0) + (suite.numPendingTests || 0),
+ passed: suite.numPassingTests || 0,
+ failed: suite.numFailingTests || 0,
+ skipped: suite.numPendingTests || 0
+ },
+ status: suite.numFailingTests === 0 ? 'passed' : 'failed'
+ };
+
+ processedResults.testSuites.push(suiteInfo);
+
+ // Collect failed tests
+ if (suite.testResults) {
+ for (const test of suite.testResults) {
+ if (test.status === 'failed') {
+ processedResults.failedTests.push({
+ suite: suite.name,
+ test: test.title,
+ duration: test.duration || 0,
+ error: test.failureMessages ? test.failureMessages.join('\n') : 'Unknown error'
+ });
+ }
+ }
+ }
+ }
+ }
+
+ return processedResults;
+ }
+
+ loadCoverageResults() {
+ const coveragePath = resolve(process.cwd(), 'coverage/coverage-summary.json');
+ if (existsSync(coveragePath)) {
+ try {
+ const data = JSON.parse(readFileSync(coveragePath, 'utf-8'));
+ this.results.coverage = this.processCoverageResults(data);
+ } catch (error) {
+ console.error('Error loading coverage results:', error);
+ }
+ }
+ }
+
+ processCoverageResults(data) {
+ const coverage = {
+ summary: {
+ lines: data.total.lines.pct,
+ statements: data.total.statements.pct,
+ functions: data.total.functions.pct,
+ branches: data.total.branches.pct,
+ average: 0
+ },
+ files: []
+ };
+
+ // Calculate average
+ coverage.summary.average = (
+ coverage.summary.lines +
+ coverage.summary.statements +
+ coverage.summary.functions +
+ coverage.summary.branches
+ ) / 4;
+
+ // Process file coverage
+ for (const [filePath, fileData] of Object.entries(data)) {
+ if (filePath !== 'total') {
+ coverage.files.push({
+ path: filePath,
+ lines: fileData.lines.pct,
+ statements: fileData.statements.pct,
+ functions: fileData.functions.pct,
+ branches: fileData.branches.pct,
+ uncoveredLines: fileData.lines.total - fileData.lines.covered
+ });
+ }
+ }
+
+ // Sort files by coverage (lowest first)
+ coverage.files.sort((a, b) => a.lines - b.lines);
+
+ return coverage;
+ }
+
+ loadBenchmarkResults() {
+ const benchmarkPath = resolve(process.cwd(), 'benchmark-results.json');
+ if (existsSync(benchmarkPath)) {
+ try {
+ const data = JSON.parse(readFileSync(benchmarkPath, 'utf-8'));
+ this.results.benchmarks = this.processBenchmarkResults(data);
+ } catch (error) {
+ console.error('Error loading benchmark results:', error);
+ }
+ }
+ }
+
+ processBenchmarkResults(data) {
+ const benchmarks = {
+ timestamp: data.timestamp,
+ results: []
+ };
+
+ for (const file of data.files || []) {
+ for (const group of file.groups || []) {
+ for (const benchmark of group.benchmarks || []) {
+ benchmarks.results.push({
+ file: file.filepath,
+ group: group.name,
+ name: benchmark.name,
+ ops: benchmark.result.hz,
+ mean: benchmark.result.mean,
+ min: benchmark.result.min,
+ max: benchmark.result.max,
+ p75: benchmark.result.p75,
+ p99: benchmark.result.p99,
+ samples: benchmark.result.samples
+ });
+ }
+ }
+ }
+
+ // Sort by ops/sec (highest first)
+ benchmarks.results.sort((a, b) => b.ops - a.ops);
+
+ return benchmarks;
+ }
+
+ generateMarkdownReport() {
+ let report = '# n8n-mcp Test Report\n\n';
+ report += `Generated: ${this.results.metadata.timestamp}\n\n`;
+
+ // Metadata
+ report += '## Build Information\n\n';
+ report += `- **Repository**: ${this.results.metadata.repository}\n`;
+ report += `- **Commit**: ${this.results.metadata.sha.substring(0, 7)}\n`;
+ report += `- **Branch**: ${this.results.metadata.branch}\n`;
+ report += `- **Run**: #${this.results.metadata.runNumber}\n\n`;
+
+ // Test Results
+ if (this.results.tests) {
+ const { summary, testSuites, failedTests } = this.results.tests;
+ const emoji = summary.success ? '✅' : '❌';
+
+ report += `## ${emoji} Test Results\n\n`;
+ report += `### Summary\n\n`;
+ report += `- **Total Tests**: ${summary.total}\n`;
+ report += `- **Passed**: ${summary.passed} (${((summary.passed / summary.total) * 100).toFixed(1)}%)\n`;
+ report += `- **Failed**: ${summary.failed}\n`;
+ report += `- **Skipped**: ${summary.skipped}\n`;
+ report += `- **Duration**: ${(summary.duration / 1000).toFixed(2)}s\n\n`;
+
+ // Test Suites
+ if (testSuites.length > 0) {
+ report += '### Test Suites\n\n';
+ report += '| Suite | Status | Tests | Duration |\n';
+ report += '|-------|--------|-------|----------|\n';
+
+ for (const suite of testSuites) {
+ const status = suite.status === 'passed' ? '✅' : '❌';
+ const tests = `${suite.tests.passed}/${suite.tests.total}`;
+ const duration = `${(suite.duration / 1000).toFixed(2)}s`;
+ report += `| ${suite.name} | ${status} | ${tests} | ${duration} |\n`;
+ }
+ report += '\n';
+ }
+
+ // Failed Tests
+ if (failedTests.length > 0) {
+ report += '### Failed Tests\n\n';
+ for (const failed of failedTests) {
+ report += `#### ${failed.suite} > ${failed.test}\n\n`;
+ report += '```\n';
+ report += failed.error;
+ report += '\n```\n\n';
+ }
+ }
+ }
+
+ // Coverage Results
+ if (this.results.coverage) {
+ const { summary, files } = this.results.coverage;
+ const emoji = summary.average >= 80 ? '✅' : summary.average >= 60 ? '⚠️' : '❌';
+
+ report += `## ${emoji} Coverage Report\n\n`;
+ report += '### Summary\n\n';
+ report += `- **Lines**: ${summary.lines.toFixed(2)}%\n`;
+ report += `- **Statements**: ${summary.statements.toFixed(2)}%\n`;
+ report += `- **Functions**: ${summary.functions.toFixed(2)}%\n`;
+ report += `- **Branches**: ${summary.branches.toFixed(2)}%\n`;
+ report += `- **Average**: ${summary.average.toFixed(2)}%\n\n`;
+
+ // Files with low coverage
+ const lowCoverageFiles = files.filter(f => f.lines < 80).slice(0, 10);
+ if (lowCoverageFiles.length > 0) {
+ report += '### Files with Low Coverage\n\n';
+ report += '| File | Lines | Uncovered Lines |\n';
+ report += '|------|-------|----------------|\n';
+
+ for (const file of lowCoverageFiles) {
+ const fileName = file.path.split('/').pop();
+ report += `| ${fileName} | ${file.lines.toFixed(1)}% | ${file.uncoveredLines} |\n`;
+ }
+ report += '\n';
+ }
+ }
+
+ // Benchmark Results
+ if (this.results.benchmarks && this.results.benchmarks.results.length > 0) {
+ report += '## ⚡ Benchmark Results\n\n';
+ report += '### Top Performers\n\n';
+ report += '| Benchmark | Ops/sec | Mean (ms) | Samples |\n';
+ report += '|-----------|---------|-----------|----------|\n';
+
+ for (const bench of this.results.benchmarks.results.slice(0, 10)) {
+ const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ const meanFormatted = (bench.mean * 1000).toFixed(3);
+ report += `| ${bench.name} | ${opsFormatted} | ${meanFormatted} | ${bench.samples} |\n`;
+ }
+ report += '\n';
+ }
+
+ return report;
+ }
+
+ generateJsonReport() {
+ return JSON.stringify(this.results, null, 2);
+ }
+
+ generateHtmlReport() {
+ const htmlTemplate = `<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>n8n-mcp Test Report</title>
+</head>
+<body>
+<h1>n8n-mcp Test Report</h1>
+${this.generateTestResultsHtml()}
+${this.generateCoverageHtml()}
+${this.generateBenchmarkHtml()}
+</body>
+</html>`;
+
+ return htmlTemplate;
+ }
+
+ generateTestResultsHtml() {
+ if (!this.results.tests) return '';
+
+ const { summary, testSuites, failedTests } = this.results.tests;
+ const successRate = ((summary.passed / summary.total) * 100).toFixed(1);
+ const statusClass = summary.success ? 'success' : 'danger';
+ const statusIcon = summary.success ? '✅' : '❌';
+
+ let html = `
+
+
${statusIcon} Test Results
+
+
+
${summary.total}
+
Total Tests
+
+
+
${summary.passed}
+
Passed
+
+
+
${summary.failed}
+
Failed
+
+
+
${successRate}%
+
Success Rate
+
+
+
${(summary.duration / 1000).toFixed(1)}s
+
Duration
+
+
`;
+
+ if (testSuites.length > 0) {
+ html += `
+
Test Suites
+
+
+
+ | Suite |
+ Status |
+ Tests |
+ Duration |
+
+
+ `;
+
+ for (const suite of testSuites) {
+ const status = suite.status === 'passed' ? '✅' : '❌';
+ const statusClass = suite.status === 'passed' ? 'success' : 'danger';
+ html += `
+
+ | ${suite.name} |
+ ${status} |
+ ${suite.tests.passed}/${suite.tests.total} |
+ ${(suite.duration / 1000).toFixed(2)}s |
+
`;
+ }
+
+ html += `
+
+
`;
+ }
+
+ if (failedTests.length > 0) {
+ html += `
+
Failed Tests
`;
+
+ for (const failed of failedTests) {
+ html += `
+
+
${failed.suite} > ${failed.test}
+
${this.escapeHtml(failed.error)}
+
`;
+ }
+ }
+
+ html += `
`;
+ return html;
+ }
+
+ generateCoverageHtml() {
+ if (!this.results.coverage) return '';
+
+ const { summary, files } = this.results.coverage;
+ const coverageClass = summary.average >= 80 ? 'success' : summary.average >= 60 ? 'warning' : 'danger';
+ const progressClass = summary.average >= 80 ? '' : summary.average >= 60 ? 'coverage-medium' : 'coverage-low';
+
+ let html = `
+
+
š Coverage Report
+
+
+
${summary.average.toFixed(1)}%
+
Average Coverage
+
+
+
${summary.lines.toFixed(1)}%
+
Lines
+
+
+
${summary.statements.toFixed(1)}%
+
Statements
+
+
+
${summary.functions.toFixed(1)}%
+
Functions
+
+
+
${summary.branches.toFixed(1)}%
+
Branches
+
+
+
+
`;
+
+ const lowCoverageFiles = files.filter(f => f.lines < 80).slice(0, 10);
+ if (lowCoverageFiles.length > 0) {
+ html += `
+
Files with Low Coverage
+
+
+
+ | File |
+ Lines |
+ Statements |
+ Functions |
+ Branches |
+
+
+ `;
+
+ for (const file of lowCoverageFiles) {
+ const fileName = file.path.split('/').pop();
+ html += `
+
+ | ${fileName} |
+ ${file.lines.toFixed(1)}% |
+ ${file.statements.toFixed(1)}% |
+ ${file.functions.toFixed(1)}% |
+ ${file.branches.toFixed(1)}% |
+
`;
+ }
+
+ html += `
+
+
`;
+ }
+
+ html += `
`;
+ return html;
+ }
+
+ generateBenchmarkHtml() {
+ if (!this.results.benchmarks || this.results.benchmarks.results.length === 0) return '';
+
+ let html = `
+
+
ā” Benchmark Results
+
+
+
+ | Benchmark |
+ Operations/sec |
+ Mean Time (ms) |
+ Min (ms) |
+ Max (ms) |
+ Samples |
+
+
+ `;
+
+ for (const bench of this.results.benchmarks.results.slice(0, 20)) {
+ const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ const meanFormatted = (bench.mean * 1000).toFixed(3);
+ const minFormatted = (bench.min * 1000).toFixed(3);
+ const maxFormatted = (bench.max * 1000).toFixed(3);
+
+ html += `
+
+ | ${bench.name} |
+ ${opsFormatted} |
+ ${meanFormatted} |
+ ${minFormatted} |
+ ${maxFormatted} |
+ ${bench.samples} |
+
`;
+ }
+
+ html += `
+
+
`;
+
+ if (this.results.benchmarks.results.length > 20) {
+ html += `
Showing top 20 of ${this.results.benchmarks.results.length} benchmarks
`;
+ }
+
+ html += `
`;
+ return html;
+ }
+
+ escapeHtml(text) {
+ const map = {
+ '&': '&amp;',
+ '<': '&lt;',
+ '>': '&gt;',
+ '"': '&quot;',
+ "'": '&#039;'
+ };
+ return text.replace(/[&<>"']/g, m => map[m]);
+ }
+
+ async generate() {
+ // Load all results
+ this.loadTestResults();
+ this.loadCoverageResults();
+ this.loadBenchmarkResults();
+
+ // Ensure output directory exists
+ const outputDir = resolve(process.cwd(), 'test-reports');
+ if (!existsSync(outputDir)) {
+ mkdirSync(outputDir, { recursive: true });
+ }
+
+ // Generate reports in different formats
+ const markdownReport = this.generateMarkdownReport();
+ const jsonReport = this.generateJsonReport();
+ const htmlReport = this.generateHtmlReport();
+
+ // Write reports
+ writeFileSync(resolve(outputDir, 'report.md'), markdownReport);
+ writeFileSync(resolve(outputDir, 'report.json'), jsonReport);
+ writeFileSync(resolve(outputDir, 'report.html'), htmlReport);
+
+ console.log('Test reports generated successfully:');
+ console.log('- test-reports/report.md');
+ console.log('- test-reports/report.json');
+ console.log('- test-reports/report.html');
+ }
+}
+
+// Run the generator
+const generator = new TestReportGenerator();
+generator.generate().catch(console.error);
\ No newline at end of file
diff --git a/scripts/generate-test-summary.js b/scripts/generate-test-summary.js
new file mode 100644
index 0000000..f123494
--- /dev/null
+++ b/scripts/generate-test-summary.js
@@ -0,0 +1,167 @@
+#!/usr/bin/env node
+import { readFileSync, existsSync, writeFileSync } from 'fs';
+import { resolve } from 'path';
+
+/**
+ * Generate a markdown summary of test results for PR comments
+ */
+function generateTestSummary() {
+ const results = {
+ tests: null,
+ coverage: null,
+ benchmarks: null,
+ timestamp: new Date().toISOString()
+ };
+
+ // Read test results
+ const testResultPath = resolve(process.cwd(), 'test-results/results.json');
+ if (existsSync(testResultPath)) {
+ try {
+ const testData = JSON.parse(readFileSync(testResultPath, 'utf-8'));
+ const totalTests = testData.numTotalTests || 0;
+ const passedTests = testData.numPassedTests || 0;
+ const failedTests = testData.numFailedTests || 0;
+ const skippedTests = testData.numSkippedTests || 0;
+ const duration = testData.duration || 0;
+
+ results.tests = {
+ total: totalTests,
+ passed: passedTests,
+ failed: failedTests,
+ skipped: skippedTests,
+ duration: duration,
+ success: failedTests === 0
+ };
+ } catch (error) {
+ console.error('Error reading test results:', error);
+ }
+ }
+
+ // Read coverage results
+ const coveragePath = resolve(process.cwd(), 'coverage/coverage-summary.json');
+ if (existsSync(coveragePath)) {
+ try {
+ const coverageData = JSON.parse(readFileSync(coveragePath, 'utf-8'));
+ const total = coverageData.total;
+
+ results.coverage = {
+ lines: total.lines.pct,
+ statements: total.statements.pct,
+ functions: total.functions.pct,
+ branches: total.branches.pct
+ };
+ } catch (error) {
+ console.error('Error reading coverage results:', error);
+ }
+ }
+
+ // Read benchmark results
+ const benchmarkPath = resolve(process.cwd(), 'benchmark-results.json');
+ if (existsSync(benchmarkPath)) {
+ try {
+ const benchmarkData = JSON.parse(readFileSync(benchmarkPath, 'utf-8'));
+ const benchmarks = [];
+
+ for (const file of benchmarkData.files || []) {
+ for (const group of file.groups || []) {
+ for (const benchmark of group.benchmarks || []) {
+ benchmarks.push({
+ name: `${group.name} - ${benchmark.name}`,
+ mean: benchmark.result.mean,
+ ops: benchmark.result.hz
+ });
+ }
+ }
+ }
+
+ results.benchmarks = benchmarks;
+ } catch (error) {
+ console.error('Error reading benchmark results:', error);
+ }
+ }
+
+ // Generate markdown summary
+ let summary = '## Test Results Summary\n\n';
+
+ // Test results
+ if (results.tests) {
+ const { total, passed, failed, skipped, duration, success } = results.tests;
+ const emoji = success ? '✅' : '❌';
+ const status = success ? 'PASSED' : 'FAILED';
+
+ summary += `### ${emoji} Tests ${status}\n\n`;
+ summary += `| Metric | Value |\n`;
+ summary += `|--------|-------|\n`;
+ summary += `| Total Tests | ${total} |\n`;
+ summary += `| Passed | ${passed} |\n`;
+ summary += `| Failed | ${failed} |\n`;
+ summary += `| Skipped | ${skipped} |\n`;
+ summary += `| Duration | ${(duration / 1000).toFixed(2)}s |\n\n`;
+ }
+
+ // Coverage results
+ if (results.coverage) {
+ const { lines, statements, functions, branches } = results.coverage;
+ const avgCoverage = (lines + statements + functions + branches) / 4;
+ const emoji = avgCoverage >= 80 ? '✅' : avgCoverage >= 60 ? '⚠️' : '❌';
+
+ summary += `### ${emoji} Coverage Report\n\n`;
+ summary += `| Type | Coverage |\n`;
+ summary += `|------|----------|\n`;
+ summary += `| Lines | ${lines.toFixed(2)}% |\n`;
+ summary += `| Statements | ${statements.toFixed(2)}% |\n`;
+ summary += `| Functions | ${functions.toFixed(2)}% |\n`;
+ summary += `| Branches | ${branches.toFixed(2)}% |\n`;
+ summary += `| **Average** | **${avgCoverage.toFixed(2)}%** |\n\n`;
+ }
+
+ // Benchmark results
+ if (results.benchmarks && results.benchmarks.length > 0) {
+ summary += `### ⚡ Benchmark Results\n\n`;
+ summary += `| Benchmark | Ops/sec | Mean (ms) |\n`;
+ summary += `|-----------|---------|------------|\n`;
+
+ for (const bench of results.benchmarks.slice(0, 10)) { // Show top 10
+ const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 });
+ const meanFormatted = (bench.mean * 1000).toFixed(3);
+ summary += `| ${bench.name} | ${opsFormatted} | ${meanFormatted} |\n`;
+ }
+
+ if (results.benchmarks.length > 10) {
+ summary += `\n*...and ${results.benchmarks.length - 10} more benchmarks*\n`;
+ }
+ summary += '\n';
+ }
+
+ // Links to artifacts
+ const runId = process.env.GITHUB_RUN_ID;
+ const runNumber = process.env.GITHUB_RUN_NUMBER;
+ const sha = process.env.GITHUB_SHA;
+
+ if (runId) {
+ summary += `### 📦 Artifacts\n\n`;
+ summary += `- 📊 [Test Results](https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${runId})\n`;
+ summary += `- 📈 [Coverage Report](https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${runId})\n`;
+ summary += `- ⚡ [Benchmark Results](https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${runId})\n\n`;
+ }
+
+ // Metadata
+ summary += `---\n`;
+ summary += `*Generated at ${new Date().toUTCString()}*\n`;
+ if (sha) {
+ summary += `*Commit: ${sha.substring(0, 7)}*\n`;
+ }
+ if (runNumber) {
+ summary += `*Run: #${runNumber}*\n`;
+ }
+
+ return summary;
+}
+
+// Generate and output summary
+const summary = generateTestSummary();
+console.log(summary);
+
+// Also write to file for artifact
+writeFileSync('test-summary.md', summary);
\ No newline at end of file
diff --git a/vitest.config.benchmark.ts b/vitest.config.benchmark.ts
index ea87629..fd0cca7 100644
--- a/vitest.config.benchmark.ts
+++ b/vitest.config.benchmark.ts
@@ -10,8 +10,9 @@ export default defineConfig({
// Benchmark specific options
include: ['tests/benchmarks/**/*.bench.ts'],
reporters: process.env.CI
- ? [['./scripts/vitest-benchmark-json-reporter.js', {}]]
+ ? ['default', ['./scripts/vitest-benchmark-json-reporter.js', {}]]
: ['default'],
+ outputFile: './benchmark-results.json',
},
setupFiles: [],
pool: 'forks',
diff --git a/vitest.config.ts b/vitest.config.ts
index 05c8eaa..74717cb 100644
--- a/vitest.config.ts
+++ b/vitest.config.ts
@@ -22,10 +22,11 @@ export default defineConfig({
// Retry configuration
retry: parseInt(process.env.TEST_RETRY_ATTEMPTS || '2', 10),
// Test reporter
- reporters: process.env.CI ? ['default', 'json', 'junit'] : ['default'],
+ reporters: process.env.CI ? ['default', 'json', 'junit', 'html'] : ['default'],
outputFile: {
json: './test-results/results.json',
- junit: './test-results/junit.xml'
+ junit: './test-results/junit.xml',
+ html: './test-results/html/index.html'
},
coverage: {
provider: 'v8',