- Create benchmark test suites for critical operations:
  - Node loading performance
  - Database query performance
  - Search operations performance
  - Validation performance
  - MCP tool execution performance
- Add GitHub Actions workflow for benchmark tracking:
  - Runs on push to main and PRs
  - Uses github-action-benchmark for historical tracking
  - Comments on PRs with performance results
  - Alerts on >10% performance regressions
  - Stores results in GitHub Pages
- Create benchmark infrastructure:
  - Custom Vitest benchmark configuration (sketched below)
  - JSON reporter for CI results
  - Result formatter for github-action-benchmark
  - Performance threshold documentation
- Add supporting utilities:
  - SQLiteStorageService for benchmark database setup
  - MCPEngine wrapper for testing MCP tools
  - Test factories for generating benchmark data
  - Enhanced NodeRepository with benchmark methods
- Document benchmark system:
  - Comprehensive benchmark guide in docs/BENCHMARKS.md
  - Performance thresholds in .github/BENCHMARK_THRESHOLDS.md
  - README for benchmarks directory
  - Integration with existing test suite

The benchmark system will help monitor performance over time and catch regressions before they reach production.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
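A minimal sketch of how the custom Vitest benchmark configuration mentioned above might register the JSON reporter shown further down; the config file name, the `benchmarks/` paths, and the include glob are assumptions rather than values taken from this repository:

```javascript
// vitest.bench.config.js -- hypothetical file name and layout
import { defineConfig } from 'vitest/config';
import BenchmarkJsonReporter from './benchmarks/json-reporter.js'; // assumed path

export default defineConfig({
  test: {
    benchmark: {
      // Only pick up dedicated benchmark files (glob is an assumption)
      include: ['benchmarks/**/*.bench.{js,ts}'],
      // Keep the default console output and also emit benchmark-results.json
      reporters: ['default', new BenchmarkJsonReporter()],
    },
  },
});
```

Passing a reporter instance alongside `'default'` keeps the human-readable console output while producing the JSON artifact for CI. The reporter itself: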
```javascript
import { writeFileSync } from 'fs';
import { resolve } from 'path';

// Custom Vitest reporter that collects benchmark results and writes them to
// benchmark-results.json so CI tooling can pick them up.
export default class BenchmarkJsonReporter {
  constructor() {
    this.results = [];
  }

  onTaskUpdate(tasks) {
    // Called when tasks are updated; nothing to do until the run finishes.
  }

  onFinished(files) {
    const results = {
      timestamp: new Date().toISOString(),
      files: []
    };

    for (const file of files || []) {
      if (!file) continue;

      const fileResult = {
        filepath: file.filepath || file.name,
        groups: []
      };

      // Walk each suite (describe block) and collect its benchmark tasks.
      if (file.tasks) {
        for (const task of file.tasks) {
          if (task.type === 'suite' && task.tasks) {
            const group = {
              name: task.name,
              benchmarks: []
            };

            for (const benchmark of task.tasks) {
              // Only record tasks that actually produced benchmark data.
              if (benchmark.result?.benchmark) {
                group.benchmarks.push({
                  name: benchmark.name,
                  result: {
                    mean: benchmark.result.benchmark.mean,
                    min: benchmark.result.benchmark.min,
                    max: benchmark.result.benchmark.max,
                    hz: benchmark.result.benchmark.hz,
                    p75: benchmark.result.benchmark.p75,
                    p99: benchmark.result.benchmark.p99,
                    p995: benchmark.result.benchmark.p995,
                    p999: benchmark.result.benchmark.p999,
                    rme: benchmark.result.benchmark.rme,
                    samples: benchmark.result.benchmark.samples
                  }
                });
              }
            }

            if (group.benchmarks.length > 0) {
              fileResult.groups.push(group);
            }
          }
        }
      }

      if (fileResult.groups.length > 0) {
        results.files.push(fileResult);
      }
    }

    // Write results to the working directory for CI to consume.
    const outputPath = resolve(process.cwd(), 'benchmark-results.json');
    writeFileSync(outputPath, JSON.stringify(results, null, 2));
    console.log(`Benchmark results written to ${outputPath}`);
  }
}
```
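The commit message also lists a result formatter for github-action-benchmark. A minimal sketch of that step, assuming the action's `customSmallerIsBetter` tool, which consumes a flat JSON array of `{ name, unit, value }` entries; the script path and output file name are hypothetical:

```javascript
// scripts/format-benchmark-results.js -- hypothetical path
// Converts the reporter's benchmark-results.json into the flat array format
// that github-action-benchmark's `customSmallerIsBetter` tool expects.
import { readFileSync, writeFileSync } from 'fs';
import { resolve } from 'path';

const input = JSON.parse(
  readFileSync(resolve(process.cwd(), 'benchmark-results.json'), 'utf8')
);

const formatted = [];
for (const file of input.files) {
  for (const group of file.groups) {
    for (const bench of group.benchmarks) {
      formatted.push({
        name: `${group.name} > ${bench.name}`,
        unit: 'ms', // tinybench (Vitest's bench runner) reports timings in milliseconds
        value: bench.result.mean,
        range: `±${bench.result.rme.toFixed(2)}%`,
        extra: `${bench.result.hz.toFixed(2)} ops/sec`,
      });
    }
  }
}

writeFileSync('formatted-benchmarks.json', JSON.stringify(formatted, null, 2));
console.log(`Formatted ${formatted.length} benchmark entries`);
```

In the workflow, the formatted file would then be handed to github-action-benchmark via its `output-file-path` input, with `alert-threshold` set to `'110%'` to match the >10% regression rule described above.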