Commit 960f629

chore: update benchmark

Parent: a076581

3 files changed (+33 additions, -90 deletions)

testing/benchmark.md

Lines changed: 4 additions & 0 deletions
@@ -8,6 +8,10 @@ This document outlines the benchmarking process for comparing the performance of
 
 ```bash
 python3 testing/benchmark_hooks.py
+
+# or
+
+hyperfine -i --warmup 1 -r 5 'pre-commit run --config ../testing/benchmark_hook_1.yaml --all-files' 'pre-commit run --config ../testing/benchmark_hook_2.yaml --all-files'
 ```
 
 ## Results
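
The documented hyperfine invocation only prints its comparison to the terminal. As a hedged sketch layered on top of it (not part of this commit), hyperfine's `--export-json` flag can capture the timings for programmatic use; the `results.json` path, the standalone helper itself, and the assumption that it runs from the `examples/` directory (implied by the `../testing/...` config paths) are all illustrative.

```python
# Hypothetical helper, not part of this commit: run the documented hyperfine
# comparison with --export-json (a real hyperfine flag) and print a one-line
# summary per hook configuration. Assumes hyperfine and pre-commit are on PATH
# and that it is run from the examples/ directory.
import json
import subprocess

COMMANDS = [
    "pre-commit run --config ../testing/benchmark_hook_1.yaml --all-files",
    "pre-commit run --config ../testing/benchmark_hook_2.yaml --all-files",
]

# Same flags as the documented command, plus a JSON export for later analysis.
subprocess.run(
    ["hyperfine", "-i", "--warmup", "1", "-r", "5", "--export-json", "results.json"]
    + COMMANDS,
    check=True,
)

with open("results.json") as f:
    data = json.load(f)

# hyperfine's JSON export contains one entry per benchmarked command.
for result in data["results"]:
    print(f"{result['command']}: mean {result['mean']:.3f}s ± {result['stddev']:.3f}s")
```

This keeps the comparison numbers available for scripting without re-implementing the timing and statistics that the old benchmark_hooks.py computed by hand.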

testing/benchmark_hook_1.yaml

Lines changed: 2 additions & 2 deletions
Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/cpp-linter/cpp-linter-hooks
-    rev: v1.1.2
+    rev: v1.1.3
     hooks:
       - id: clang-format
-        args: [--style=file, --version=21]
+        args: [--style=file]

testing/benchmark_hooks.py

Lines changed: 27 additions & 88 deletions
@@ -1,9 +1,11 @@
 #!/usr/bin/env python3
 """
-Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format.
+Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format using hyperfine.
 
 Usage:
-    python benchmark_hooks.py
+    python benchmark_hooks.py
+    # or directly with hyperfine:
+    hyperfine --warmup 1 -r 5 'pre-commit run --config ../testing/benchmark_hook_1.yaml --all-files' 'pre-commit run --config ../testing/benchmark_hook_2.yaml --all-files'
 
 Requirements:
 - pre-commit must be installed and available in PATH
@@ -15,8 +17,7 @@
 
 import os
 import subprocess
-import time
-import statistics
+import sys
 
 HOOKS = [
     {
@@ -50,95 +51,33 @@ def prepare_code():
     pass
 
 
-def run_hook(config):
-    cmd = ["pre-commit", "run", "--config", config, "--all-files"]
-    start = time.perf_counter()
-    try:
-        subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    except subprocess.CalledProcessError:
-        # Still record time even if hook fails
-        pass
-    end = time.perf_counter()
-    return end - start
-
-
-def benchmark():
-    results = {}
-    prepare_code()
+def run_hyperfine():
     os.chdir("examples")
-    for hook in HOOKS:
-        subprocess.run(["git", "restore", "."], check=True)
-        times = []
-        print(f"\nBenchmarking {hook['name']}...")
-        for i in range(REPEATS):
-            subprocess.run(["pre-commit", "clean"])
-            t = run_hook(hook["config"])
-            print(f"  Run {i + 1}: {t:.3f} seconds")
-            times.append(t)
-        results[hook["name"]] = times
-    return results
-
-
-def report(results):
-    headers = ["Hook", "Avg (s)", "Std (s)", "Min (s)", "Max (s)", "Runs"]
-    col_widths = [max(len(h), 16) for h in headers]
-    # Calculate max width for each column
-    for name, times in results.items():
-        col_widths[0] = max(col_widths[0], len(name))
-    print("\nBenchmark Results:\n")
-    # Print header
-    header_row = " | ".join(h.ljust(w) for h, w in zip(headers, col_widths))
-    print(header_row)
-    print("-+-".join("-" * w for w in col_widths))
-    # Print rows
-    lines = []
-    for name, times in results.items():
-        avg = statistics.mean(times)
-        std = statistics.stdev(times) if len(times) > 1 else 0.0
-        min_t = min(times)
-        max_t = max(times)
-        row = [
-            name.ljust(col_widths[0]),
-            f"{avg:.3f}".ljust(col_widths[1]),
-            f"{std:.3f}".ljust(col_widths[2]),
-            f"{min_t:.3f}".ljust(col_widths[3]),
-            f"{max_t:.3f}".ljust(col_widths[4]),
-            str(len(times)).ljust(col_widths[5]),
-        ]
-        print(" | ".join(row))
-        lines.append(" | ".join(row))
-    # Save to file
+    commands = [
+        f"pre-commit run --config {hook['config']} --all-files" for hook in HOOKS
+    ]
+    hyperfine_cmd = [
+        "hyperfine",
+        "--warmup",
+        "1",
+        "-r",
+        str(REPEATS),
+    ] + commands
+    print("Running benchmark with hyperfine:")
+    print(" ".join(hyperfine_cmd))
+    try:
+        subprocess.run(hyperfine_cmd, check=True)
+    except FileNotFoundError:
+        print(
+            "hyperfine is not installed. Please install it with 'cargo install hyperfine' or 'brew install hyperfine'."
+        )
+        sys.exit(1)
     os.chdir("..")
-    with open(RESULTS_FILE, "w") as f:
-        f.write(header_row + "\n")
-        f.write("-+-".join("-" * w for w in col_widths) + "\n")
-        for line in lines:
-            f.write(line + "\n")
-    print(f"\nResults saved to {RESULTS_FILE}")
-
-    # Write to GitHub Actions summary
-    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
-    if summary_path:
-        with open(summary_path, "a") as f:
-            f.write("## Benchmark Results\n\n")
-            # Markdown table header
-            md_header = "| " + " | ".join(headers) + " |\n"
-            md_sep = "|" + "|".join(["-" * (w + 2) for w in col_widths]) + "|\n"
-            f.write(md_header)
-            f.write(md_sep)
-            for name, times in results.items():
-                avg = statistics.mean(times)
-                std = statistics.stdev(times) if len(times) > 1 else 0.0
-                min_t = min(times)
-                max_t = max(times)
-                md_row = f"| {name} | {avg:.3f} | {std:.3f} | {min_t:.3f} | {max_t:.3f} | {len(times)} |\n"
-                f.write(md_row)
-            f.write("\n")
 
 
 def main():
-    results = benchmark()
-    report(results)
+    prepare_code()
+    run_hyperfine()
 
 
 if __name__ == "__main__":
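
The removed report() also appended a Markdown table to the GitHub Actions step summary via GITHUB_STEP_SUMMARY, and the new script drops that step. A minimal sketch of one way to get it back on top of the hyperfine flow is shown below, using hyperfine's real --export-markdown flag; the run_hyperfine_with_summary name and the benchmark_results.md path are hypothetical, not code from this commit.

```python
# Hypothetical variant of run_hyperfine(), not part of this commit: export
# hyperfine's comparison table as Markdown and append it to the GitHub Actions
# step summary, roughly what the removed report() did by hand.
import os
import subprocess


def run_hyperfine_with_summary(commands, repeats=5):
    export_path = "benchmark_results.md"  # stand-in filename
    hyperfine_cmd = [
        "hyperfine",
        "--warmup", "1",
        "-r", str(repeats),
        "--export-markdown", export_path,  # real hyperfine flag
    ] + commands
    subprocess.run(hyperfine_cmd, check=True)

    # GITHUB_STEP_SUMMARY is set by GitHub Actions; the removed report() used it too.
    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
    if summary_path:
        with open(export_path) as src, open(summary_path, "a") as dst:
            dst.write("## Benchmark Results\n\n")
            dst.write(src.read())
            dst.write("\n")
```

The commands argument would be built the same way as in run_hyperfine(): one "pre-commit run --config ... --all-files" string per entry in HOOKS.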
