1 | 1 | #!/usr/bin/env python3 |
2 | 2 | """ |
3 | | -Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format. |
| 3 | +Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format using hyperfine. |
4 | 4 |
5 | 5 | Usage: |
6 | | - python benchmark_hooks.py |
| 6 | + python benchmark_hooks.py |
| 7 | + # or directly with hyperfine: |
| 8 | + hyperfine --warmup 1 -r 5 'pre-commit run --config ../testing/benchmark_hook_1.yaml --all-files' 'pre-commit run --config ../testing/benchmark_hook_2.yaml --all-files' |
7 | 9 |
8 | 10 | Requirements: |
9 | 11 | - pre-commit must be installed and available in PATH |
15 | 17 |
16 | 18 | import os |
17 | 19 | import subprocess |
18 | | -import time |
19 | | -import statistics |
| 20 | +import sys |
20 | 21 |
21 | 22 | HOOKS = [ |
22 | 23 | { |
@@ -50,95 +51,33 @@ def prepare_code(): |
50 | 51 | pass |
51 | 52 |
52 | 53 |
53 | | -def run_hook(config): |
54 | | - cmd = ["pre-commit", "run", "--config", config, "--all-files"] |
55 | | - start = time.perf_counter() |
56 | | - try: |
57 | | - subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) |
58 | | - except subprocess.CalledProcessError: |
59 | | - # Still record time even if hook fails |
60 | | - pass |
61 | | - end = time.perf_counter() |
62 | | - return end - start |
63 | | - |
64 | | - |
65 | | -def benchmark(): |
66 | | - results = {} |
67 | | - prepare_code() |
| 54 | +def run_hyperfine(): |
68 | 55 | os.chdir("examples") |
69 | | - for hook in HOOKS: |
70 | | - subprocess.run(["git", "restore", "."], check=True) |
71 | | - times = [] |
72 | | - print(f"\nBenchmarking {hook['name']}...") |
73 | | - for i in range(REPEATS): |
74 | | - subprocess.run(["pre-commit", "clean"]) |
75 | | - t = run_hook(hook["config"]) |
76 | | - print(f" Run {i + 1}: {t:.3f} seconds") |
77 | | - times.append(t) |
78 | | - results[hook["name"]] = times |
79 | | - return results |
80 | | - |
81 | | - |
82 | | -def report(results): |
83 | | - headers = ["Hook", "Avg (s)", "Std (s)", "Min (s)", "Max (s)", "Runs"] |
84 | | - col_widths = [max(len(h), 16) for h in headers] |
85 | | - # Calculate max width for each column |
86 | | - for name, times in results.items(): |
87 | | - col_widths[0] = max(col_widths[0], len(name)) |
88 | | - print("\nBenchmark Results:\n") |
89 | | - # Print header |
90 | | - header_row = " | ".join(h.ljust(w) for h, w in zip(headers, col_widths)) |
91 | | - print(header_row) |
92 | | - print("-+-".join("-" * w for w in col_widths)) |
93 | | - # Print rows |
94 | | - lines = [] |
95 | | - for name, times in results.items(): |
96 | | - avg = statistics.mean(times) |
97 | | - std = statistics.stdev(times) if len(times) > 1 else 0.0 |
98 | | - min_t = min(times) |
99 | | - max_t = max(times) |
100 | | - row = [ |
101 | | - name.ljust(col_widths[0]), |
102 | | - f"{avg:.3f}".ljust(col_widths[1]), |
103 | | - f"{std:.3f}".ljust(col_widths[2]), |
104 | | - f"{min_t:.3f}".ljust(col_widths[3]), |
105 | | - f"{max_t:.3f}".ljust(col_widths[4]), |
106 | | - str(len(times)).ljust(col_widths[5]), |
107 | | - ] |
108 | | - print(" | ".join(row)) |
109 | | - lines.append(" | ".join(row)) |
110 | | - # Save to file |
| 56 | + commands = [ |
| 57 | + f"pre-commit run --config {hook['config']} --all-files" for hook in HOOKS |
| 58 | + ] |
| 59 | + hyperfine_cmd = [ |
| 60 | + "hyperfine", |
| 61 | + "--warmup", |
| 62 | + "1", |
| 63 | + "-r", |
| 64 | + str(REPEATS), |
| 65 | + ] + commands |
| 66 | + print("Running benchmark with hyperfine:") |
| 67 | + print(" ".join(hyperfine_cmd)) |
| 68 | + try: |
| 69 | + subprocess.run(hyperfine_cmd, check=True) |
| 70 | + except FileNotFoundError: |
| 71 | + print( |
| 72 | + "hyperfine is not installed. Please install it with 'cargo install hyperfine' or 'brew install hyperfine'." |
| 73 | + ) |
| 74 | + sys.exit(1) |
111 | 75 | os.chdir("..") |
112 | | - with open(RESULTS_FILE, "w") as f: |
113 | | - f.write(header_row + "\n") |
114 | | - f.write("-+-".join("-" * w for w in col_widths) + "\n") |
115 | | - for line in lines: |
116 | | - f.write(line + "\n") |
117 | | - print(f"\nResults saved to {RESULTS_FILE}") |
118 | | - |
119 | | - # Write to GitHub Actions summary |
120 | | - summary_path = os.environ.get("GITHUB_STEP_SUMMARY") |
121 | | - if summary_path: |
122 | | - with open(summary_path, "a") as f: |
123 | | - f.write("## Benchmark Results\n\n") |
124 | | - # Markdown table header |
125 | | - md_header = "| " + " | ".join(headers) + " |\n" |
126 | | - md_sep = "|" + "|".join(["-" * (w + 2) for w in col_widths]) + "|\n" |
127 | | - f.write(md_header) |
128 | | - f.write(md_sep) |
129 | | - for name, times in results.items(): |
130 | | - avg = statistics.mean(times) |
131 | | - std = statistics.stdev(times) if len(times) > 1 else 0.0 |
132 | | - min_t = min(times) |
133 | | - max_t = max(times) |
134 | | - md_row = f"| {name} | {avg:.3f} | {std:.3f} | {min_t:.3f} | {max_t:.3f} | {len(times)} |\n" |
135 | | - f.write(md_row) |
136 | | - f.write("\n") |
137 | 76 |
138 | 77 |
139 | 78 | def main(): |
140 | | - results = benchmark() |
141 | | - report(results) |
| 79 | + prepare_code() |
| 80 | + run_hyperfine() |
142 | 81 |
143 | 82 |
144 | 83 | if __name__ == "__main__": |
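Note: this change drops the hand-rolled report() table, including the Markdown appended to the GitHub Actions step summary. If that output is still wanted, hyperfine can produce the table itself via its --export-markdown option. The following is only a sketch of how run_hyperfine() could be extended, not part of this PR; the helper name run_hyperfine_with_summary and the benchmark_results.md filename are illustrative.

    import os
    import subprocess


    def run_hyperfine_with_summary(commands, repeats=5):
        # Illustrative only: have hyperfine write its results as a Markdown table,
        # then append that table to the GitHub Actions step summary when in CI.
        results_md = "benchmark_results.md"
        hyperfine_cmd = [
            "hyperfine",
            "--warmup", "1",
            "-r", str(repeats),
            "--export-markdown", results_md,
        ] + commands
        subprocess.run(hyperfine_cmd, check=True)

        # GITHUB_STEP_SUMMARY is set by GitHub Actions; Markdown appended to the
        # file it points at is rendered on the workflow run's summary page.
        summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
        if summary_path:
            with open(results_md) as src, open(summary_path, "a") as dst:
                dst.write("## Benchmark Results\n\n")
                dst.write(src.read() + "\n")

Called in place of run_hyperfine(), this would restore the summary table without reintroducing any manual timing or statistics code.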