From 89d7faa357543da53d4a028ce7882871742466fc Mon Sep 17 00:00:00 2001
From: shenxianpeng
Date: Sun, 31 Aug 2025 22:06:54 +0300
Subject: [PATCH 1/6] feat: add benchmark

---
 .gitignore                 |  1 +
 docs/benchmark.md          | 18 ++++++++
 testing/benchmark_hooks.py | 95 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 114 insertions(+)
 create mode 100644 docs/benchmark.md
 create mode 100644 testing/benchmark_hooks.py

diff --git a/.gitignore b/.gitignore
index ab1c8ff..e48b78d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ venv
 result.txt
 testing/main.c
 */*compile_commands.json
+testing/benchmark_results.txt
 
 # Ignore Python wheel packages (clang-format, clang-tidy)
 clang-tidy-1*
diff --git a/docs/benchmark.md b/docs/benchmark.md
new file mode 100644
index 0000000..fea7ca4
--- /dev/null
+++ b/docs/benchmark.md
@@ -0,0 +1,18 @@
+# Benchmarking
+
+This document outlines the benchmarking process for comparing the performance of cpp-linter-hooks and mirrors-clang-format.
+
+## Running the Benchmark
+
+```bash
+python3 testing/benchmark_hooks.py
+```
+
+## Results
+
+The results of the benchmarking process will be saved to `testing/benchmark_results.txt`.
+
+## To Do
+
+- Run benchmark against a larger codebase, such as [TheAlgorithms/C-Plus-Plus](https://github.com/TheAlgorithms/C-Plus-Plus).
+- Run benchmark with GitHub Actions for continuous integration.
diff --git a/testing/benchmark_hooks.py b/testing/benchmark_hooks.py
new file mode 100644
index 0000000..1e6009c
--- /dev/null
+++ b/testing/benchmark_hooks.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+"""
+Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format.
+
+Usage:
+    python3 testing/benchmark_hooks.py
+
+Requirements:
+- pre-commit must be installed and available in PATH
+- Two config files:
+  - testing/pre-commit-config-cpp-linter-hooks.yaml
+  - testing/pre-commit-config-mirrors-clang-format.yaml
+- Target files: testing/main.c (or adjust as needed)
+"""
+
+import subprocess
+import time
+import statistics
+import glob
+
+HOOKS = [
+    {
+        "name": "cpp-linter-hooks",
+        "config": "testing/pre-commit-config-cpp-linter-hooks.yaml",
+    },
+    {
+        "name": "mirrors-clang-format",
+        "config": "testing/pre-commit-config-mirrors-clang-format.yaml",
+    },
+]
+
+# Automatically find all C/C++ files in testing/ (and optionally src/, include/)
+TARGET_FILES = (
+    glob.glob("testing/**/*.c", recursive=True)
+    + glob.glob("testing/**/*.cpp", recursive=True)
+    + glob.glob("testing/**/*.h", recursive=True)
+    + glob.glob("testing/**/*.hpp", recursive=True)
+)
+
+REPEATS = 5
+RESULTS_FILE = "testing/benchmark_results.txt"
+
+
+def run_hook(config, files):
+    cmd = ["pre-commit", "run", "--config", config, "--files"] + files
+    start = time.perf_counter()
+    try:
+        subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    except subprocess.CalledProcessError:
+        # pre-commit exits nonzero when a hook reformats files; record the time anyway
+        pass
+    end = time.perf_counter()
+    return end - start
+
+
+def benchmark():
+    results = {}
+    for hook in HOOKS:
+        times = []
+        print(f"Benchmarking {hook['name']}...")
+        for i in range(REPEATS):
+            # Clean up any changes before each run
+            subprocess.run(["git", "restore"] + TARGET_FILES)
+            subprocess.run(["pre-commit", "clean"])
+            t = run_hook(hook["config"], TARGET_FILES)
+            print(f"  Run {i + 1}: {t:.3f} seconds")
+            times.append(t)
+        results[hook["name"]] = times
+    return results
+
+
+def report(results):
+    lines = []
+    for name, times in results.items():
+        avg = statistics.mean(times)
+        std = statistics.stdev(times) if len(times) > 1 else 0.0
+        min_t = min(times)
+        max_t = max(times)
+        lines.append(
+            f"{name}: avg={avg:.3f}s, std={std:.3f}s, min={min_t:.3f}s, max={max_t:.3f}s, runs={len(times)}"
+        )
+    print("\nBenchmark Results:")
+    print("\n".join(lines))
+    with open(RESULTS_FILE, "w") as f:
+        f.write("\n".join(lines) + "\n")
+    print(f"Results saved to {RESULTS_FILE}")
+
+
+def main():
+    results = benchmark()
+    report(results)
+
+
+if __name__ == "__main__":
+    main()

From c21a796b44c4c343b93fcb7e981469087516236e Mon Sep 17 00:00:00 2001
From: Xianpeng Shen
Date: Mon, 1 Sep 2025 14:37:21 +0300
Subject: [PATCH 2/6] Add CodSpeed badge to benchmark.md

Added CodSpeed badge to the benchmarking documentation.
---
 docs/benchmark.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/benchmark.md b/docs/benchmark.md
index fea7ca4..0f2189d 100644
--- a/docs/benchmark.md
+++ b/docs/benchmark.md
@@ -1,5 +1,7 @@
 # Benchmarking
 
+[![CodSpeed Badge](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/cpp-linter/cpp-linter-hooks)
+
 This document outlines the benchmarking process for comparing the performance of cpp-linter-hooks and mirrors-clang-format.
 
 ## Running the Benchmark

From 60eea57950ff2b8d4b6369321a7fb36765774b12 Mon Sep 17 00:00:00 2001
From: shenxianpeng
Date: Mon, 1 Sep 2025 22:37:34 +0300
Subject: [PATCH 3/6] fix: update benchmark test script

---
 .gitignore                 |  1 +
 .pre-commit-config.yaml    |  1 +
 testing/benchmark_hooks.py | 77 ++++++++++++++++++++++++++++++--------
 3 files changed, 64 insertions(+), 15 deletions(-)

diff --git a/.gitignore b/.gitignore
index e48b78d..a6743d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ result.txt
 testing/main.c
 */*compile_commands.json
 testing/benchmark_results.txt
+testing/test-examples/
 
 # Ignore Python wheel packages (clang-format, clang-tidy)
 clang-tidy-1*
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9f3f80d..f1db01f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,4 +17,5 @@ repos:
     rev: v0.12.11
     hooks:
       - id: ruff
+        args: [--fix]
       - id: ruff-format
diff --git a/testing/benchmark_hooks.py b/testing/benchmark_hooks.py
index 1e6009c..c374fb8 100644
--- a/testing/benchmark_hooks.py
+++ b/testing/benchmark_hooks.py
@@ -30,17 +30,29 @@
 ]
 
-# Automatically find all C/C++ files in testing/ (and optionally src/, include/)
-TARGET_FILES = (
-    glob.glob("testing/**/*.c", recursive=True)
-    + glob.glob("testing/**/*.cpp", recursive=True)
-    + glob.glob("testing/**/*.h", recursive=True)
-    + glob.glob("testing/**/*.hpp", recursive=True)
-)
+# Collect the C example files cloned into testing/test-examples/
+TARGET_FILES = glob.glob("testing/test-examples/*.c", recursive=True)
 
 REPEATS = 5
 RESULTS_FILE = "testing/benchmark_results.txt"
 
 
+def git_clone():
+    try:
+        subprocess.run(
+            [
+                "git",
+                "clone",
+                "--depth",
+                "1",
+                "https://github.com/gouravthakur39/beginners-C-program-examples.git",
+                "testing/test-examples",
+            ],
+            check=True,
+        )
+    except subprocess.CalledProcessError:
+        pass  # clone fails if testing/test-examples already exists; reuse it
+
+
 def run_hook(config, files):
     cmd = ["pre-commit", "run", "--config", config, "--files"] + files
     start = time.perf_counter()
@@ -53,14 +65,28 @@ def run_hook(config, files):
     return end - start
 
 
+def safe_git_restore(files):
+    # Only restore files tracked by git
+    tracked = []
+    for f in files:
+        result = subprocess.run(
+            ["git", "ls-files", "--error-unmatch", f],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        if result.returncode == 0:
+            tracked.append(f)
+    if tracked:
+        subprocess.run(["git", "restore"] + tracked)
+
+
 def benchmark():
     results = {}
     for hook in HOOKS:
         times = []
         print(f"Benchmarking {hook['name']}...")
         for i in range(REPEATS):
-            # Clean up any changes before each run
-            subprocess.run(["git", "restore"] + TARGET_FILES)
+            safe_git_restore(TARGET_FILES)
             subprocess.run(["pre-commit", "clean"])
             t = run_hook(hook["config"], TARGET_FILES)
             print(f"  Run {i + 1}: {t:.3f} seconds")
@@ -70,23 +96,44 @@ def benchmark():
 
 
 def report(results):
+    headers = ["Hook", "Avg (s)", "Std (s)", "Min (s)", "Max (s)", "Runs"]
+    col_widths = [max(len(h), 16) for h in headers]
+    # Calculate max width for each column
+    for name, times in results.items():
+        col_widths[0] = max(col_widths[0], len(name))
+    print("\nBenchmark Results:\n")
+    # Print header
+    header_row = " | ".join(h.ljust(w) for h, w in zip(headers, col_widths))
+    print(header_row)
+    print("-+-".join("-" * w for w in col_widths))
+    # Print rows
     lines = []
     for name, times in results.items():
         avg = statistics.mean(times)
         std = statistics.stdev(times) if len(times) > 1 else 0.0
         min_t = min(times)
         max_t = max(times)
-        lines.append(
-            f"{name}: avg={avg:.3f}s, std={std:.3f}s, min={min_t:.3f}s, max={max_t:.3f}s, runs={len(times)}"
-        )
-    print("\nBenchmark Results:")
-    print("\n".join(lines))
+        row = [
+            name.ljust(col_widths[0]),
+            f"{avg:.3f}".ljust(col_widths[1]),
+            f"{std:.3f}".ljust(col_widths[2]),
+            f"{min_t:.3f}".ljust(col_widths[3]),
+            f"{max_t:.3f}".ljust(col_widths[4]),
+            str(len(times)).ljust(col_widths[5]),
+        ]
+        print(" | ".join(row))
+        lines.append(" | ".join(row))
+    # Save to file
     with open(RESULTS_FILE, "w") as f:
-        f.write("\n".join(lines) + "\n")
-    print(f"Results saved to {RESULTS_FILE}")
+        f.write(header_row + "\n")
+        f.write("-+-".join("-" * w for w in col_widths) + "\n")
+        for line in lines:
+            f.write(line + "\n")
+    print(f"\nResults saved to {RESULTS_FILE}")
 
 
 def main():
+    git_clone()
     results = benchmark()
     report(results)

From dd0a13ae63cede682bdedec16fa69c4438275f8d Mon Sep 17 00:00:00 2001
From: shenxianpeng
Date: Mon, 1 Sep 2025 22:49:17 +0300
Subject: [PATCH 4/6] fix: update benchmark test script

---
 .gitignore                    | 2 +-
 testing/benchmark_hook_1.yaml | 6 ++++++
 testing/benchmark_hook_2.yaml | 5 +++++
 testing/benchmark_hooks.py    | 6 +++---
 4 files changed, 15 insertions(+), 4 deletions(-)
 create mode 100644 testing/benchmark_hook_1.yaml
 create mode 100644 testing/benchmark_hook_2.yaml

diff --git a/.gitignore b/.gitignore
index a6743d1..c947a67 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,7 +13,7 @@ result.txt
 testing/main.c
 */*compile_commands.json
 testing/benchmark_results.txt
-testing/test-examples/
+testing/test-examples/*
 
 # Ignore Python wheel packages (clang-format, clang-tidy)
 clang-tidy-1*
diff --git a/testing/benchmark_hook_1.yaml b/testing/benchmark_hook_1.yaml
new file mode 100644
index 0000000..5da643a
--- /dev/null
+++ b/testing/benchmark_hook_1.yaml
@@ -0,0 +1,6 @@
+repos:
+  - repo: https://github.com/cpp-linter/cpp-linter-hooks
+    rev: v1.1.0
+    hooks:
+      - id: clang-format
+        args: [--style=file, --version=21]
diff --git a/testing/benchmark_hook_2.yaml b/testing/benchmark_hook_2.yaml
new file mode 100644
index 0000000..79f903c
--- /dev/null
+++ b/testing/benchmark_hook_2.yaml
@@ -0,0 +1,5 @@
+repos:
+  - repo: https://github.com/pre-commit/mirrors-clang-format
+    rev: v21.1.0
+    hooks:
+      - id: clang-format
diff --git a/testing/benchmark_hooks.py b/testing/benchmark_hooks.py
index c374fb8..b3a7313 100644
--- a/testing/benchmark_hooks.py
+++ b/testing/benchmark_hooks.py
@@ -21,11 +21,11 @@
 HOOKS = [
     {
         "name": "cpp-linter-hooks",
-        "config": "testing/pre-commit-config-cpp-linter-hooks.yaml",
"testing/pre-commit-config-cpp-linter-hooks.yaml", + "config": "testing/benchmark_hook_1.yaml", }, { "name": "mirrors-clang-format", - "config": "testing/pre-commit-config-mirrors-clang-format.yaml", + "config": "testing/benchmark_hook_2.yaml", }, ] @@ -84,7 +84,7 @@ def benchmark(): results = {} for hook in HOOKS: times = [] - print(f"Benchmarking {hook['name']}...") + print(f"\nBenchmarking {hook['name']}...") for i in range(REPEATS): safe_git_restore(TARGET_FILES) subprocess.run(["pre-commit", "clean"]) From 2e6797aef3f13ab0f71cb0881ede0d9cdf10e862 Mon Sep 17 00:00:00 2001 From: shenxianpeng Date: Tue, 2 Sep 2025 05:12:03 +0300 Subject: [PATCH 5/6] feat: create benchmark worflow and write to summaryy --- .github/workflows/benchmark.yml | 25 +++++++++++++++++++++++++ .pre-commit-config.yaml | 4 ---- testing/benchmark_hooks.py | 12 ++++++++++++ 3 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 .github/workflows/benchmark.yml diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000..947ff3e --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,25 @@ +name: Benchmark Hooks + +on: + workflow_dispatch: + +jobs: + benchmark: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.8' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pre-commit + + - name: Run benchmarks + run: | + python testing/benchmark_hooks.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f1db01f..7253b24 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,10 +9,6 @@ repos: - id: check-yaml - id: check-toml - id: requirements-txt-fixer - - repo: https://github.com/asottile/pyupgrade - rev: v3.20.0 - hooks: - - id: pyupgrade - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.12.11 hooks: diff --git a/testing/benchmark_hooks.py b/testing/benchmark_hooks.py index b3a7313..665b59e 100644 --- a/testing/benchmark_hooks.py +++ b/testing/benchmark_hooks.py @@ -13,6 +13,7 @@ - Target files: testing/main.c (or adjust as needed) """ +import os import subprocess import time import statistics @@ -131,6 +132,17 @@ def report(results): f.write(line + "\n") print(f"\nResults saved to {RESULTS_FILE}") + # Write to GitHub Actions summary if available + summary_path = os.environ.get("GITHUB_STEP_SUMMARY") + if summary_path: + with open(summary_path, "a") as f: + f.write("## Benchmark Results\n\n") + f.write(header_row + "\n") + f.write("-+-".join("-" * w for w in col_widths) + "\n") + for line in lines: + f.write(line + "\n") + f.write("\n") + def main(): git_clone() From 447bd7f8ff14224ba084b570d9cae25700f5a62a Mon Sep 17 00:00:00 2001 From: shenxianpeng Date: Tue, 2 Sep 2025 05:18:20 +0300 Subject: [PATCH 6/6] docs: update benchmark.md --- docs/benchmark.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/benchmark.md b/docs/benchmark.md index 0f2189d..6c485d3 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -13,8 +13,3 @@ python3 testing/benchmark_hooks.py ## Results The results of the benchmarking process will be saved to `testing/benchmark_results.txt`. - -## To Do - -- Run benchmark against a larger codebase, such as [TheAlgorithms/C-Plus-Plus](https://github.com/TheAlgorithms/C-Plus-Plus). -- Run benchmark with GitHub Actions for continuous integration.