diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 947ff3e..7d5c053 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -8,12 +8,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5
       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 #v5
         with:
-          python-version: '3.8'
+          python-version: '3.13'
       - name: Install dependencies
         run: |
diff --git a/.gitignore b/.gitignore
index c947a67..8b74de5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,7 +13,7 @@
 result.txt
 testing/main.c
 */*compile_commands.json
 testing/benchmark_results.txt
-testing/test-examples/*
+testing/examples/*
 # Ignore Python wheel packages (clang-format, clang-tidy)
 clang-tidy-1*
diff --git a/README.md b/README.md
index c33048c..f4ec609 100644
--- a/README.md
+++ b/README.md
@@ -195,6 +195,8 @@ repos:
 | Supports passing code string | ✅ via `--style` | ❌ |
 | Verbose output | ✅ via `--verbose` | ❌ |
 
+> [!TIP]
+> In most cases, there is no significant performance difference between `cpp-linter-hooks` and `mirrors-clang-format`. See the [benchmark results](testing/benchmark.md) for details.
 
 ## Contributing
 
diff --git a/docs/benchmark.md b/docs/benchmark.md
deleted file mode 100644
index 6c485d3..0000000
--- a/docs/benchmark.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Benchmarking
-
-[![CodSpeed Badge](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/cpp-linter/cpp-linter-hooks)
-
-This document outlines the benchmarking process for comparing the performance of cpp-linter-hooks and mirrors-clang-format.
-
-## Running the Benchmark
-
-```bash
-python3 testing/benchmark_hooks.py
-```
-
-## Results
-
-The results of the benchmarking process will be saved to `testing/benchmark_results.txt`.
diff --git a/testing/README.md b/testing/README.md
index 7c339a3..2008ccf 100644
--- a/testing/README.md
+++ b/testing/README.md
@@ -6,3 +6,9 @@
 pre-commit try-repo ./.. clang-format --verbose --all-files
 pre-commit try-repo ./.. clang-tidy --verbose --all-files
 ```
+
+## Benchmark
+
+```bash
+python3 testing/benchmark_hooks.py
+```
diff --git a/testing/benchmark.md b/testing/benchmark.md
new file mode 100644
index 0000000..4c49ed3
--- /dev/null
+++ b/testing/benchmark.md
@@ -0,0 +1,46 @@
+# Benchmarking
+
+This document outlines the benchmarking process for comparing the performance of cpp-linter-hooks and mirrors-clang-format.
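+
+The script compares two pre-commit configurations, `testing/benchmark_hook_1.yaml` (cpp-linter-hooks) and `testing/benchmark_hook_2.yaml` (mirrors-clang-format). A sketch of what they might contain follows; the `rev` pins below are placeholders, so check the actual files under `testing/`:
+
+```yaml
+# testing/benchmark_hook_1.yaml -- cpp-linter-hooks
+repos:
+  - repo: https://github.com/cpp-linter/cpp-linter-hooks
+    rev: v1.0.1  # placeholder; pin a real tag
+    hooks:
+      - id: clang-format
+        args: [--style=file]
+```
+
+```yaml
+# testing/benchmark_hook_2.yaml -- mirrors-clang-format
+repos:
+  - repo: https://github.com/pre-commit/mirrors-clang-format
+    rev: v20.1.0  # placeholder; pin a real tag
+    hooks:
+      - id: clang-format
+```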
+
+> Continuous performance tracking for this project is available on CodSpeed: [![CodSpeed Badge](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/cpp-linter/cpp-linter-hooks)
+
+## Running the Benchmark
+
+```bash
+python3 testing/benchmark_hooks.py
+```
+
+## Results
+
+```text
+# Updated on 2025-09-02
+Benchmark Results:
+
+Hook                 | Avg (s)          | Std (s)          | Min (s)          | Max (s)          | Runs
+---------------------+------------------+------------------+------------------+------------------+-----------------
+mirrors-clang-format | 0.116            | 0.003            | 0.113            | 0.118            | 5
+cpp-linter-hooks     | 0.114            | 0.003            | 0.109            | 0.117            | 5
+
+Results saved to testing/benchmark_results.txt
+```
diff --git a/testing/benchmark_hooks.py b/testing/benchmark_hooks.py
index 665b59e..149cb64 100644
--- a/testing/benchmark_hooks.py
+++ b/testing/benchmark_hooks.py
@@ -8,37 +8,37 @@
 Requirements:
 - pre-commit must be installed and available in PATH
 - Two config files:
-  - testing/pre-commit-config-cpp-linter-hooks.yaml
-  - testing/pre-commit-config-mirrors-clang-format.yaml
-- Target files: testing/main.c (or adjust as needed)
+  - testing/benchmark_hook_1.yaml (cpp-linter-hooks)
+  - testing/benchmark_hook_2.yaml (mirrors-clang-format)
+- Target files: testing/examples/*.c (cloned on demand; adjust as needed)
 """
 
 import os
+import shutil
 import subprocess
 import time
 import statistics
-import glob
 
 HOOKS = [
-    {
-        "name": "cpp-linter-hooks",
-        "config": "testing/benchmark_hook_1.yaml",
-    },
     {
         "name": "mirrors-clang-format",
         "config": "testing/benchmark_hook_2.yaml",
     },
+    {
+        "name": "cpp-linter-hooks",
+        "config": "testing/benchmark_hook_1.yaml",
+    },
 ]
 
-# Automatically find all C/C++ files in testing/ (and optionally src/, include/)
-TARGET_FILES = glob.glob("testing/test-examples/*.c", recursive=True)
-
 REPEATS = 5
 RESULTS_FILE = "testing/benchmark_results.txt"
 
 
-def git_clone():
+def prepare_code():
     try:
+        # Start every run from a pristine clone so formatting applied by
+        # an earlier run cannot skew the next timing.
+        shutil.rmtree("testing/examples", ignore_errors=True)
         subprocess.run(
             [
                 "git",
@@ -46,7 +46,7 @@ def git_clone():
                 "--depth",
                 "1",
                 "https://github.com/gouravthakur39/beginners-C-program-examples.git",
-                "testing/test-examples",
+                "testing/examples",
             ],
             check=True,
         )
@@ -54,8 +54,8 @@
         pass
 
 
-def run_hook(config, files):
-    cmd = ["pre-commit", "run", "--config", config, "--files"] + files
+def run_hook(config):
+    cmd = ["pre-commit", "run", "--config", config, "--all-files"]
     start = time.perf_counter()
     try:
         subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -66,30 +66,22 @@
     return end - start
 
 
-def safe_git_restore(files):
-    # Only restore files tracked by git
-    tracked = []
-    for f in files:
-        result = subprocess.run(
-            ["git", "ls-files", "--error-unmatch", f],
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-        )
-        if result.returncode == 0:
-            tracked.append(f)
-    if tracked:
-        subprocess.run(["git", "restore"] + tracked)
-
-
 def benchmark():
     results = {}
+    repo_root = os.getcwd()
     for hook in HOOKS:
         times = []
        print(f"\nBenchmarking {hook['name']}...")
         for i in range(REPEATS):
-            safe_git_restore(TARGET_FILES)
+            prepare_code()
             subprocess.run(["pre-commit", "clean"])
-            t = run_hook(hook["config"], TARGET_FILES)
+            # Time the hook from inside the cloned repo so --all-files only
+            # touches the example sources; resolve the config path first.
+            os.chdir("testing/examples")
+            try:
+                t = run_hook(os.path.join(repo_root, hook["config"]))
+            finally:
+                os.chdir(repo_root)
             print(f"  Run {i + 1}: {t:.3f} seconds")
             times.append(t)
         results[hook["name"]] = times
@@ -132,20 +124,29 @@
             f.write(line + "\n")
 
     print(f"\nResults saved to {RESULTS_FILE}")
 
-    # Write to GitHub Actions summary if available
+    # Write to GitHub Actions summary
     summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
     if summary_path:
         with open(summary_path, "a") as f:
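+            # GITHUB_STEP_SUMMARY is rendered as Markdown, so write the
+            # results as a Markdown table rather than the console layout.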
"a") as f: f.write("## Benchmark Results\n\n") - f.write(header_row + "\n") - f.write("-+-".join("-" * w for w in col_widths) + "\n") - for line in lines: - f.write(line + "\n") + # Markdown table header + md_header = "| " + " | ".join(headers) + " |\n" + md_sep = "|" + "|".join(["-" * (w + 2) for w in col_widths]) + "|\n" + f.write(md_header) + f.write(md_sep) + for name, times in results.items(): + avg = statistics.mean(times) + std = statistics.stdev(times) if len(times) > 1 else 0.0 + min_t = min(times) + max_t = max(times) + md_row = f"| {name} | {avg:.3f} | {std:.3f} | {min_t:.3f} | {max_t:.3f} | {len(times)} |\n" + f.write(md_row) f.write("\n") def main(): - git_clone() results = benchmark() report(results) diff --git a/testing/benchmark_results.txt b/testing/benchmark_results.txt new file mode 100644 index 0000000..830714e --- /dev/null +++ b/testing/benchmark_results.txt @@ -0,0 +1,8 @@ +Benchmark Results: + +Hook | Avg (s) | Std (s) | Min (s) | Max (s) | Runs +---------------------+------------------+------------------+------------------+------------------+----------------- +mirrors-clang-format | 0.116 | 0.003 | 0.113 | 0.118 | 5 +cpp-linter-hooks | 0.114 | 0.003 | 0.109 | 0.117 | 5 + +Results saved to testing/benchmark_results.txt